Bug Summary

File: dev/pci/drm/i915/i915_query.c
Warning: line 410, column 12
Potential leak of memory pointed to by 'oa_config_ids'
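
The flagged function is query_perf_config_list(). Because this OpenBSD port has no krealloc() shim, the #else branch grows the ID buffer by hand: kmalloc() a larger array, memcpy() the old contents across, kfree() the old array. On the second pass of the sizing loop, 'oa_config_ids' still points at the buffer from the first pass, so if that kmalloc() fails, the early return at line 410 leaks it. A minimal sketch of one possible fix follows; the added kfree() is an assumption, not a patch from the tree:

	ids = kmalloc(n_configs * sizeof(*oa_config_ids), GFP_KERNEL);
	if (!ids) {
		kfree(oa_config_ids);	/* hypothetical: free the buffer from the previous pass */
		return -ENOMEM;
	}

kfree(NULL) is safe in this tree (it forwards to free(9)), so the first pass, where oa_config_ids is still NULL, needs no special case.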

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_query.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_query.c

/usr/src/sys/dev/pci/drm/i915/i915_query.c

1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2018 Intel Corporation
5 */
6
7#include <linux/nospec.h>
8
9#include "i915_drv.h"
10#include "i915_perf.h"
11#include "i915_query.h"
12#include "gt/intel_engine_user.h"
13#include <uapi/drm/i915_drm.h>
14
15static int copy_query_item(void *query_hdr, size_t query_sz,
16 u32 total_length,
17 struct drm_i915_query_item *query_item)
18{
19 if (query_item->length == 0)
20 return total_length;
21
22 if (query_item->length < total_length)
23 return -EINVAL;
24
25 if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
26 query_sz))
27 return -EFAULT;
28
29 return 0;
30}
31
32static int fill_topology_info(const struct sseu_dev_info *sseu,
33 struct drm_i915_query_item *query_item,
34 intel_sseu_ss_mask_t subslice_mask)
35{
36 struct drm_i915_query_topology_info topo;
37 u32 slice_length, subslice_length, eu_length, total_length;
38 int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
39 int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
40 int ret;
41
42 BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
43
44 if (sseu->max_slices == 0)
45 return -ENODEV;
46
47 slice_length = sizeof(sseu->slice_mask);
48 subslice_length = sseu->max_slices * ss_stride;
49 eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
50 total_length = sizeof(topo) + slice_length + subslice_length +
51 eu_length;
52
53 ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);
54
55 if (ret != 0)
56 return ret;
57
58 memset(&topo, 0, sizeof(topo));
59 topo.max_slices = sseu->max_slices;
60 topo.max_subslices = sseu->max_subslices;
61 topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
62
63 topo.subslice_offset = slice_length;
64 topo.subslice_stride = ss_stride;
65 topo.eu_offset = slice_length + subslice_length;
66 topo.eu_stride = eu_stride;
67
68 if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
69 &topo, sizeof(topo)))
70 return -EFAULT;
71
72 if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
73 &sseu->slice_mask, slice_length))
74 return -EFAULT;
75
76 if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
77 sizeof(topo) + slice_length),
78 sseu))
79 return -EFAULT;
80
81 if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
82 sizeof(topo) +
83 slice_length + subslice_length),
84 sseu))
85 return -EFAULT;
86
87 return total_length;
88}
89
90static int query_topology_info(struct drm_i915_private *dev_priv,
91 struct drm_i915_query_item *query_item)
92{
93 const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
94
95 if (query_item->flags != 0)
96 return -EINVAL;
97
98 return fill_topology_info(sseu, query_item, sseu->subslice_mask);
99}
100
101static int query_geometry_subslices(struct drm_i915_private *i915,
102 struct drm_i915_query_item *query_item)
103{
104 const struct sseu_dev_info *sseu;
105 struct intel_engine_cs *engine;
106 struct i915_engine_class_instance classinstance;
107
108 if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
109 return -ENODEV;
110
111 classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
112
113 engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
114 (u8)classinstance.engine_instance);
115
116 if (!engine)
117 return -EINVAL;
118
119 if (engine->class != RENDER_CLASS)
120 return -EINVAL;
121
122 sseu = &engine->gt->info.sseu;
123
124 return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
125}
126
127static int
128query_engine_info(struct drm_i915_private *i915,
129 struct drm_i915_query_item *query_item)
130{
131 struct drm_i915_query_engine_info __user *query_ptr =
132 u64_to_user_ptr(query_item->data_ptr);
133 struct drm_i915_engine_info __user *info_ptr;
134 struct drm_i915_query_engine_info query;
135 struct drm_i915_engine_info info = { };
136 unsigned int num_uabi_engines = 0;
137 struct intel_engine_cs *engine;
138 int len, ret;
139
140 if (query_item->flags)
141 return -EINVAL;
142
143 for_each_uabi_engine(engine, i915)
144 num_uabi_engines++;
145
146 len = struct_size(query_ptr, engines, num_uabi_engines);
147
148 ret = copy_query_item(&query, sizeof(query), len, query_item);
149 if (ret != 0)
150 return ret;
151
152 if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
153 query.rsvd[2])
154 return -EINVAL;
155
156 info_ptr = &query_ptr->engines[0];
157
158 for_each_uabi_engine(engine, i915) {
159 info.engine.engine_class = engine->uabi_class;
160 info.engine.engine_instance = engine->uabi_instance;
161 info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
162 info.capabilities = engine->uabi_capabilities;
163 info.logical_instance = ilog2(engine->logical_mask);
164
165 if (copy_to_user(info_ptr, &info, sizeof(info)))
166 return -EFAULT;
167
168 query.num_engines++;
169 info_ptr++;
170 }
171
172 if (copy_to_user(query_ptr, &query, sizeof(query)))
173 return -EFAULT;
174
175 return len;
176}
177
178static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
179 u64 user_regs_ptr,
180 u32 kernel_n_regs)
181{
182 /*
183 * We'll just put the number of registers, and won't copy the
184 * register.
185 */
186 if (user_n_regs == 0)
187 return 0;
188
189 if (user_n_regs < kernel_n_regs)
190 return -EINVAL;
191
192 return 0;
193}
194
195static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
196 u32 kernel_n_regs,
197 u64 user_regs_ptr,
198 u32 *user_n_regs)
199{
200 u32 __user *p = u64_to_user_ptr(user_regs_ptr);
201 u32 r;
202
203 if (*user_n_regs == 0) {
204 *user_n_regs = kernel_n_regs;
205 return 0;
206 }
207
208 *user_n_regs = kernel_n_regs;
209
210 if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
211 return -EFAULT;
212
213 for (r = 0; r < kernel_n_regs; r++, p += 2) {
214 unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
215 p, Efault);
216 unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
217 }
218 user_write_access_end();
219 return 0;
220Efault:
221 user_write_access_end();
222 return -EFAULT;
223}
224
225static int query_perf_config_data(struct drm_i915_private *i915,
226 struct drm_i915_query_item *query_item,
227 bool use_uuid)
228{
229 struct drm_i915_query_perf_config __user *user_query_config_ptr =
230 u64_to_user_ptr(query_item->data_ptr);
231 struct drm_i915_perf_oa_config __user *user_config_ptr =
232 u64_to_user_ptr(query_item->data_ptr +
233 sizeof(struct drm_i915_query_perf_config));
234 struct drm_i915_perf_oa_config user_config;
235 struct i915_perf *perf = &i915->perf;
236 struct i915_oa_config *oa_config;
237 char uuid[UUID_STRING_LEN + 1];
238 u64 config_id;
239 u32 flags, total_size;
240 int ret;
241
242 if (!perf->i915)
243 return -ENODEV;
244
245 total_size =
246 sizeof(struct drm_i915_query_perf_config) +
247 sizeof(struct drm_i915_perf_oa_config);
248
249 if (query_item->length == 0)
250 return total_size;
251
252 if (query_item->length < total_size) {
253 DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
254 query_item->length, total_size);
255 return -EINVAL;
256 }
257
258 if (get_user(flags, &user_query_config_ptr->flags))
259 return -EFAULT;
260
261 if (flags != 0)
262 return -EINVAL;
263
264 if (use_uuid) {
265 struct i915_oa_config *tmp;
266 int id;
267
268 BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
269
270 memset(&uuid, 0, sizeof(uuid));
271 if (copy_from_user(uuid, user_query_config_ptr->uuid,
272 sizeof(user_query_config_ptr->uuid)))
273 return -EFAULT;
274
275 oa_config = NULL;
276 rcu_read_lock();
277 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
278 if (!strcmp(tmp->uuid, uuid)) {
279 oa_config = i915_oa_config_get(tmp);
280 break;
281 }
282 }
283 rcu_read_unlock();
284 } else {
285 if (get_user(config_id, &user_query_config_ptr->config))
286 return -EFAULT;
287
288 oa_config = i915_perf_get_oa_config(perf, config_id);
289 }
290 if (!oa_config)
291 return -ENOENT;
292
293 if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
294 ret = -EFAULT;
295 goto out;
296 }
297
298 ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
299 user_config.boolean_regs_ptr,
300 oa_config->b_counter_regs_len);
301 if (ret)
302 goto out;
303
304 ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
305 user_config.flex_regs_ptr,
306 oa_config->flex_regs_len);
307 if (ret)
308 goto out;
309
310 ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
311 user_config.mux_regs_ptr,
312 oa_config->mux_regs_len);
313 if (ret)
314 goto out;
315
316 ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
317 oa_config->b_counter_regs_len,
318 user_config.boolean_regs_ptr,
319 &user_config.n_boolean_regs);
320 if (ret)
321 goto out;
322
323 ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
324 oa_config->flex_regs_len,
325 user_config.flex_regs_ptr,
326 &user_config.n_flex_regs);
327 if (ret)
328 goto out;
329
330 ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
331 oa_config->mux_regs_len,
332 user_config.mux_regs_ptr,
333 &user_config.n_mux_regs);
334 if (ret)
335 goto out;
336
337 memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
338
339 if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
340 ret = -EFAULT;
341 goto out;
342 }
343
344 ret = total_size;
345
346out:
347 i915_oa_config_put(oa_config);
348 return ret;
349}
350
351static size_t sizeof_perf_config_list(size_t count)
352{
353 return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
354}
355
356static size_t sizeof_perf_metrics(struct i915_perf *perf)
357{
358 struct i915_oa_config *tmp;
359 size_t i;
360 int id;
361
362 i = 1;
363 rcu_read_lock();
364 idr_for_each_entry(&perf->metrics_idr, tmp, id)
365 i++;
366 rcu_read_unlock();
367
368 return sizeof_perf_config_list(i);
369}
370
371static int query_perf_config_list(struct drm_i915_private *i915,
372 struct drm_i915_query_item *query_item)
373{
374 struct drm_i915_query_perf_config __user *user_query_config_ptr =
375 u64_to_user_ptr(query_item->data_ptr);
376 struct i915_perf *perf = &i915->perf;
377 u64 *oa_config_ids = NULL;
378 int alloc, n_configs;
379 u32 flags;
380 int ret;
381
382 if (!perf->i915)
    3: Assuming field 'i915' is non-null
    4: Taking false branch
383 return -ENODEV;
384
385 if (query_item->length == 0)
    5: Assuming field 'length' is not equal to 0
    6: Taking false branch
386 return sizeof_perf_metrics(perf);
387
388 if (get_user(flags, &user_query_config_ptr->flags))
    7: Assuming the condition is false
    8: Taking false branch
389 return -EFAULT;
390
391 if (flags != 0)
    9: Assuming 'flags' is equal to 0
    10: Taking false branch
392 return -EINVAL;
393
394 n_configs = 1;
395 do {
    22: Loop condition is true. Execution continues on line 396
396 struct i915_oa_config *tmp;
397 u64 *ids;
398 int id;
399
400#ifdef __linux__
401 ids = krealloc(oa_config_ids,
402 n_configs * sizeof(*oa_config_ids),
403 GFP_KERNEL);
404 if (!ids)
405 return -ENOMEM;
406#else
407 ids = kmalloc(n_configs * sizeof(*oa_config_ids),
    11: Calling 'kmalloc'
    13: Returned allocated memory
408 GFP_KERNEL);
409 if (!ids)
    14: Assuming 'ids' is non-null
    15: Taking false branch
    23: Assuming 'ids' is null
    24: Taking true branch
410 return -ENOMEM;
    25: Potential leak of memory pointed to by 'oa_config_ids'
411 if (n_configs > 1)
    15.1: 'n_configs' is <= 1
    16: Taking false branch
412 memcpy(ids, oa_config_ids,
413 (n_configs - 1) * sizeof(*oa_config_ids));
414 kfree(oa_config_ids);
415#endif
416
417 alloc = fetch_and_zero(&n_configs);
418
419 ids[n_configs++] = 1ull; /* reserved for test_config */
420 rcu_read_lock();
421 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
    17: Assuming the condition is true
    18: Loop condition is true. Entering loop body
    20: Assuming the condition is false
    21: Loop condition is false. Execution continues on line 428
422 if (n_configs < alloc)
    18.1: 'n_configs' is >= 'alloc'
    19: Taking false branch
423 ids[n_configs] = id;
424 n_configs++;
425 }
426 rcu_read_unlock();
427
428 oa_config_ids = ids;
429 } while (n_configs > alloc);
430
431 if (query_item->length < sizeof_perf_config_list(n_configs)) {
432 DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
433 query_item->length,
434 sizeof_perf_config_list(n_configs));
435 kfree(oa_config_ids);
436 return -EINVAL;
437 }
438
439 if (put_user(n_configs, &user_query_config_ptr->config)) {
440 kfree(oa_config_ids);
441 return -EFAULT;
442 }
443
444 ret = copy_to_user(user_query_config_ptr + 1,
445 oa_config_ids,
446 n_configs * sizeof(*oa_config_ids));
447 kfree(oa_config_ids);
448 if (ret)
449 return -EFAULT;
450
451 return sizeof_perf_config_list(n_configs);
452}
453
454static int query_perf_config(struct drm_i915_private *i915,
455 struct drm_i915_query_item *query_item)
456{
457 switch (query_item->flags) {
    1: Control jumps to 'case 1:' at line 458
458 case DRM_I915_QUERY_PERF_CONFIG_LIST:
459 return query_perf_config_list(i915, query_item);
    2: Calling 'query_perf_config_list'
460 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
461 return query_perf_config_data(i915, query_item, true);
462 case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
463 return query_perf_config_data(i915, query_item, false);
464 default:
465 return -EINVAL;
466 }
467}
468
469static int query_memregion_info(struct drm_i915_private *i915,
470 struct drm_i915_query_item *query_item)
471{
472 struct drm_i915_query_memory_regions __user *query_ptr =
473 u64_to_user_ptr(query_item->data_ptr);
474 struct drm_i915_memory_region_info __user *info_ptr =
475 &query_ptr->regions[0];
476 struct drm_i915_memory_region_info info = { };
477 struct drm_i915_query_memory_regions query;
478 struct intel_memory_region *mr;
479 u32 total_length;
480 int ret, id, i;
481
482 if (query_item->flags != 0)
483 return -EINVAL;
484
485 total_length = sizeof(query);
486 for_each_memory_region(mr, i915, id) {
487 if (mr->private)
488 continue;
489
490 total_length += sizeof(info);
491 }
492
493 ret = copy_query_item(&query, sizeof(query), total_length, query_item);
494 if (ret != 0)
495 return ret;
496
497 if (query.num_regions)
498 return -EINVAL;
499
500 for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
501 if (query.rsvd[i])
502 return -EINVAL;
503 }
504
505 for_each_memory_region(mr, i915, id) {
506 if (mr->private)
507 continue;
508
509 info.region.memory_class = mr->type;
510 info.region.memory_instance = mr->instance;
511 info.probed_size = mr->total;
512
513 if (mr->type == INTEL_MEMORY_LOCAL)
514 info.probed_cpu_visible_size = mr->io_size;
515 else
516 info.probed_cpu_visible_size = mr->total;
517
518 if (perfmon_capable()) {
519 intel_memory_region_avail(mr,
520 &info.unallocated_size,
521 &info.unallocated_cpu_visible_size);
522 } else {
523 info.unallocated_size = info.probed_size;
524 info.unallocated_cpu_visible_size =
525 info.probed_cpu_visible_size;
526 }
527
528 if (__copy_to_user(info_ptr, &info, sizeof(info)))
529 return -EFAULT;
530
531 query.num_regions++;
532 info_ptr++;
533 }
534
535 if (__copy_to_user(query_ptr, &query, sizeof(query)))
536 return -EFAULT;
537
538 return total_length;
539}
540
541static int query_hwconfig_blob(struct drm_i915_private *i915,
542 struct drm_i915_query_item *query_item)
543{
544 struct intel_gt *gt = to_gt(i915);
545 struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
546
547 if (!hwconfig->size || !hwconfig->ptr)
548 return -ENODEV;
549
550 if (query_item->length == 0)
551 return hwconfig->size;
552
553 if (query_item->length < hwconfig->size)
554 return -EINVAL;
555
556 if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
557 hwconfig->ptr, hwconfig->size))
558 return -EFAULT;
559
560 return hwconfig->size;
561}
562
563static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
564 struct drm_i915_query_item *query_item) = {
565 query_topology_info,
566 query_engine_info,
567 query_perf_config,
568 query_memregion_info,
569 query_hwconfig_blob,
570 query_geometry_subslices,
571};
572
573int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
574{
575 struct drm_i915_private *dev_priv = to_i915(dev);
576 struct drm_i915_query *args = data;
577 struct drm_i915_query_item __user *user_item_ptr =
578 u64_to_user_ptr(args->items_ptr);
579 u32 i;
580
581 if (args->flags != 0)
582 return -EINVAL;
583
584 for (i = 0; i < args->num_items; i++, user_item_ptr++) {
585 struct drm_i915_query_item item;
586 unsigned long func_idx;
587 int ret;
588
589 if (copy_from_user(&item, user_item_ptr, sizeof(item)))
590 return -EFAULT;
591
592 if (item.query_id == 0)
593 return -EINVAL;
594
595 if (overflows_type(item.query_id - 1, unsigned long))
596 return -EINVAL;
597
598 func_idx = item.query_id - 1;
599
600 ret = -EINVAL;
601 if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
602 func_idx = array_index_nospec(func_idx,
603 ARRAY_SIZE(i915_query_funcs));
604 ret = i915_query_funcs[func_idx](dev_priv, &item);
605 }
606
607 /* Only write the length back to userspace if they differ. */
608 if (ret != item.length && put_user(ret, &user_item_ptr->length))
609 return -EFAULT;
610 }
611
612 return 0;
613}
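
The sizing loop at lines 394-429 is the heart of the report: each pass runs with the capacity counted by the previous pass ('alloc'), keeps counting in 'n_configs' even once the buffer is full, and retries with a larger buffer whenever the count overran the capacity. The userspace paraphrase below shows the same pattern with the missing free added on the allocation-failure path. It is illustrative only: count_entries() is a stand-in for the idr walk, and realloc() stands in for the kmalloc/memcpy/kfree dance.

#include <stdlib.h>

static size_t count_entries(void) { return 5; }	/* stand-in for the idr walk */

int collect_ids(unsigned long long **out, size_t *out_n)
{
	unsigned long long *ids = NULL;
	size_t alloc, n = 1;

	do {
		unsigned long long *tmp = realloc(ids, n * sizeof(*ids));
		if (tmp == NULL) {
			free(ids);		/* the step the OpenBSD branch is missing */
			return -1;
		}
		ids = tmp;
		alloc = n;		/* capacity for this pass */
		n = 0;
		ids[n++] = 1ULL;	/* reserved slot, like test_config */
		for (size_t i = 0; i < count_entries(); i++) {
			if (n < alloc)
				ids[n] = i;
			n++;		/* count even what didn't fit */
		}
	} while (n > alloc);		/* overran the buffer: go around again */

	*out = ids;
	*out_n = n;
	return 0;
}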

/usr/src/sys/dev/pci/drm/include/linux/slab.h

1/* Public domain. */
2
3#ifndef _LINUX_SLAB_H
4#define _LINUX_SLAB_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8
9#include <linux/types.h>
10#include <linux/workqueue.h>
11#include <linux/gfp.h>
12
13#include <linux/processor.h> /* for CACHELINESIZE */
14
15#define ARCH_KMALLOC_MINALIGN CACHELINESIZE
16
17#define ZERO_SIZE_PTR NULL
18
19static inline void *
20kmalloc(size_t size, int flags)
21{
22 return malloc(size, M_DRM, flags);
    12: Memory is allocated
23}
24
25static inline void *
26kmalloc_array(size_t n, size_t size, int flags)
27{
28 if (n != 0 && SIZE_MAX / n < size)
29 return NULL;
30 return malloc(n * size, M_DRM, flags);
31}
32
33static inline void *
34kcalloc(size_t n, size_t size, int flags)
35{
36 if (n != 0 && SIZE_MAX / n < size)
37 return NULL;
38 return malloc(n * size, M_DRM, flags | M_ZERO);
39}
40
41static inline void *
42kzalloc(size_t size, int flags)
43{
44 return malloc(size, M_DRM, flags | M_ZERO);
45}
46
47static inline void
48kfree(const void *objp)
49{
50 free((void *)objp, M_DRM, 0);
51}
52
53#endif
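
The shim above is why slab.h appears in this report: the allocation tracked at step 12 happens inside the inlined kmalloc(). It also shows why query_perf_config_list() open-codes the grow under #else: Linux krealloc() recovers the old allocation size from the allocator, while malloc(9) has no such lookup, so the caller must carry the size itself. A hypothetical size-taking variant (the name, signature, and eager-free-on-failure behaviour are all assumptions, and it presumes memcpy() is in scope via sys/systm.h) would let the call site drop the #ifdef without reintroducing the leak:

static inline void *
krealloc_sized(void *old, size_t oldsize, size_t newsize, int flags)
{
	void *p;

	p = malloc(newsize, M_DRM, flags);
	if (p == NULL) {
		/* unlike Linux krealloc(): free eagerly so a failed grow cannot leak */
		free(old, M_DRM, 0);
		return NULL;
	}
	if (old != NULL) {
		memcpy(p, old, oldsize < newsize ? oldsize : newsize);
		free(old, M_DRM, 0);
	}
	return p;
}

Note the deliberate divergence from Linux krealloc(), which keeps the old buffer on failure; with those semantics every caller would still need its own kfree() on the error path, which is exactly the step the analyzer found missing here.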