Bug Summary

File: dev/pci/drm/i915/i915_query.c
Warning: line 58, column 17
The left operand of '!=' is a garbage value

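The path below ends at the comparison 'topo.flags != 0' in query_topology_info(): copy_query_item() returns early, without writing the header, when query_item->length is 0, and the analyzer additionally assumes that the returned total_length could be 0, so the 'ret != 0' guard is not taken and topo is still uninitialized at the comparison. Because total_length always includes sizeof(topo) in this code, that assumption looks unsatisfiable in practice, so the report appears to be a false positive, but the checker cannot prove it. The following is a minimal standalone sketch of the flagged pattern (hypothetical names, not the driver code itself):

#include <string.h>

struct hdr { unsigned int flags; };

/* Returns 'total' without touching *hdr when len == 0 (early return). */
static int copy_hdr(struct hdr *hdr, unsigned int len, unsigned int total)
{
        if (len == 0)
                return total;        /* *hdr left unwritten */
        memset(hdr, 0, sizeof(*hdr));
        return 0;
}

int check(unsigned int len, unsigned int total)
{
        struct hdr h;                /* uninitialized, like 'topo' below */
        int ret = copy_hdr(&h, len, total);

        if (ret != 0)
                return ret;          /* only passed if total == 0 as well */
        return h.flags != 0;         /* analyzer: garbage value when len == 0 */
}
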
Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name i915_query.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_query.c
  1  /*
  2   * SPDX-License-Identifier: MIT
  3   *
  4   * Copyright © 2018 Intel Corporation
  5   */
  6
  7  #include <linux/nospec.h>
  8
  9  #include "i915_drv.h"
 10  #include "i915_perf.h"
 11  #include "i915_query.h"
 12  #include <uapi/drm/i915_drm.h>
 13
 14  static int copy_query_item(void *query_hdr, size_t query_sz,
 15                             u32 total_length,
 16                             struct drm_i915_query_item *query_item)
 17  {
 18          if (query_item->length == 0)
                     [6] Assuming field 'length' is equal to 0
                     [7] Taking true branch
 19                  return total_length;
                     [8] Returning without writing to 'query_hdr->flags'
 20
 21          if (query_item->length < total_length)
 22                  return -EINVAL;
 23
 24          if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
 25                             query_sz))
 26                  return -EFAULT;
 27
 28          return 0;
 29  }
 30
 31  static int query_topology_info(struct drm_i915_private *dev_priv,
 32                                 struct drm_i915_query_item *query_item)
 33  {
 34          const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu;
 35          struct drm_i915_query_topology_info topo;
 36          u32 slice_length, subslice_length, eu_length, total_length;
 37          int ret;
 38
 39          if (query_item->flags != 0)
                     [1] Assuming field 'flags' is equal to 0
                     [2] Taking false branch
 40                  return -EINVAL;
 41
 42          if (sseu->max_slices == 0)
                     [3] Assuming field 'max_slices' is not equal to 0
                     [4] Taking false branch
 43                  return -ENODEV;
 44
 45          BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
 46
 47          slice_length = sizeof(sseu->slice_mask);
 48          subslice_length = sseu->max_slices * sseu->ss_stride;
 49          eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
 50          total_length = sizeof(topo) + slice_length + subslice_length +
 51                         eu_length;
 52
 53          ret = copy_query_item(&topo, sizeof(topo), total_length,
 54                                query_item);
                     [5] Calling 'copy_query_item'
                     [9] Returning from 'copy_query_item'
 55          if (ret != 0)
                     [10] Assuming 'ret' is equal to 0
                     [11] Taking false branch
 56                  return ret;
 57
 58          if (topo.flags != 0)
                     [12] The left operand of '!=' is a garbage value
 59                  return -EINVAL;
 60
 61          memset(&topo, 0, sizeof(topo));
 62          topo.max_slices = sseu->max_slices;
 63          topo.max_subslices = sseu->max_subslices;
 64          topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
 65
 66          topo.subslice_offset = slice_length;
 67          topo.subslice_stride = sseu->ss_stride;
 68          topo.eu_offset = slice_length + subslice_length;
 69          topo.eu_stride = sseu->eu_stride;
 70
 71          if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
 72                           &topo, sizeof(topo)))
 73                  return -EFAULT;
 74
 75          if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
 76                           &sseu->slice_mask, slice_length))
 77                  return -EFAULT;
 78
 79          if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
 80                                           sizeof(topo) + slice_length),
 81                           sseu->subslice_mask, subslice_length))
 82                  return -EFAULT;
 83
 84          if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
 85                                           sizeof(topo) +
 86                                           slice_length + subslice_length),
 87                           sseu->eu_mask, eu_length))
 88                  return -EFAULT;
 89
 90          return total_length;
 91  }
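
One way to make this class of report go away, shown only as a sketch continuing the standalone example from the bug summary above (hypothetical names, not the change applied upstream or in the OpenBSD tree), is to give the struct a defined value at its declaration, so the later comparison never reads indeterminate memory even on the early-return path:

#include <string.h>

struct hdr { unsigned int flags; };

static int copy_hdr(struct hdr *hdr, unsigned int len, unsigned int total)
{
        if (len == 0)
                return total;        /* still does not write *hdr */
        memset(hdr, 0, sizeof(*hdr));
        return 0;
}

int check_fixed(unsigned int len, unsigned int total)
{
        struct hdr h = { 0 };        /* defined even on the early-return path */
        int ret = copy_hdr(&h, len, total);

        if (ret != 0)
                return ret;
        return h.flags != 0;         /* now reads a defined (zero) value */
}
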
 92
 93  static int
 94  query_engine_info(struct drm_i915_private *i915,
 95                    struct drm_i915_query_item *query_item)
 96  {
 97          struct drm_i915_query_engine_info __user *query_ptr =
 98                  u64_to_user_ptr(query_item->data_ptr);
 99          struct drm_i915_engine_info __user *info_ptr;
100          struct drm_i915_query_engine_info query;
101          struct drm_i915_engine_info info = { };
102          unsigned int num_uabi_engines = 0;
103          struct intel_engine_cs *engine;
104          int len, ret;
105
106          if (query_item->flags)
107                  return -EINVAL;
108
109          for_each_uabi_engine(engine, i915)
110                  num_uabi_engines++;
111
112          len = struct_size(query_ptr, engines, num_uabi_engines);
113
114          ret = copy_query_item(&query, sizeof(query), len, query_item);
115          if (ret != 0)
116                  return ret;
117
118          if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
119              query.rsvd[2])
120                  return -EINVAL;
121
122          info_ptr = &query_ptr->engines[0];
123
124          for_each_uabi_engine(engine, i915) {
125                  info.engine.engine_class = engine->uabi_class;
126                  info.engine.engine_instance = engine->uabi_instance;
127                  info.capabilities = engine->uabi_capabilities;
128
129                  if (copy_to_user(info_ptr, &info, sizeof(info)))
130                          return -EFAULT;
131
132                  query.num_engines++;
133                  info_ptr++;
134          }
135
136          if (copy_to_user(query_ptr, &query, sizeof(query)))
137                  return -EFAULT;
138
139          return len;
140  }
141
142  static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
143                                                       u64 user_regs_ptr,
144                                                       u32 kernel_n_regs)
145  {
146          /*
147           * We'll just put the number of registers, and won't copy the
148           * register.
149           */
150          if (user_n_regs == 0)
151                  return 0;
152
153          if (user_n_regs < kernel_n_regs)
154                  return -EINVAL;
155
156          return 0;
157  }
158
159  static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
160                                                   u32 kernel_n_regs,
161                                                   u64 user_regs_ptr,
162                                                   u32 *user_n_regs)
163  {
164          u32 __user *p = u64_to_user_ptr(user_regs_ptr);
165          u32 r;
166
167          if (*user_n_regs == 0) {
168                  *user_n_regs = kernel_n_regs;
169                  return 0;
170          }
171
172          *user_n_regs = kernel_n_regs;
173
174          if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
175                  return -EFAULT;
176
177          for (r = 0; r < kernel_n_regs; r++, p += 2) {
178                  unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
179                                  p, Efault);
180                  unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
181          }
182          user_write_access_end();
183          return 0;
184  Efault:
185          user_write_access_end();
186          return -EFAULT;
187  }
188
189  static int query_perf_config_data(struct drm_i915_private *i915,
190                                    struct drm_i915_query_item *query_item,
191                                    bool use_uuid)
192  {
193          struct drm_i915_query_perf_config __user *user_query_config_ptr =
194                  u64_to_user_ptr(query_item->data_ptr);
195          struct drm_i915_perf_oa_config __user *user_config_ptr =
196                  u64_to_user_ptr(query_item->data_ptr +
197                                  sizeof(struct drm_i915_query_perf_config));
198          struct drm_i915_perf_oa_config user_config;
199          struct i915_perf *perf = &i915->perf;
200          struct i915_oa_config *oa_config;
201          char uuid[UUID_STRING_LEN + 1];
202          u64 config_id;
203          u32 flags, total_size;
204          int ret;
205
206          if (!perf->i915)
207                  return -ENODEV;
208
209          total_size =
210                  sizeof(struct drm_i915_query_perf_config) +
211                  sizeof(struct drm_i915_perf_oa_config);
212
213          if (query_item->length == 0)
214                  return total_size;
215
216          if (query_item->length < total_size) {
217                  DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
218                            query_item->length, total_size);
219                  return -EINVAL;
220          }
221
222          if (get_user(flags, &user_query_config_ptr->flags))
223                  return -EFAULT;
224
225          if (flags != 0)
226                  return -EINVAL;
227
228          if (use_uuid) {
229                  struct i915_oa_config *tmp;
230                  int id;
231
232                  BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
233
234                  memset(&uuid, 0, sizeof(uuid));
235                  if (copy_from_user(uuid, user_query_config_ptr->uuid,
236                                     sizeof(user_query_config_ptr->uuid)))
237                          return -EFAULT;
238
239                  oa_config = NULL;
240                  rcu_read_lock();
241                  idr_for_each_entry(&perf->metrics_idr, tmp, id) {
242                          if (!strcmp(tmp->uuid, uuid)) {
243                                  oa_config = i915_oa_config_get(tmp);
244                                  break;
245                          }
246                  }
247                  rcu_read_unlock();
248          } else {
249                  if (get_user(config_id, &user_query_config_ptr->config))
250                          return -EFAULT;
251
252                  oa_config = i915_perf_get_oa_config(perf, config_id);
253          }
254          if (!oa_config)
255                  return -ENOENT;
256
257          if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
258                  ret = -EFAULT;
259                  goto out;
260          }
261
262          ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
263                                                         user_config.boolean_regs_ptr,
264                                                         oa_config->b_counter_regs_len);
265          if (ret)
266                  goto out;
267
268          ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
269                                                         user_config.flex_regs_ptr,
270                                                         oa_config->flex_regs_len);
271          if (ret)
272                  goto out;
273
274          ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
275                                                         user_config.mux_regs_ptr,
276                                                         oa_config->mux_regs_len);
277          if (ret)
278                  goto out;
279
280          ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
281                                                     oa_config->b_counter_regs_len,
282                                                     user_config.boolean_regs_ptr,
283                                                     &user_config.n_boolean_regs);
284          if (ret)
285                  goto out;
286
287          ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
288                                                     oa_config->flex_regs_len,
289                                                     user_config.flex_regs_ptr,
290                                                     &user_config.n_flex_regs);
291          if (ret)
292                  goto out;
293
294          ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
295                                                     oa_config->mux_regs_len,
296                                                     user_config.mux_regs_ptr,
297                                                     &user_config.n_mux_regs);
298          if (ret)
299                  goto out;
300
301          memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
302
303          if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
304                  ret = -EFAULT;
305                  goto out;
306          }
307
308          ret = total_size;
309
310  out:
311          i915_oa_config_put(oa_config);
312          return ret;
313  }
314
315  static size_t sizeof_perf_config_list(size_t count)
316  {
317          return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
318  }
319
320  static size_t sizeof_perf_metrics(struct i915_perf *perf)
321  {
322          struct i915_oa_config *tmp;
323          size_t i;
324          int id;
325
326          i = 1;
327          rcu_read_lock();
328          idr_for_each_entry(&perf->metrics_idr, tmp, id)
329                  i++;
330          rcu_read_unlock();
331
332          return sizeof_perf_config_list(i);
333  }
334
335  static int query_perf_config_list(struct drm_i915_private *i915,
336                                    struct drm_i915_query_item *query_item)
337  {
338          struct drm_i915_query_perf_config __user *user_query_config_ptr =
339                  u64_to_user_ptr(query_item->data_ptr);
340          struct i915_perf *perf = &i915->perf;
341          u64 *oa_config_ids = NULL;
342          int alloc, n_configs;
343          u32 flags;
344          int ret;
345
346          if (!perf->i915)
347                  return -ENODEV;
348
349          if (query_item->length == 0)
350                  return sizeof_perf_metrics(perf);
351
352          if (get_user(flags, &user_query_config_ptr->flags))
353                  return -EFAULT;
354
355          if (flags != 0)
356                  return -EINVAL;
357
358          n_configs = 1;
359          do {
360                  struct i915_oa_config *tmp;
361                  u64 *ids;
362                  int id;
363
364  #ifdef __linux__
365                  ids = krealloc(oa_config_ids,
366                                 n_configs * sizeof(*oa_config_ids),
367                                 GFP_KERNEL);
368                  if (!ids)
369                          return -ENOMEM;
370  #else
371                  ids = kmalloc(n_configs * sizeof(*oa_config_ids),
372                                GFP_KERNEL);
373                  if (!ids)
374                          return -ENOMEM;
375                  if (n_configs > 1)
376                          memcpy(ids, oa_config_ids,
377                                 (n_configs - 1) * sizeof(*oa_config_ids));
378                  kfree(oa_config_ids);
379  #endif
380
381                  alloc = fetch_and_zero(&n_configs);
382
383                  ids[n_configs++] = 1ull; /* reserved for test_config */
384                  rcu_read_lock();
385                  idr_for_each_entry(&perf->metrics_idr, tmp, id) {
386                          if (n_configs < alloc)
387                                  ids[n_configs] = id;
388                          n_configs++;
389                  }
390                  rcu_read_unlock();
391
392                  oa_config_ids = ids;
393          } while (n_configs > alloc);
394
395          if (query_item->length < sizeof_perf_config_list(n_configs)) {
396                  DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
397                            query_item->length,
398                            sizeof_perf_config_list(n_configs));
399                  kfree(oa_config_ids);
400                  return -EINVAL;
401          }
402
403          if (put_user(n_configs, &user_query_config_ptr->config)) {
404                  kfree(oa_config_ids);
405                  return -EFAULT;
406          }
407
408          ret = copy_to_user(user_query_config_ptr + 1,
409                             oa_config_ids,
410                             n_configs * sizeof(*oa_config_ids));
411          kfree(oa_config_ids);
412          if (ret)
413                  return -EFAULT;
414
415          return sizeof_perf_config_list(n_configs);
416  }
417
418  static int query_perf_config(struct drm_i915_private *i915,
419                               struct drm_i915_query_item *query_item)
420  {
421          switch (query_item->flags) {
422          case DRM_I915_QUERY_PERF_CONFIG_LIST:
423                  return query_perf_config_list(i915, query_item);
424          case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
425                  return query_perf_config_data(i915, query_item, true);
426          case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
427                  return query_perf_config_data(i915, query_item, false);
428          default:
429                  return -EINVAL;
430          }
431  }
432
433  static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
434                                          struct drm_i915_query_item *query_item) = {
435          query_topology_info,
436          query_engine_info,
437          query_perf_config,
438  };
439
440  int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
441  {
442          struct drm_i915_private *dev_priv = to_i915(dev);
443          struct drm_i915_query *args = data;
444          struct drm_i915_query_item __user *user_item_ptr =
445                  u64_to_user_ptr(args->items_ptr);
446          u32 i;
447
448          if (args->flags != 0)
449                  return -EINVAL;
450
451          for (i = 0; i < args->num_items; i++, user_item_ptr++) {
452                  struct drm_i915_query_item item;
453                  unsigned long func_idx;
454                  int ret;
455
456                  if (copy_from_user(&item, user_item_ptr, sizeof(item)))
457                          return -EFAULT;
458
459                  if (item.query_id == 0)
460                          return -EINVAL;
461
462                  if (overflows_type(item.query_id - 1, unsigned long))
463                          return -EINVAL;
464
465                  func_idx = item.query_id - 1;
466
467                  ret = -EINVAL;
468                  if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
469                          func_idx = array_index_nospec(func_idx,
470                                                        ARRAY_SIZE(i915_query_funcs));
471                          ret = i915_query_funcs[func_idx](dev_priv, &item);
472                  }
473
474                  /* Only write the length back to userspace if they differ. */
475                  if (ret != item.length && put_user(ret, &user_item_ptr->length))
476                          return -EFAULT;
477          }
478
479          return 0;
480  }
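
For reference, the length protocol implemented above (a zero-length item makes the kernel report the required size, and per-item results or error codes are written back into item.length) is normally driven from userspace in two passes, roughly as in the sketch below. The structure and ioctl names come from the i915 uapi header; the helper name is made up, the header path may differ per system, and error handling is abbreviated:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>   /* header path is an assumption; usually via libdrm */

/* Hypothetical helper: fetch the topology blob for an already-open DRM fd. */
int query_topology(int drm_fd, void **blob, int *blob_len)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
        };
        struct drm_i915_query q = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };

        /* Pass 1: length == 0, the kernel writes the required size back. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                return -1;

        *blob_len = item.length;
        *blob = malloc(item.length);
        if (*blob == NULL)
                return -1;
        item.data_ptr = (uintptr_t)*blob;

        /* Pass 2: the kernel fills the buffer; errors come back in item.length. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &q) || item.length < 0) {
                free(*blob);
                return -1;
        }
        return 0;
}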