Bug Summary

File: dev/pci/drm/radeon/radeon_gem.c
Warning: line 174, column 3
Value stored to 'bo_va' is never read
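
The flagged store is dead: radeon_gem_object_open() assigns the result of radeon_vm_bo_add() to 'bo_va' and then returns 0 without ever reading it. Below is a minimal sketch of one way to act on the result instead of discarding it, assuming radeon_vm_bo_add() returns NULL on allocation failure as in the Linux radeon driver; the -ENOMEM handling is illustrative only, not the driver's actual fix:

	r = radeon_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		/* check the result rather than dropping it */
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
		if (!bo_va)
			r = -ENOMEM;	/* assumed failure mode: allocation inside radeon_vm_bo_add() */
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return r;

Alternatively, if the result is intentionally unused, dropping the assignment and calling radeon_vm_bo_add() purely for its side effect would also silence the analyzer.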

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name radeon_gem.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/radeon/radeon_gem.c
1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/pci.h>
30
31#include <drm/drm_debugfs.h>
32#include <drm/drm_device.h>
33#include <drm/drm_file.h>
34#include <drm/radeon_drm.h>
35
36#include "radeon.h"
37
38void radeon_gem_object_free(struct drm_gem_object *gobj)
39{
40 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
41
42 if (robj) {
43 radeon_mn_unregister(robj);
44 radeon_bo_unref(&robj);
45 }
46}
47
48int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
49 int alignment, int initial_domain,
50 u32 flags, bool kernel,
51 struct drm_gem_object **obj)
52{
53 struct radeon_bo *robj;
54 unsigned long max_size;
55 int r;
56
57 *obj = NULL;
58 /* At least align on page size */
59 if (alignment < PAGE_SIZE) {
60 alignment = PAGE_SIZE;
61 }
62
63 /* Maximum bo size is the unpinned gtt size since we use the gtt to
64 * handle vram to system pool migrations.
65 */
66 max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
67 if (size > max_size) {
68 DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
69 size >> 20, max_size >> 20);
70 return -ENOMEM;
71 }
72
73retry:
74 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
75 flags, NULL, NULL, &robj);
76 if (r) {
77 if (r != -ERESTARTSYS) {
78 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
79 initial_domain |= RADEON_GEM_DOMAIN_GTT;
80 goto retry;
81 }
82 DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
83 size, initial_domain, alignment, r);
84 }
85 return r;
86 }
87 *obj = &robj->tbo.base;
88#ifdef __linux__
89 robj->pid = task_pid_nr(current);
90#else
91 robj->pid = curproc->p_p->ps_pid;
92#endif
93
94 mutex_lock(&rdev->gem.mutex);
95 list_add_tail(&robj->list, &rdev->gem.objects);
96 mutex_unlock(&rdev->gem.mutex);
97
98 return 0;
99}
100
101static int radeon_gem_set_domain(struct drm_gem_object *gobj,
102 uint32_t rdomain, uint32_t wdomain)
103{
104 struct radeon_bo *robj;
105 uint32_t domain;
106 long r;
107
108 /* FIXME: reimplement */
109 robj = gem_to_radeon_bo(gobj);
110 /* work out where to validate the buffer to */
111 domain = wdomain;
112 if (!domain) {
113 domain = rdomain;
114 }
115 if (!domain) {
116 /* Do nothing */
117 pr_warn("Set domain without domain !\n");
118 return 0;
119 }
120 if (domain == RADEON_GEM_DOMAIN_CPU) {
121 /* Asking for cpu access wait for object idle */
122 r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
123 if (!r)
124 r = -EBUSY;
125
126 if (r < 0 && r != -EINTR) {
127 pr_err("Failed to wait for object: %li\n", r);
128 return r;
129 }
130 }
131 if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
132 /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
133 return -EINVAL;
134 }
135 return 0;
136}
137
138int radeon_gem_init(struct radeon_device *rdev)
139{
140 INIT_LIST_HEAD(&rdev->gem.objects);
141 return 0;
142}
143
144void radeon_gem_fini(struct radeon_device *rdev)
145{
146 radeon_bo_force_delete(rdev);
147}
148
149/*
150 * Call from drm_gem_handle_create which appear in both new and open ioctl
151 * case.
152 */
153int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
154{
155 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
156 struct radeon_device *rdev = rbo->rdev;
157 struct radeon_fpriv *fpriv = file_priv->driver_priv;
158 struct radeon_vm *vm = &fpriv->vm;
159 struct radeon_bo_va *bo_va;
160 int r;
161
162 if ((rdev->family < CHIP_CAYMAN) ||
163 (!rdev->accel_working)) {
164 return 0;
165 }
166
167 r = radeon_bo_reserve(rbo, false);
168 if (r) {
169 return r;
170 }
171
172 bo_va = radeon_vm_bo_find(vm, rbo);
173 if (!bo_va) {
174 bo_va = radeon_vm_bo_add(rdev, vm, rbo);
Value stored to 'bo_va' is never read
175 } else {
176 ++bo_va->ref_count;
177 }
178 radeon_bo_unreserve(rbo);
179
180 return 0;
181}
182
183void radeon_gem_object_close(struct drm_gem_object *obj,
184 struct drm_file *file_priv)
185{
186 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
187 struct radeon_device *rdev = rbo->rdev;
188 struct radeon_fpriv *fpriv = file_priv->driver_priv;
189 struct radeon_vm *vm = &fpriv->vm;
190 struct radeon_bo_va *bo_va;
191 int r;
192
193 if ((rdev->family < CHIP_CAYMAN) ||
194 (!rdev->accel_working)) {
195 return;
196 }
197
198 r = radeon_bo_reserve(rbo, true);
199 if (r) {
200 dev_err(rdev->dev, "leaking bo va because "
201 "we fail to reserve bo (%d)\n", r);
202 return;
203 }
204 bo_va = radeon_vm_bo_find(vm, rbo);
205 if (bo_va) {
206 if (--bo_va->ref_count == 0) {
207 radeon_vm_bo_rmv(rdev, bo_va);
208 }
209 }
210 radeon_bo_unreserve(rbo);
211}
212
213static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
214{
215 if (r == -EDEADLK) {
216 r = radeon_gpu_reset(rdev);
217 if (!r)
218 r = -EAGAIN;
219 }
220 return r;
221}
222
223/*
224 * GEM ioctls.
225 */
226int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
227 struct drm_file *filp)
228{
229 struct radeon_device *rdev = dev->dev_private;
230 struct drm_radeon_gem_info *args = data;
231 struct ttm_resource_manager *man;
232
233 man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
234
235 args->vram_size = (u64)man->size << PAGE_SHIFT;
236 args->vram_visible = rdev->mc.visible_vram_size;
237 args->vram_visible -= rdev->vram_pin_size;
238 args->gart_size = rdev->mc.gtt_size;
239 args->gart_size -= rdev->gart_pin_size;
240
241 return 0;
242}
243
244int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
245 struct drm_file *filp)
246{
247 /* TODO: implement */
248 DRM_ERROR("unimplemented %s\n", __func__);
249 return -ENOSYS;
250}
251
252int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
253 struct drm_file *filp)
254{
255 /* TODO: implement */
256 DRM_ERROR("unimplemented %s\n", __func__);
257 return -ENOSYS;
258}
259
260int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
261 struct drm_file *filp)
262{
263 struct radeon_device *rdev = dev->dev_private;
264 struct drm_radeon_gem_create *args = data;
265 struct drm_gem_object *gobj;
266 uint32_t handle;
267 int r;
268
269 down_read(&rdev->exclusive_lock);
270 /* create a gem object to contain this object in */
271 args->size = roundup(args->size, PAGE_SIZE);
272 r = radeon_gem_object_create(rdev, args->size, args->alignment,
273 args->initial_domain, args->flags,
274 false, &gobj);
275 if (r) {
276 up_read(&rdev->exclusive_lock);
277 r = radeon_gem_handle_lockup(rdev, r);
278 return r;
279 }
280 r = drm_gem_handle_create(filp, gobj, &handle);
281 /* drop reference from allocate - handle holds it now */
282 drm_gem_object_put(gobj);
283 if (r) {
284 up_read(&rdev->exclusive_lock);
285 r = radeon_gem_handle_lockup(rdev, r);
286 return r;
287 }
288 args->handle = handle;
289 up_read(&rdev->exclusive_lock);
290 return 0;
291}
292
293int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
294 struct drm_file *filp)
295{
296 return -ENOSYS;
297#ifdef notyet
298 struct ttm_operation_ctx ctx = { true, false };
299 struct radeon_device *rdev = dev->dev_private;
300 struct drm_radeon_gem_userptr *args = data;
301 struct drm_gem_object *gobj;
302 struct radeon_bo *bo;
303 uint32_t handle;
304 int r;
305
306 args->addr = untagged_addr(args->addr);
307
308 if (offset_in_page(args->addr | args->size))
309 return -EINVAL;
310
311 /* reject unknown flag values */
312 if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
313 RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
314 RADEON_GEM_USERPTR_REGISTER))
315 return -EINVAL;
316
317 if (args->flags & RADEON_GEM_USERPTR_READONLY) {
318 /* readonly pages not tested on older hardware */
319 if (rdev->family < CHIP_R600)
320 return -EINVAL;
321
322 } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
323 !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
324
325 /* if we want to write to it we must require anonymous
326 memory and install a MMU notifier */
327 return -EACCES;
328 }
329
330 down_read(&rdev->exclusive_lock);
331
332 /* create a gem object to contain this object in */
333 r = radeon_gem_object_create(rdev, args->size, 0,
334 RADEON_GEM_DOMAIN_CPU, 0,
335 false, &gobj);
336 if (r)
337 goto handle_lockup;
338
339 bo = gem_to_radeon_bo(gobj);
340 r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
341 if (r)
342 goto release_object;
343
344 if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
345 r = radeon_mn_register(bo, args->addr);
346 if (r)
347 goto release_object;
348 }
349
350 if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
351 mmap_read_lock(current->mm);
352 r = radeon_bo_reserve(bo, true);
353 if (r) {
354 mmap_read_unlock(current->mm);
355 goto release_object;
356 }
357
358 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
359 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
360 radeon_bo_unreserve(bo);
361 mmap_read_unlock(current->mm);
362 if (r)
363 goto release_object;
364 }
365
366 r = drm_gem_handle_create(filp, gobj, &handle);
367 /* drop reference from allocate - handle holds it now */
368 drm_gem_object_put(gobj);
369 if (r)
370 goto handle_lockup;
371
372 args->handle = handle;
373 up_read(&rdev->exclusive_lock);
374 return 0;
375
376release_object:
377 drm_gem_object_put(gobj);
378
379handle_lockup:
380 up_read(&rdev->exclusive_lock);
381 r = radeon_gem_handle_lockup(rdev, r);
382
383 return r;
384#endif
385}
386
387int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
388 struct drm_file *filp)
389{
390 /* transition the BO to a domain -
391 * just validate the BO into a certain domain */
392 struct radeon_device *rdev = dev->dev_private;
393 struct drm_radeon_gem_set_domain *args = data;
394 struct drm_gem_object *gobj;
395 struct radeon_bo *robj;
396 int r;
397
398 /* for now if someone requests domain CPU -
399 * just make sure the buffer is finished with */
400 down_read(&rdev->exclusive_lock);
401
402 /* just do a BO wait for now */
403 gobj = drm_gem_object_lookup(filp, args->handle);
404 if (gobj == NULL) {
405 up_read(&rdev->exclusive_lock);
406 return -ENOENT;
407 }
408 robj = gem_to_radeon_bo(gobj);
409
410 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
411
412 drm_gem_object_put(gobj);
413 up_read(&rdev->exclusive_lock);
414 r = radeon_gem_handle_lockup(robj->rdev, r);
415 return r;
416}
417
418int radeon_mode_dumb_mmap(struct drm_file *filp,
419 struct drm_device *dev,
420 uint32_t handle, uint64_t *offset_p)
421{
422 struct drm_gem_object *gobj;
423 struct radeon_bo *robj;
424
425 gobj = drm_gem_object_lookup(filp, handle);
426 if (gobj == NULL) {
427 return -ENOENT;
428 }
429 robj = gem_to_radeon_bo(gobj);
430 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
431 drm_gem_object_put(gobj);
432 return -EPERM;
433 }
434 *offset_p = radeon_bo_mmap_offset(robj);
435 drm_gem_object_put(gobj);
436 return 0;
437}
438
439int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
440 struct drm_file *filp)
441{
442 struct drm_radeon_gem_mmap *args = data;
443
444 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
445}
446
447int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
448 struct drm_file *filp)
449{
450 struct drm_radeon_gem_busy *args = data;
451 struct drm_gem_object *gobj;
452 struct radeon_bo *robj;
453 int r;
454 uint32_t cur_placement = 0;
455
456 gobj = drm_gem_object_lookup(filp, args->handle);
457 if (gobj == NULL) {
458 return -ENOENT;
459 }
460 robj = gem_to_radeon_bo(gobj);
461
462 r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
463 if (r == 0)
464 r = -EBUSY;
465 else
466 r = 0;
467
468 cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
469 args->domain = radeon_mem_type_to_domain(cur_placement);
470 drm_gem_object_put(gobj);
471 return r;
472}
473
474int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
475 struct drm_file *filp)
476{
477 struct radeon_device *rdev = dev->dev_private;
478 struct drm_radeon_gem_wait_idle *args = data;
479 struct drm_gem_object *gobj;
480 struct radeon_bo *robj;
481 int r = 0;
482 uint32_t cur_placement = 0;
483 long ret;
484
485 gobj = drm_gem_object_lookup(filp, args->handle);
486 if (gobj == NULL) {
487 return -ENOENT;
488 }
489 robj = gem_to_radeon_bo(gobj);
490
491 ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
492 if (ret == 0)
493 r = -EBUSY;
494 else if (ret < 0)
495 r = ret;
496
497 /* Flush HDP cache via MMIO if necessary */
498 cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
499 if (rdev->asic->mmio_hdp_flush &&
500 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
501 robj->rdev->asic->mmio_hdp_flush(rdev);
502 drm_gem_object_put(gobj);
503 r = radeon_gem_handle_lockup(rdev, r);
504 return r;
505}
506
507int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
508 struct drm_file *filp)
509{
510 struct drm_radeon_gem_set_tiling *args = data;
511 struct drm_gem_object *gobj;
512 struct radeon_bo *robj;
513 int r = 0;
514
515 DRM_DEBUG("%d \n", args->handle);
516 gobj = drm_gem_object_lookup(filp, args->handle);
517 if (gobj == NULL)
518 return -ENOENT;
519 robj = gem_to_radeon_bo(gobj);
520 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
521 drm_gem_object_put(gobj);
522 return r;
523}
524
525int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
526 struct drm_file *filp)
527{
528 struct drm_radeon_gem_get_tiling *args = data;
529 struct drm_gem_object *gobj;
530 struct radeon_bo *rbo;
531 int r = 0;
532
533 DRM_DEBUG("\n");
534 gobj = drm_gem_object_lookup(filp, args->handle);
535 if (gobj == NULL)
536 return -ENOENT;
537 rbo = gem_to_radeon_bo(gobj);
538 r = radeon_bo_reserve(rbo, false);
539 if (unlikely(r != 0))
540 goto out;
541 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
542 radeon_bo_unreserve(rbo);
543out:
544 drm_gem_object_put(gobj);
545 return r;
546}
547
548/**
549 * radeon_gem_va_update_vm - update the bo_va in its VM
550 *
551 * @rdev: radeon_device pointer
552 * @bo_va: bo_va to update
553 *
554 * Update the bo_va directly after setting its address. Errors are not
555 * vital here, so they are not reported back to userspace.
556 */
557static void radeon_gem_va_update_vm(struct radeon_device *rdev,
558 struct radeon_bo_va *bo_va)
559{
560 struct ttm_validate_buffer tv, *entry;
561 struct radeon_bo_list *vm_bos;
562 struct ww_acquire_ctx ticket;
563 struct list_head list;
564 unsigned domain;
565 int r;
566
567 INIT_LIST_HEAD(&list);
568
569 tv.bo = &bo_va->bo->tbo;
570 tv.num_shared = 1;
571 list_add(&tv.head, &list);
572
573 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
574 if (!vm_bos)
575 return;
576
577 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
578 if (r)
579 goto error_free;
580
581 list_for_each_entry(entry, &list, head) {
582 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
583 /* if anything is swapped out don't swap it in here,
584 just abort and wait for the next CS */
585 if (domain == RADEON_GEM_DOMAIN_CPU)
586 goto error_unreserve;
587 }
588
589 mutex_lock(&bo_va->vm->mutex);
590 r = radeon_vm_clear_freed(rdev, bo_va->vm);
591 if (r)
592 goto error_unlock;
593
594 if (bo_va->it.start)
595 r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
596
597error_unlock:
598 mutex_unlock(&bo_va->vm->mutex);
599
600error_unreserve:
601 ttm_eu_backoff_reservation(&ticket, &list);
602
603error_free:
604 kvfree(vm_bos);
605
606 if (r && r != -ERESTARTSYS)
607 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
608}
609
610int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
611 struct drm_file *filp)
612{
613 struct drm_radeon_gem_va *args = data;
614 struct drm_gem_object *gobj;
615 struct radeon_device *rdev = dev->dev_private;
616 struct radeon_fpriv *fpriv = filp->driver_priv;
617 struct radeon_bo *rbo;
618 struct radeon_bo_va *bo_va;
619 u32 invalid_flags;
620 int r = 0;
621
622 if (!rdev->vm_manager.enabled) {
623 args->operation = RADEON_VA_RESULT_ERROR;
624 return -ENOTTY;
625 }
626
627 /* !! DONT REMOVE !!
628 * We don't support vm_id yet, to be sure we don't have broken
629 * userspace, reject anyone trying to use non 0 value thus moving
630 * forward we can use those fields without breaking existing userspace
631 */
632 if (args->vm_id) {
633 args->operation = RADEON_VA_RESULT_ERROR;
634 return -EINVAL;
635 }
636
637 if (args->offset < RADEON_VA_RESERVED_SIZE) {
638 dev_err(&dev->pdev->dev,
639 "offset 0x%lX is in reserved area 0x%X\n",
640 (unsigned long)args->offset,
641 RADEON_VA_RESERVED_SIZE);
642 args->operation = RADEON_VA_RESULT_ERROR;
643 return -EINVAL;
644 }
645
646 /* don't remove, we need to enforce userspace to set the snooped flag
647 * otherwise we will end up with broken userspace and we won't be able
648 * to enable this feature without adding new interface
649 */
650 invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
651 if ((args->flags & invalid_flags)) {
652 dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
653 args->flags, invalid_flags);
654 args->operation = RADEON_VA_RESULT_ERROR;
655 return -EINVAL;
656 }
657
658 switch (args->operation) {
659 case RADEON_VA_MAP:
660 case RADEON_VA_UNMAP:
661 break;
662 default:
663 dev_err(&dev->pdev->dev, "unsupported operation %d\n",
664 args->operation);
665 args->operation = RADEON_VA_RESULT_ERROR;
666 return -EINVAL;
667 }
668
669 gobj = drm_gem_object_lookup(filp, args->handle);
670 if (gobj == NULL) {
671 args->operation = RADEON_VA_RESULT_ERROR;
672 return -ENOENT;
673 }
674 rbo = gem_to_radeon_bo(gobj);
675 r = radeon_bo_reserve(rbo, false);
676 if (r) {
677 args->operation = RADEON_VA_RESULT_ERROR;
678 drm_gem_object_put(gobj);
679 return r;
680 }
681 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
682 if (!bo_va) {
683 args->operation = RADEON_VA_RESULT_ERROR;
684 radeon_bo_unreserve(rbo);
685 drm_gem_object_put(gobj);
686 return -ENOENT;
687 }
688
689 switch (args->operation) {
690 case RADEON_VA_MAP:
691 if (bo_va->it.start) {
692 args->operation = RADEON_VA_RESULT_VA_EXIST;
693 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
694 radeon_bo_unreserve(rbo);
695 goto out;
696 }
697 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
698 break;
699 case RADEON_VA_UNMAP:
700 r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
701 break;
702 default:
703 break;
704 }
705 if (!r)
706 radeon_gem_va_update_vm(rdev, bo_va);
707 args->operation = RADEON_VA_RESULT_OK;
708 if (r) {
709 args->operation = RADEON_VA_RESULT_ERROR;
710 }
711out:
712 drm_gem_object_put(gobj);
713 return r;
714}
715
716int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
717 struct drm_file *filp)
718{
719 struct drm_radeon_gem_op *args = data;
720 struct drm_gem_object *gobj;
721 struct radeon_bo *robj;
722 int r;
723
724 gobj = drm_gem_object_lookup(filp, args->handle);
725 if (gobj == NULL) {
726 return -ENOENT;
727 }
728 robj = gem_to_radeon_bo(gobj);
729
730 r = -EPERM;
731 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
732 goto out;
733
734 r = radeon_bo_reserve(robj, false);
735 if (unlikely(r))
736 goto out;
737
738 switch (args->op) {
739 case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
740 args->value = robj->initial_domain;
741 break;
742 case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
743 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
744 RADEON_GEM_DOMAIN_GTT |
745 RADEON_GEM_DOMAIN_CPU);
746 break;
747 default:
748 r = -EINVAL;
749 }
750
751 radeon_bo_unreserve(robj);
752out:
753 drm_gem_object_put(gobj);
754 return r;
755}
756
757int radeon_mode_dumb_create(struct drm_file *file_priv,
758 struct drm_device *dev,
759 struct drm_mode_create_dumb *args)
760{
761 struct radeon_device *rdev = dev->dev_private;
762 struct drm_gem_object *gobj;
763 uint32_t handle;
764 int r;
765
766 args->pitch = radeon_align_pitch(rdev, args->width,
767 DIV_ROUND_UP(args->bpp, 8), 0);
768 args->size = args->pitch * args->height;
769 args->size = roundup2(args->size, PAGE_SIZE);
770
771 r = radeon_gem_object_create(rdev, args->size, 0,
772 RADEON_GEM_DOMAIN_VRAM, 0,
773 false, &gobj);
774 if (r)
775 return -ENOMEM;
776
777 r = drm_gem_handle_create(file_priv, gobj, &handle);
778 /* drop reference from allocate - handle holds it now */
779 drm_gem_object_put(gobj);
780 if (r) {
781 return r;
782 }
783 args->handle = handle;
784 return 0;
785}
786
787#if defined(CONFIG_DEBUG_FS)
788static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
789{
790 struct drm_info_node *node = (struct drm_info_node *)m->private;
791 struct drm_device *dev = node->minor->dev;
792 struct radeon_device *rdev = dev->dev_private;
793 struct radeon_bo *rbo;
794 unsigned i = 0;
795
796 mutex_lock(&rdev->gem.mutex);
797 list_for_each_entry(rbo, &rdev->gem.objects, list) {
798 unsigned domain;
799 const char *placement;
800
801 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
802 switch (domain) {
803 case RADEON_GEM_DOMAIN_VRAM:
804 placement = "VRAM";
805 break;
806 case RADEON_GEM_DOMAIN_GTT:
807 placement = " GTT";
808 break;
809 case RADEON_GEM_DOMAIN_CPU:
810 default:
811 placement = " CPU";
812 break;
813 }
814 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
815 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
816 placement, (unsigned long)rbo->pid);
817 i++;
818 }
819 mutex_unlock(&rdev->gem.mutex);
820 return 0;
821}
822
823static struct drm_info_list radeon_debugfs_gem_list[] = {
824 {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL((void *)0)},
825};
826#endif
827
828int radeon_gem_debugfs_init(struct radeon_device *rdev)
829{
830#if defined(CONFIG_DEBUG_FS)
831 return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
832#endif
833 return 0;
834}