Bug Summary

File: dev/pci/drm/radeon/radeon_gem.c
Warning: line 294, column 3
Value stored to 'bo_va' is never read
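
The dead store is in radeon_gem_object_open(): when radeon_vm_bo_find() finds no existing mapping for the buffer, the pointer returned by radeon_vm_bo_add() is assigned to bo_va but never read again before the function returns. Because radeon_vm_bo_add() returns NULL when its internal allocation fails, one way to make the store meaningful is to check the result and propagate the failure. The fragment below is only a sketch of that idea against the code shown later in this report, not an actual upstream fix:

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
		/* sketch: report allocation failure instead of
		 * discarding the returned pointer */
		if (!bo_va) {
			radeon_bo_unreserve(rbo);
			return -ENOMEM;
		}
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);
	return 0;

Alternatively, the assignment could simply be dropped and radeon_vm_bo_add() called only for its side effect, which would equally silence the checker.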

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name radeon_gem.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/radeon/radeon_gem.c
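
The -cc1 line above is the internal compiler invocation that scan-build recorded; it cannot be rerun as-is, since -cc1 bypasses the clang driver. As a rough sketch only (checker list abbreviated, and the kernel's full -D/-I set elided for brevity), an equivalent driver-level invocation would look like:

clang --analyze \
    -Xanalyzer -analyzer-checker=core,deadcode \
    -I /usr/src/sys -I /usr/src/sys/dev/pci/drm/include \
    /usr/src/sys/dev/pci/drm/radeon/radeon_gem.c

Reproducing this report exactly requires the complete set of defines and include paths shown in the recorded command.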
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/iosys-map.h>
30#include <linux/pci.h>
31
32#include <drm/drm_device.h>
33#include <drm/drm_file.h>
34#include <drm/drm_gem_ttm_helper.h>
35#include <drm/radeon_drm.h>
36
37#include "radeon.h"
38#include "radeon_prime.h"
39
40struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
41 int flags);
42struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
43int radeon_gem_prime_pin(struct drm_gem_object *obj);
44void radeon_gem_prime_unpin(struct drm_gem_object *obj);
45
46const struct drm_gem_object_funcs radeon_gem_object_funcs;
47
48#ifdef __linux__
49static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
50{
51 struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
52 struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
53 vm_fault_t ret;
54
55 down_read(&rdev->pm.mclk_lock);
56
57 ret = ttm_bo_vm_reserve(bo, vmf);
58 if (ret)
59 goto unlock_mclk;
60
61 ret = radeon_bo_fault_reserve_notify(bo);
62 if (ret)
63 goto unlock_resv;
64
65 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
66 TTM_BO_VM_NUM_PREFAULT);
67 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
68 goto unlock_mclk;
69
70unlock_resv:
71 dma_resv_unlock(bo->base.resv);
72
73unlock_mclk:
74 up_read(&rdev->pm.mclk_lock);
75 return ret;
76}
77
78static const struct vm_operations_struct radeon_gem_vm_ops = {
79 .fault = radeon_gem_fault,
80 .open = ttm_bo_vm_open,
81 .close = ttm_bo_vm_close,
82 .access = ttm_bo_vm_access
83};
84#else /* !__linux__ */
85int
86radeon_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
87 int npages, int centeridx, vm_fault_t fault_type,
88 vm_prot_t access_type, int flags)
89{
90 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
91 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
92 struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
93 vm_fault_t ret;
94
95 down_read(&rdev->pm.mclk_lock);
96
97 ret = ttm_bo_vm_reserve(bo);
98 if (ret)
99 goto unlock_mclk;
100
101 ret = radeon_bo_fault_reserve_notify(bo);
102 if (ret)
103 goto unlock_resv;
104
105 ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
106 TTM_BO_VM_NUM_PREFAULT, 1);
107#ifdef notyet
108 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
109 goto unlock_mclk;
110#endif
111
112unlock_resv:
113 dma_resv_unlock(bo->base.resv);
114
115unlock_mclk:
116 switch (ret) {
117 case VM_FAULT_NOPAGE:
118 ret = VM_PAGER_OK;
119 break;
120 case VM_FAULT_RETRY:
121 ret = VM_PAGER_REFAULT;
122 break;
123 default:
124 ret = VM_PAGER_BAD;
125 break;
126 }
127 up_read(&rdev->pm.mclk_lock);
128 uvmfault_unlockall(ufi, NULL, uobj);
129 return ret;
130}
131
132void
133radeon_gem_vm_reference(struct uvm_object *uobj)
134{
135 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
136
137 ttm_bo_get(bo);
138}
139
140void
141radeon_gem_vm_detach(struct uvm_object *uobj)
142{
143 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
144
145 ttm_bo_put(bo);
146}
147
148static const struct uvm_pagerops radeon_gem_vm_ops = {
149 .pgo_fault = radeon_gem_fault,
150 .pgo_reference = radeon_gem_vm_reference,
151 .pgo_detach = radeon_gem_vm_detach
152};
153#endif /* !__linux__ */
154
155static void radeon_gem_object_free(struct drm_gem_object *gobj)
156{
157 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
158
159 if (robj) {
160 radeon_mn_unregister(robj);
161 radeon_bo_unref(&robj);
162 }
163}
164
165int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
166 int alignment, int initial_domain,
167 u32 flags, bool kernel,
168 struct drm_gem_object **obj)
169{
170 struct radeon_bo *robj;
171 unsigned long max_size;
172 int r;
173
174 *obj = NULL;
175 /* At least align on page size */
176 if (alignment < PAGE_SIZE) {
177 alignment = PAGE_SIZE;
178 }
179
180 /* Maximum bo size is the unpinned gtt size since we use the gtt to
181 * handle vram to system pool migrations.
182 */
183 max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
184 if (size > max_size) {
185 DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
186 size >> 20, max_size >> 20);
187 return -ENOMEM;
188 }
189
190retry:
191 r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
192 flags, NULL, NULL, &robj);
193 if (r) {
194 if (r != -ERESTARTSYS) {
195 if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
196 initial_domain |= RADEON_GEM_DOMAIN_GTT;
197 goto retry;
198 }
199 DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
200 size, initial_domain, alignment, r);
201 }
202 return r;
203 }
204 *obj = &robj->tbo.base;
205 (*obj)->funcs = &radeon_gem_object_funcs;
206#ifdef __linux__
207 robj->pid = task_pid_nr(current);
208#else
209 robj->pid = curproc->p_p->ps_pid;
210#endif
211
212 mutex_lock(&rdev->gem.mutex);
213 list_add_tail(&robj->list, &rdev->gem.objects);
214 mutex_unlock(&rdev->gem.mutex);
215
216 return 0;
217}
218
219static int radeon_gem_set_domain(struct drm_gem_object *gobj,
220 uint32_t rdomain, uint32_t wdomain)
221{
222 struct radeon_bo *robj;
223 uint32_t domain;
224 long r;
225
226 /* FIXME: reimplement */
227 robj = gem_to_radeon_bo(gobj);
228 /* work out where to validate the buffer to */
229 domain = wdomain;
230 if (!domain) {
231 domain = rdomain;
232 }
233 if (!domain) {
234 /* Do nothing */
235 pr_warn("Set domain without domain !\n");
236 return 0;
237 }
238 if (domain == RADEON_GEM_DOMAIN_CPU) {
239 /* Asking for cpu access, wait for object idle */
240 r = dma_resv_wait_timeout(robj->tbo.base.resv,
241 DMA_RESV_USAGE_BOOKKEEP,
242 true, 30 * HZ);
243 if (!r)
244 r = -EBUSY;
245
246 if (r < 0 && r != -EINTR) {
247 pr_err("Failed to wait for object: %li\n", r);
248 return r;
249 }
250 }
251 if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
252 /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
253 return -EINVAL;
254 }
255 return 0;
256}
257
258int radeon_gem_init(struct radeon_device *rdev)
259{
260 INIT_LIST_HEAD(&rdev->gem.objects);
261 return 0;
262}
263
264void radeon_gem_fini(struct radeon_device *rdev)
265{
266 radeon_bo_force_delete(rdev);
267}
268
269/*
270 * Called from drm_gem_handle_create, which appears in both the new and
271 * open ioctl cases.
272 */
273static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
274{
275 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
276 struct radeon_device *rdev = rbo->rdev;
277 struct radeon_fpriv *fpriv = file_priv->driver_priv;
278 struct radeon_vm *vm = &fpriv->vm;
279 struct radeon_bo_va *bo_va;
280 int r;
281
282 if ((rdev->family < CHIP_CAYMAN) ||
283 (!rdev->accel_working)) {
284 return 0;
285 }
286
287 r = radeon_bo_reserve(rbo, false);
288 if (r) {
289 return r;
290 }
291
292 bo_va = radeon_vm_bo_find(vm, rbo);
293 if (!bo_va) {
294 bo_va = radeon_vm_bo_add(rdev, vm, rbo);
Value stored to 'bo_va' is never read
295 } else {
296 ++bo_va->ref_count;
297 }
298 radeon_bo_unreserve(rbo);
299
300 return 0;
301}
302
303static void radeon_gem_object_close(struct drm_gem_object *obj,
304 struct drm_file *file_priv)
305{
306 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
307 struct radeon_device *rdev = rbo->rdev;
308 struct radeon_fpriv *fpriv = file_priv->driver_priv;
309 struct radeon_vm *vm = &fpriv->vm;
310 struct radeon_bo_va *bo_va;
311 int r;
312
313 if ((rdev->family < CHIP_CAYMAN) ||
314 (!rdev->accel_working)) {
315 return;
316 }
317
318 r = radeon_bo_reserve(rbo, true);
319 if (r) {
320 dev_err(rdev->dev, "leaking bo va because "
321 "we fail to reserve bo (%d)\n", r);
322 return;
323 }
324 bo_va = radeon_vm_bo_find(vm, rbo);
325 if (bo_va) {
326 if (--bo_va->ref_count == 0) {
327 radeon_vm_bo_rmv(rdev, bo_va);
328 }
329 }
330 radeon_bo_unreserve(rbo);
331}
332
333static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
334{
335 if (r == -EDEADLK) {
336 r = radeon_gpu_reset(rdev);
337 if (!r)
338 r = -EAGAIN;
339 }
340 return r;
341}
342
343#ifdef __linux__
344static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
345{
346 struct radeon_bo *bo = gem_to_radeon_bo(obj);
347 struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);
348
349 if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
350 return -EPERM;
351
352 return drm_gem_ttm_mmap(obj, vma);
353}
354#else
355static int
356radeon_gem_object_mmap(struct drm_gem_object *obj,
357 vm_prot_t accessprot, voff_t off, vsize_t size)
358{
359 struct radeon_bo *bo = gem_to_radeon_bo(obj);
360 struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);
361
362 if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
363 return -EPERM;
364
365 return drm_gem_ttm_mmap(obj, accessprot, off, size);
366}
367#endif
368
369const struct drm_gem_object_funcs radeon_gem_object_funcs = {
370 .free = radeon_gem_object_free,
371 .open = radeon_gem_object_open,
372 .close = radeon_gem_object_close,
373 .export = radeon_gem_prime_export,
374 .pin = radeon_gem_prime_pin,
375 .unpin = radeon_gem_prime_unpin,
376 .get_sg_table = radeon_gem_prime_get_sg_table,
377 .vmap = drm_gem_ttm_vmap,
378 .vunmap = drm_gem_ttm_vunmap,
379 .mmap = radeon_gem_object_mmap,
380 .vm_ops = &radeon_gem_vm_ops,
381};
382
383/*
384 * GEM ioctls.
385 */
386int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
387 struct drm_file *filp)
388{
389 struct radeon_device *rdev = dev->dev_private;
390 struct drm_radeon_gem_info *args = data;
391 struct ttm_resource_manager *man;
392
393 man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
394
395 args->vram_size = (u64)man->size << PAGE_SHIFT;
396 args->vram_visible = rdev->mc.visible_vram_size;
397 args->vram_visible -= rdev->vram_pin_size;
398 args->gart_size = rdev->mc.gtt_size;
399 args->gart_size -= rdev->gart_pin_size;
400
401 return 0;
402}
403
404int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
405 struct drm_file *filp)
406{
407 /* TODO: implement */
408 DRM_ERROR("unimplemented %s\n", __func__);
409 return -ENOSYS;
410}
411
412int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
413 struct drm_file *filp)
414{
415 /* TODO: implement */
416 DRM_ERROR("unimplemented %s\n", __func__);
417 return -ENOSYS;
418}
419
420int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
421 struct drm_file *filp)
422{
423 struct radeon_device *rdev = dev->dev_private;
424 struct drm_radeon_gem_create *args = data;
425 struct drm_gem_object *gobj;
426 uint32_t handle;
427 int r;
428
429 down_read(&rdev->exclusive_lock);
430 /* create a gem object to contain this object in */
431 args->size = roundup(args->size, PAGE_SIZE);
432 r = radeon_gem_object_create(rdev, args->size, args->alignment,
433 args->initial_domain, args->flags,
434 false, &gobj);
435 if (r) {
436 up_read(&rdev->exclusive_lock);
437 r = radeon_gem_handle_lockup(rdev, r);
438 return r;
439 }
440 r = drm_gem_handle_create(filp, gobj, &handle);
441 /* drop reference from allocate - handle holds it now */
442 drm_gem_object_put(gobj);
443 if (r) {
444 up_read(&rdev->exclusive_lock);
445 r = radeon_gem_handle_lockup(rdev, r);
446 return r;
447 }
448 args->handle = handle;
449 up_read(&rdev->exclusive_lock);
450 return 0;
451}
452
453int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
454 struct drm_file *filp)
455{
456 return -ENOSYS;
457#ifdef notyet
458 struct ttm_operation_ctx ctx = { true, false };
459 struct radeon_device *rdev = dev->dev_private;
460 struct drm_radeon_gem_userptr *args = data;
461 struct drm_gem_object *gobj;
462 struct radeon_bo *bo;
463 uint32_t handle;
464 int r;
465
466 args->addr = untagged_addr(args->addr);
467
468 if (offset_in_page(args->addr | args->size))
469 return -EINVAL;
470
471 /* reject unknown flag values */
472 if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
473 RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
474 RADEON_GEM_USERPTR_REGISTER))
475 return -EINVAL;
476
477 if (args->flags & RADEON_GEM_USERPTR_READONLY) {
478 /* readonly pages not tested on older hardware */
479 if (rdev->family < CHIP_R600)
480 return -EINVAL;
481
482 } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
483 !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
484
485 /* if we want to write to it we must require anonymous
486 memory and install an MMU notifier */
487 return -EACCES;
488 }
489
490 down_read(&rdev->exclusive_lock);
491
492 /* create a gem object to contain this object in */
493 r = radeon_gem_object_create(rdev, args->size, 0,
494 RADEON_GEM_DOMAIN_CPU, 0,
495 false, &gobj);
496 if (r)
497 goto handle_lockup;
498
499 bo = gem_to_radeon_bo(gobj);
500 r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
501 if (r)
502 goto release_object;
503
504 if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
505 r = radeon_mn_register(bo, args->addr);
506 if (r)
507 goto release_object;
508 }
509
510 if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
511 mmap_read_lock(current->mm);
512 r = radeon_bo_reserve(bo, true);
513 if (r) {
514 mmap_read_unlock(current->mm);
515 goto release_object;
516 }
517
518 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
519 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
520 radeon_bo_unreserve(bo);
521 mmap_read_unlock(current->mm);
522 if (r)
523 goto release_object;
524 }
525
526 r = drm_gem_handle_create(filp, gobj, &handle);
527 /* drop reference from allocate - handle holds it now */
528 drm_gem_object_put(gobj);
529 if (r)
530 goto handle_lockup;
531
532 args->handle = handle;
533 up_read(&rdev->exclusive_lock);
534 return 0;
535
536release_object:
537 drm_gem_object_put(gobj);
538
539handle_lockup:
540 up_read(&rdev->exclusive_lock);
541 r = radeon_gem_handle_lockup(rdev, r);
542
543 return r;
544#endif
545}
546
547int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
548 struct drm_file *filp)
549{
550 /* transition the BO to a domain -
551 * just validate the BO into a certain domain */
552 struct radeon_device *rdev = dev->dev_private;
553 struct drm_radeon_gem_set_domain *args = data;
554 struct drm_gem_object *gobj;
555 int r;
556
557 /* for now if someone requests domain CPU -
558 * just make sure the buffer is finished with */
559 down_read(&rdev->exclusive_lock);
560
561 /* just do a BO wait for now */
562 gobj = drm_gem_object_lookup(filp, args->handle);
563 if (gobj == NULL) {
564 up_read(&rdev->exclusive_lock);
565 return -ENOENT;
566 }
567
568 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
569
570 drm_gem_object_put(gobj);
571 up_read(&rdev->exclusive_lock);
572 r = radeon_gem_handle_lockup(rdev, r);
573 return r;
574}
575
576int radeon_mode_dumb_mmap(struct drm_file *filp,
577 struct drm_device *dev,
578 uint32_t handle, uint64_t *offset_p)
579{
580 struct drm_gem_object *gobj;
581 struct radeon_bo *robj;
582
583 gobj = drm_gem_object_lookup(filp, handle);
584 if (gobj == NULL) {
585 return -ENOENT;
586 }
587 robj = gem_to_radeon_bo(gobj);
588 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
589 drm_gem_object_put(gobj);
590 return -EPERM;
591 }
592 *offset_p = radeon_bo_mmap_offset(robj);
593 drm_gem_object_put(gobj);
594 return 0;
595}
596
597int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
598 struct drm_file *filp)
599{
600 struct drm_radeon_gem_mmap *args = data;
601
602 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
603}
604
605int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
606 struct drm_file *filp)
607{
608 struct drm_radeon_gem_busy *args = data;
609 struct drm_gem_object *gobj;
610 struct radeon_bo *robj;
611 int r;
612 uint32_t cur_placement = 0;
613
614 gobj = drm_gem_object_lookup(filp, args->handle);
615 if (gobj == NULL) {
616 return -ENOENT;
617 }
618 robj = gem_to_radeon_bo(gobj);
619
620 r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
621 if (r == 0)
622 r = -EBUSY;
623 else
624 r = 0;
625
626 cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
627 args->domain = radeon_mem_type_to_domain(cur_placement);
628 drm_gem_object_put(gobj);
629 return r;
630}
631
632int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
633 struct drm_file *filp)
634{
635 struct radeon_device *rdev = dev->dev_private;
636 struct drm_radeon_gem_wait_idle *args = data;
637 struct drm_gem_object *gobj;
638 struct radeon_bo *robj;
639 int r = 0;
640 uint32_t cur_placement = 0;
641 long ret;
642
643 gobj = drm_gem_object_lookup(filp, args->handle);
644 if (gobj == NULL) {
645 return -ENOENT;
646 }
647 robj = gem_to_radeon_bo(gobj);
648
649 ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
650 true, 30 * HZ);
651 if (ret == 0)
652 r = -EBUSY;
653 else if (ret < 0)
654 r = ret;
655
656 /* Flush HDP cache via MMIO if necessary */
657 cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
658 if (rdev->asic->mmio_hdp_flush &&
659 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
660 robj->rdev->asic->mmio_hdp_flush(rdev);
661 drm_gem_object_put(gobj);
662 r = radeon_gem_handle_lockup(rdev, r);
663 return r;
664}
665
666int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
667 struct drm_file *filp)
668{
669 struct drm_radeon_gem_set_tiling *args = data;
670 struct drm_gem_object *gobj;
671 struct radeon_bo *robj;
672 int r = 0;
673
674 DRM_DEBUG("%d \n", args->handle);
675 gobj = drm_gem_object_lookup(filp, args->handle);
676 if (gobj == NULL)
677 return -ENOENT;
678 robj = gem_to_radeon_bo(gobj);
679 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
680 drm_gem_object_put(gobj);
681 return r;
682}
683
684int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
685 struct drm_file *filp)
686{
687 struct drm_radeon_gem_get_tiling *args = data;
688 struct drm_gem_object *gobj;
689 struct radeon_bo *rbo;
690 int r = 0;
691
692 DRM_DEBUG("\n");
693 gobj = drm_gem_object_lookup(filp, args->handle);
694 if (gobj == NULL)
695 return -ENOENT;
696 rbo = gem_to_radeon_bo(gobj);
697 r = radeon_bo_reserve(rbo, false);
698 if (unlikely(r != 0))
699 goto out;
700 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
701 radeon_bo_unreserve(rbo);
702out:
703 drm_gem_object_put(gobj);
704 return r;
705}
706
707/**
708 * radeon_gem_va_update_vm - update the bo_va in its VM
709 *
710 * @rdev: radeon_device pointer
711 * @bo_va: bo_va to update
712 *
713 * Update the bo_va directly after setting its address. Errors are not
714 * vital here, so they are not reported back to userspace.
715 */
716static void radeon_gem_va_update_vm(struct radeon_device *rdev,
717 struct radeon_bo_va *bo_va)
718{
719 struct ttm_validate_buffer tv, *entry;
720 struct radeon_bo_list *vm_bos;
721 struct ww_acquire_ctx ticket;
722 struct list_head list;
723 unsigned domain;
724 int r;
725
726 INIT_LIST_HEAD(&list);
727
728 tv.bo = &bo_va->bo->tbo;
729 tv.num_shared = 1;
730 list_add(&tv.head, &list);
731
732 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
733 if (!vm_bos)
734 return;
735
736 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
737 if (r)
738 goto error_free;
739
740 list_for_each_entry(entry, &list, head) {
741 domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
742 /* if anything is swapped out don't swap it in here,
743 just abort and wait for the next CS */
744 if (domain == RADEON_GEM_DOMAIN_CPU)
745 goto error_unreserve;
746 }
747
748 mutex_lock(&bo_va->vm->mutex);
749 r = radeon_vm_clear_freed(rdev, bo_va->vm);
750 if (r)
751 goto error_unlock;
752
753 if (bo_va->it.start)
754 r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
755
756error_unlock:
757 mutex_unlock(&bo_va->vm->mutex);
758
759error_unreserve:
760 ttm_eu_backoff_reservation(&ticket, &list);
761
762error_free:
763 kvfree(vm_bos);
764
765 if (r && r != -ERESTARTSYS)
766 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
767}
768
769int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
770 struct drm_file *filp)
771{
772 struct drm_radeon_gem_va *args = data;
773 struct drm_gem_object *gobj;
774 struct radeon_device *rdev = dev->dev_private;
775 struct radeon_fpriv *fpriv = filp->driver_priv;
776 struct radeon_bo *rbo;
777 struct radeon_bo_va *bo_va;
778 u32 invalid_flags;
779 int r = 0;
780
781 if (!rdev->vm_manager.enabled) {
782 args->operation = RADEON_VA_RESULT_ERROR;
783 return -ENOTTY;
784 }
785
786 /* !! DONT REMOVE !!
787 * We don't support vm_id yet; to be sure we don't have broken
788 * userspace, reject anyone trying to use a non-zero value, so that
789 * going forward we can use those fields without breaking existing userspace.
790 */
791 if (args->vm_id) {
792 args->operation = RADEON_VA_RESULT_ERROR;
793 return -EINVAL;
794 }
795
796 if (args->offset < RADEON_VA_RESERVED_SIZE) {
797 dev_err(dev->dev,
798 "offset 0x%lX is in reserved area 0x%X\n",
799 (unsigned long)args->offset,
800 RADEON_VA_RESERVED_SIZE);
801 args->operation = RADEON_VA_RESULT_ERROR;
802 return -EINVAL;
803 }
804
805 /* don't remove; we need to enforce userspace to set the snooped flag,
806 * otherwise we will end up with broken userspace and we won't be able
807 * to enable this feature without adding a new interface
808 */
809 invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
810 if ((args->flags & invalid_flags)) {
811 dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
812 args->flags, invalid_flags);
813 args->operation = RADEON_VA_RESULT_ERROR;
814 return -EINVAL;
815 }
816
817 switch (args->operation) {
818 case RADEON_VA_MAP:
819 case RADEON_VA_UNMAP:
820 break;
821 default:
822 dev_err(dev->dev, "unsupported operation %d\n",
823 args->operation);
824 args->operation = RADEON_VA_RESULT_ERROR;
825 return -EINVAL;
826 }
827
828 gobj = drm_gem_object_lookup(filp, args->handle);
829 if (gobj == NULL) {
830 args->operation = RADEON_VA_RESULT_ERROR;
831 return -ENOENT;
832 }
833 rbo = gem_to_radeon_bo(gobj);
834 r = radeon_bo_reserve(rbo, false);
835 if (r) {
836 args->operation = RADEON_VA_RESULT_ERROR;
837 drm_gem_object_put(gobj);
838 return r;
839 }
840 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
841 if (!bo_va) {
842 args->operation = RADEON_VA_RESULT_ERROR;
843 radeon_bo_unreserve(rbo);
844 drm_gem_object_put(gobj);
845 return -ENOENT;
846 }
847
848 switch (args->operation) {
849 case RADEON_VA_MAP:
850 if (bo_va->it.start) {
851 args->operation = RADEON_VA_RESULT_VA_EXIST;
852 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
853 radeon_bo_unreserve(rbo);
854 goto out;
855 }
856 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
857 break;
858 case RADEON_VA_UNMAP:
859 r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
860 break;
861 default:
862 break;
863 }
864 if (!r)
865 radeon_gem_va_update_vm(rdev, bo_va);
866 args->operation = RADEON_VA_RESULT_OK;
867 if (r) {
868 args->operation = RADEON_VA_RESULT_ERROR;
869 }
870out:
871 drm_gem_object_put(gobj);
872 return r;
873}
874
875int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
876 struct drm_file *filp)
877{
878 struct drm_radeon_gem_op *args = data;
879 struct drm_gem_object *gobj;
880 struct radeon_bo *robj;
881 int r;
882
883 gobj = drm_gem_object_lookup(filp, args->handle);
884 if (gobj == NULL) {
885 return -ENOENT;
886 }
887 robj = gem_to_radeon_bo(gobj);
888
889 r = -EPERM;
890 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
891 goto out;
892
893 r = radeon_bo_reserve(robj, false);
894 if (unlikely(r))
895 goto out;
896
897 switch (args->op) {
898 case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
899 args->value = robj->initial_domain;
900 break;
901 case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
902 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
903 RADEON_GEM_DOMAIN_GTT |
904 RADEON_GEM_DOMAIN_CPU);
905 break;
906 default:
907 r = -EINVAL;
908 }
909
910 radeon_bo_unreserve(robj);
911out:
912 drm_gem_object_put(gobj);
913 return r;
914}
915
916int radeon_mode_dumb_create(struct drm_file *file_priv,
917 struct drm_device *dev,
918 struct drm_mode_create_dumb *args)
919{
920 struct radeon_device *rdev = dev->dev_private;
921 struct drm_gem_object *gobj;
922 uint32_t handle;
923 int r;
924
925 args->pitch = radeon_align_pitch(rdev, args->width,
926 DIV_ROUND_UP(args->bpp, 8), 0);
927 args->size = (u64)args->pitch * args->height;
928 args->size = roundup2(args->size, PAGE_SIZE);
929
930 r = radeon_gem_object_create(rdev, args->size, 0,
931 RADEON_GEM_DOMAIN_VRAM0x4, 0,
932 false, &gobj);
933 if (r)
934 return -ENOMEM;
935
936 r = drm_gem_handle_create(file_priv, gobj, &handle);
937 /* drop reference from allocate - handle holds it now */
938 drm_gem_object_put(gobj);
939 if (r) {
940 return r;
941 }
942 args->handle = handle;
943 return 0;
944}
945
946#if defined(CONFIG_DEBUG_FS)
947static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
948{
949 struct radeon_device *rdev = (struct radeon_device *)m->private;
950 struct radeon_bo *rbo;
951 unsigned i = 0;
952
953 mutex_lock(&rdev->gem.mutex);
954 list_for_each_entry(rbo, &rdev->gem.objects, list) {
955 unsigned domain;
956 const char *placement;
957
958 domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
959 switch (domain) {
960 case RADEON_GEM_DOMAIN_VRAM:
961 placement = "VRAM";
962 break;
963 case RADEON_GEM_DOMAIN_GTT:
964 placement = " GTT";
965 break;
966 case RADEON_GEM_DOMAIN_CPU:
967 default:
968 placement = " CPU";
969 break;
970 }
971 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
972 i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
973 placement, (unsigned long)rbo->pid);
974 i++;
975 }
976 mutex_unlock(&rdev->gem.mutex);
977 return 0;
978}
979
980DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
981#endif
982
983void radeon_gem_debugfs_init(struct radeon_device *rdev)
984{
985#if defined(CONFIG_DEBUG_FS)
986 struct dentry *root = rdev->ddev->primary->debugfs_root;
987
988 debugfs_create_file("radeon_gem_info", 0444, root, rdev,
989 &radeon_debugfs_gem_info_fops);
990
991#endif
992}