File: | dev/pci/drm/amd/amdgpu/amdgpu_gem.c |
Warning: | line 140, column 3: Value stored to 'bo_va' is never read |
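The dead store flagged above is in amdgpu_gem_object_open(): when no bo_va exists yet, the result of amdgpu_vm_bo_add() is assigned to 'bo_va' but never read again before the function returns. A minimal sketch of one way to quiet the analyzer, assuming the value really is unneeded on this path (hypothetical cleanup, not from the source):

    bo_va = amdgpu_vm_bo_find(vm, abo);
    if (!bo_va)
        (void)amdgpu_vm_bo_add(adev, vm, abo); /* result intentionally unused */
    else
        ++bo_va->ref_count;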
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #include <linux/ktime.h> |
29 | #include <linux/module.h> |
30 | #include <linux/pagemap.h> |
31 | #include <linux/pci.h> |
32 | #include <linux/dma-buf.h> |
33 | |
34 | #include <drm/amdgpu_drm.h> |
35 | #include <drm/drm_debugfs.h> |
36 | |
37 | #include "amdgpu.h" |
38 | #include "amdgpu_display.h" |
39 | #include "amdgpu_xgmi.h" |
40 | |
41 | void amdgpu_gem_object_free(struct drm_gem_object *gobj) |
42 | { |
43 | struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); |
44 | |
45 | if (robj) { |
46 | amdgpu_mn_unregister(robj); |
47 | amdgpu_bo_unref(&robj); |
48 | } |
49 | } |
50 | |
51 | int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, |
52 | int alignment, u32 initial_domain, |
53 | u64 flags, enum ttm_bo_type type, |
54 | struct dma_resv *resv, |
55 | struct drm_gem_object **obj) |
56 | { |
57 | struct amdgpu_bo *bo; |
58 | struct amdgpu_bo_param bp; |
59 | int r; |
60 | |
61 | memset(&bp, 0, sizeof(bp)); |
62 | *obj = NULL; |
63 | |
64 | bp.size = size; |
65 | bp.byte_align = alignment; |
66 | bp.type = type; |
67 | bp.resv = resv; |
68 | bp.preferred_domain = initial_domain; |
69 | bp.flags = flags; |
70 | bp.domain = initial_domain; |
71 | r = amdgpu_bo_create(adev, &bp, &bo); |
72 | if (r) |
73 | return r; |
74 | |
75 | *obj = &bo->tbo.base; |
76 | |
77 | return 0; |
78 | } |
79 | |
80 | int drm_file_cmp(struct drm_file *, struct drm_file *); |
81 | SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp); |
82 | |
83 | void amdgpu_gem_force_release(struct amdgpu_device *adev) |
84 | { |
85 | struct drm_device *ddev = adev_to_drm(adev); |
86 | struct drm_file *file; |
87 | |
88 | mutex_lock(&ddev->filelist_mutex); |
89 | |
90 | SPLAY_FOREACH(file, drm_file_tree, &ddev->files) { |
91 | struct drm_gem_object *gobj; |
92 | int handle; |
93 | |
94 | WARN_ONCE(1, "Still active user space clients!\n"); |
95 | spin_lock(&file->table_lock); |
96 | idr_for_each_entry(&file->object_idr, gobj, handle) { |
97 | WARN_ONCE(1, "And also active allocations!\n"); |
98 | drm_gem_object_put(gobj); |
99 | } |
100 | idr_destroy(&file->object_idr); |
101 | spin_unlock(&file->table_lock); |
102 | } |
103 | |
104 | mutex_unlock(&ddev->filelist_mutex); |
105 | } |
106 | |
107 | /* |
108 | * Called from drm_gem_handle_create, which appears in both the new |
109 | * and open ioctl cases. |
110 | */ |
111 | int amdgpu_gem_object_open(struct drm_gem_object *obj, |
112 | struct drm_file *file_priv) |
113 | { |
114 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); |
115 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
116 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
117 | struct amdgpu_vm *vm = &fpriv->vm; |
118 | struct amdgpu_bo_va *bo_va; |
119 | #ifdef notyet |
120 | struct mm_struct *mm; |
121 | #endif |
122 | int r; |
123 | |
124 | #ifdef notyet |
125 | mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); |
126 | if (mm && mm != current->mm) |
127 | return -EPERM; |
128 | #endif |
129 | |
130 | if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && |
131 | abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) |
132 | return -EPERM; |
133 | |
134 | r = amdgpu_bo_reserve(abo, false); |
135 | if (r) |
136 | return r; |
137 | |
138 | bo_va = amdgpu_vm_bo_find(vm, abo); |
139 | if (!bo_va) { |
140 | bo_va = amdgpu_vm_bo_add(adev, vm, abo); |
Value stored to 'bo_va' is never read | |
141 | } else { |
142 | ++bo_va->ref_count; |
143 | } |
144 | amdgpu_bo_unreserve(abo); |
145 | return 0; |
146 | } |
147 | |
148 | void amdgpu_gem_object_close(struct drm_gem_object *obj, |
149 | struct drm_file *file_priv) |
150 | { |
151 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); |
152 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
153 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
154 | struct amdgpu_vm *vm = &fpriv->vm; |
155 | |
156 | struct amdgpu_bo_list_entry vm_pd; |
157 | struct list_head list, duplicates; |
158 | struct dma_fence *fence = NULL; |
159 | struct ttm_validate_buffer tv; |
160 | struct ww_acquire_ctx ticket; |
161 | struct amdgpu_bo_va *bo_va; |
162 | long r; |
163 | |
164 | INIT_LIST_HEAD(&list); |
165 | INIT_LIST_HEAD(&duplicates); |
166 | |
167 | tv.bo = &bo->tbo; |
168 | tv.num_shared = 2; |
169 | list_add(&tv.head, &list); |
170 | |
171 | amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); |
172 | |
173 | r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); |
174 | if (r) { |
175 | dev_err(adev->dev, "leaking bo va because " |
176 | "we fail to reserve bo (%ld)\n", r); |
177 | return; |
178 | } |
179 | bo_va = amdgpu_vm_bo_find(vm, bo); |
180 | if (!bo_va || --bo_va->ref_count) |
181 | goto out_unlock; |
182 | |
183 | amdgpu_vm_bo_rmv(adev, bo_va); |
184 | if (!amdgpu_vm_ready(vm)) |
185 | goto out_unlock; |
186 | |
187 | fence = dma_resv_get_excl(bo->tbo.base.resv); |
188 | if (fence) { |
189 | amdgpu_bo_fence(bo, fence, true); |
190 | fence = NULL; |
191 | } |
192 | |
193 | r = amdgpu_vm_clear_freed(adev, vm, &fence); |
194 | if (r || !fence) |
195 | goto out_unlock; |
196 | |
197 | amdgpu_bo_fence(bo, fence, true); |
198 | dma_fence_put(fence); |
199 | |
200 | out_unlock: |
201 | if (unlikely(r < 0)) |
202 | dev_err(adev->dev, "failed to clear page " |
203 | "tables on GEM object close (%ld)\n", r); |
204 | ttm_eu_backoff_reservation(&ticket, &list); |
205 | } |
206 | |
207 | /* |
208 | * GEM ioctls. |
209 | */ |
210 | int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, |
211 | struct drm_file *filp) |
212 | { |
213 | struct amdgpu_device *adev = drm_to_adev(dev); |
214 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
215 | struct amdgpu_vm *vm = &fpriv->vm; |
216 | union drm_amdgpu_gem_create *args = data; |
217 | uint64_t flags = args->in.domain_flags; |
218 | uint64_t size = args->in.bo_size; |
219 | struct dma_resv *resv = NULL; |
220 | struct drm_gem_object *gobj; |
221 | uint32_t handle, initial_domain; |
222 | int r; |
223 | |
224 | /* reject invalid gem flags */ |
225 | if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
226 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
227 | AMDGPU_GEM_CREATE_CPU_GTT_USWC | |
228 | AMDGPU_GEM_CREATE_VRAM_CLEARED | |
229 | AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | |
230 | AMDGPU_GEM_CREATE_EXPLICIT_SYNC | |
231 | AMDGPU_GEM_CREATE_ENCRYPTED)) |
232 | |
233 | return -EINVAL; |
234 | |
235 | /* reject invalid gem domains */ |
236 | if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) |
237 | return -EINVAL; |
238 | |
239 | if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) { |
240 | DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n"); |
241 | return -EINVAL; |
242 | } |
243 | |
244 | /* create a gem object to contain this object in */ |
245 | if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | |
246 | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { |
247 | if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { |
248 | /* if gds bo is created from user space, it must be |
249 | * passed to bo list |
250 | */ |
251 | DRM_ERROR("GDS bo cannot be per-vm-bo\n"); |
252 | return -EINVAL; |
253 | } |
254 | flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; |
255 | } |
256 | |
257 | if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { |
258 | r = amdgpu_bo_reserve(vm->root.base.bo, false); |
259 | if (r) |
260 | return r; |
261 | |
262 | resv = vm->root.base.bo->tbo.base.resv; |
263 | } |
264 | |
265 | retry: |
266 | initial_domain = (u32)(0xffffffff & args->in.domains); |
267 | r = amdgpu_gem_object_create(adev, size, args->in.alignment, |
268 | initial_domain, |
269 | flags, ttm_bo_type_device, resv, &gobj); |
270 | if (r) { |
271 | if (r != -ERESTARTSYS) { |
272 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { |
273 | flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
274 | goto retry; |
275 | } |
276 | |
277 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { |
278 | initial_domain |= AMDGPU_GEM_DOMAIN_GTT; |
279 | goto retry; |
280 | } |
281 | DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n", |
282 | size, initial_domain, args->in.alignment, r); |
283 | } |
284 | return r; |
285 | } |
286 | |
287 | if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { |
288 | if (!r) { |
289 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); |
290 | |
291 | abo->parent = amdgpu_bo_ref(vm->root.base.bo); |
292 | } |
293 | amdgpu_bo_unreserve(vm->root.base.bo); |
294 | } |
295 | if (r) |
296 | return r; |
297 | |
298 | r = drm_gem_handle_create(filp, gobj, &handle); |
299 | /* drop reference from allocate - handle holds it now */ |
300 | drm_gem_object_put(gobj); |
301 | if (r) |
302 | return r; |
303 | |
304 | memset(args, 0, sizeof(*args)); |
305 | args->out.handle = handle; |
306 | return 0; |
307 | } |
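Userspace reaches this handler through the AMDGPU GEM_CREATE ioctl. A hedged libdrm-style sketch of a caller (field names per amdgpu_drm.h; error handling trimmed, buffer size illustrative):

    union drm_amdgpu_gem_create req = {0};

    req.in.bo_size = 4096;
    req.in.alignment = 4096;
    req.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
    req.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
        handle = req.out.handle; /* GEM handle for later mmap/va ioctls */

Note the fallback in the handler above: if the allocation fails for a reason other than -ERESTARTSYS, it retries first without the CPU-access flag and then with GTT added to a VRAM-only domain before giving up.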
308 | |
309 | int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, |
310 | struct drm_file *filp) |
311 | { |
312 | return -ENOSYS; |
313 | #ifdef notyet |
314 | struct ttm_operation_ctx ctx = { true, false }; |
315 | struct amdgpu_device *adev = drm_to_adev(dev); |
316 | struct drm_amdgpu_gem_userptr *args = data; |
317 | struct drm_gem_object *gobj; |
318 | struct amdgpu_bo *bo; |
319 | uint32_t handle; |
320 | int r; |
321 | |
322 | args->addr = untagged_addr(args->addr); |
323 | |
324 | if (offset_in_page(args->addr | args->size)) |
325 | return -EINVAL; |
326 | |
327 | /* reject unknown flag values */ |
328 | if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY | |
329 | AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE | |
330 | AMDGPU_GEM_USERPTR_REGISTER)) |
331 | return -EINVAL; |
332 | |
333 | if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && |
334 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { |
335 | |
336 | /* if we want to write to it we must install an MMU notifier */ |
337 | return -EACCES; |
338 | } |
339 | |
340 | /* create a gem object to contain this object in */ |
341 | r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, |
342 | 0, ttm_bo_type_device, NULL, &gobj); |
343 | if (r) |
344 | return r; |
345 | |
346 | bo = gem_to_amdgpu_bo(gobj); |
347 | bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; |
348 | bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; |
349 | r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags); |
350 | if (r) |
351 | goto release_object; |
352 | |
353 | if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) { |
354 | r = amdgpu_mn_register(bo, args->addr); |
355 | if (r) |
356 | goto release_object; |
357 | } |
358 | |
359 | if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { |
360 | r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); |
361 | if (r) |
362 | goto release_object; |
363 | |
364 | r = amdgpu_bo_reserve(bo, true); |
365 | if (r) |
366 | goto user_pages_done; |
367 | |
368 | amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); |
369 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
370 | amdgpu_bo_unreserve(bo); |
371 | if (r) |
372 | goto user_pages_done; |
373 | } |
374 | |
375 | r = drm_gem_handle_create(filp, gobj, &handle); |
376 | if (r) |
377 | goto user_pages_done; |
378 | |
379 | args->handle = handle; |
380 | |
381 | user_pages_done: |
382 | if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) |
383 | amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); |
384 | |
385 | release_object: |
386 | drm_gem_object_put(gobj); |
387 | |
388 | return r; |
389 | #endif |
390 | } |
391 | |
392 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, |
393 | struct drm_device *dev, |
394 | uint32_t handle, uint64_t *offset_p) |
395 | { |
396 | struct drm_gem_object *gobj; |
397 | struct amdgpu_bo *robj; |
398 | |
399 | gobj = drm_gem_object_lookup(filp, handle); |
400 | if (gobj == NULL) { |
401 | return -ENOENT; |
402 | } |
403 | robj = gem_to_amdgpu_bo(gobj); |
404 | if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || |
405 | (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { |
406 | drm_gem_object_put(gobj); |
407 | return -EPERM; |
408 | } |
409 | *offset_p = amdgpu_bo_mmap_offset(robj); |
410 | drm_gem_object_put(gobj); |
411 | return 0; |
412 | } |
413 | |
414 | int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, |
415 | struct drm_file *filp) |
416 | { |
417 | union drm_amdgpu_gem_mmap *args = data; |
418 | uint32_t handle = args->in.handle; |
419 | memset(args, 0, sizeof(*args)); |
420 | return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr); |
421 | } |
422 | |
423 | /** |
424 | * amdgpu_gem_timeout - calculate jiffies timeout from absolute value |
425 | * |
426 | * @timeout_ns: timeout in ns |
427 | * |
428 | * Calculate the timeout in jiffies from an absolute timeout in ns. |
429 | */ |
430 | unsigned long amdgpu_gem_timeout(uint64_t timeout_ns) |
431 | { |
432 | unsigned long timeout_jiffies; |
433 | ktime_t timeout; |
434 | |
435 | /* clamp timeout if it's too large */ |
436 | if (((int64_t)timeout_ns) < 0) |
437 | return MAX_SCHEDULE_TIMEOUT; |
438 | |
439 | timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get()); |
440 | if (ktime_to_ns(timeout) < 0) |
441 | return 0; |
442 | |
443 | timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout)); |
444 | /* clamp timeout to avoid unsigned -> signed overflow */ |
445 | if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT) |
446 | return MAX_SCHEDULE_TIMEOUT - 1; |
447 | |
448 | return timeout_jiffies; |
449 | } |
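Callers pass an absolute deadline in nanoseconds, so the usual pattern is to add the desired wait to the current time first; a small illustrative sketch (the one-second deadline is an assumption, not from the source):

    /* wait at most one second from now */
    uint64_t deadline_ns = ktime_to_ns(ktime_get()) + 1000000000ULL;
    unsigned long t = amdgpu_gem_timeout(deadline_ns); /* jiffies remaining */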
450 | |
451 | int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
452 | struct drm_file *filp) |
453 | { |
454 | union drm_amdgpu_gem_wait_idle *args = data; |
455 | struct drm_gem_object *gobj; |
456 | struct amdgpu_bo *robj; |
457 | uint32_t handle = args->in.handle; |
458 | unsigned long timeout = amdgpu_gem_timeout(args->in.timeout); |
459 | int r = 0; |
460 | long ret; |
461 | |
462 | gobj = drm_gem_object_lookup(filp, handle); |
463 | if (gobj == NULL) { |
464 | return -ENOENT; |
465 | } |
466 | robj = gem_to_amdgpu_bo(gobj); |
467 | ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, |
468 | timeout); |
469 | |
470 | /* ret == 0 means not signaled, |
471 | * ret > 0 means signaled |
472 | * ret < 0 means interrupted before timeout |
473 | */ |
474 | if (ret >= 0) { |
475 | memset(args, 0, sizeof(*args)); |
476 | args->out.status = (ret == 0); |
477 | } else |
478 | r = ret; |
479 | |
480 | drm_gem_object_put(gobj); |
481 | return r; |
482 | } |
483 | |
484 | int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, |
485 | struct drm_file *filp) |
486 | { |
487 | struct drm_amdgpu_gem_metadata *args = data; |
488 | struct drm_gem_object *gobj; |
489 | struct amdgpu_bo *robj; |
490 | int r = -1; |
491 | |
492 | DRM_DEBUG("%d \n", args->handle); |
493 | gobj = drm_gem_object_lookup(filp, args->handle); |
494 | if (gobj == NULL) |
495 | return -ENOENT; |
496 | robj = gem_to_amdgpu_bo(gobj); |
497 | |
498 | r = amdgpu_bo_reserve(robj, false); |
499 | if (unlikely(r != 0)) |
500 | goto out; |
501 | |
502 | if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) { |
503 | amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info); |
504 | r = amdgpu_bo_get_metadata(robj, args->data.data, |
505 | sizeof(args->data.data), |
506 | &args->data.data_size_bytes, |
507 | &args->data.flags); |
508 | } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { |
509 | if (args->data.data_size_bytes > sizeof(args->data.data)) { |
510 | r = -EINVAL; |
511 | goto unreserve; |
512 | } |
513 | r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); |
514 | if (!r) |
515 | r = amdgpu_bo_set_metadata(robj, args->data.data, |
516 | args->data.data_size_bytes, |
517 | args->data.flags); |
518 | } |
519 | |
520 | unreserve: |
521 | amdgpu_bo_unreserve(robj); |
522 | out: |
523 | drm_gem_object_put(gobj); |
524 | return r; |
525 | } |
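A hedged sketch of driving this handler from userspace to read back a BO's tiling flags and metadata (struct layout per amdgpu_drm.h; illustrative only):

    struct drm_amdgpu_gem_metadata md = {0};

    md.handle = handle; /* GEM handle from GEM_CREATE */
    md.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;
    if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_METADATA, &md) == 0)
        printf("tiling 0x%llx, %u metadata bytes\n",
               (unsigned long long)md.data.tiling_info,
               md.data.data_size_bytes);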
526 | |
527 | /** |
528 | * amdgpu_gem_va_update_vm - update the bo_va in its VM |
529 | * |
530 | * @adev: amdgpu_device pointer |
531 | * @vm: vm to update |
532 | * @bo_va: bo_va to update |
533 | * @operation: map, unmap or clear |
534 | * |
535 | * Update the bo_va directly after setting its address. Errors are not |
536 | * vital here, so they are not reported back to userspace. |
537 | */ |
538 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, |
539 | struct amdgpu_vm *vm, |
540 | struct amdgpu_bo_va *bo_va, |
541 | uint32_t operation) |
542 | { |
543 | int r; |
544 | |
545 | if (!amdgpu_vm_ready(vm)) |
546 | return; |
547 | |
548 | r = amdgpu_vm_clear_freed(adev, vm, NULL); |
549 | if (r) |
550 | goto error; |
551 | |
552 | if (operation == AMDGPU_VA_OP_MAP || |
553 | operation == AMDGPU_VA_OP_REPLACE) { |
554 | r = amdgpu_vm_bo_update(adev, bo_va, false); |
555 | if (r) |
556 | goto error; |
557 | } |
558 | |
559 | r = amdgpu_vm_update_pdes(adev, vm, false); |
560 | |
561 | error: |
562 | if (r && r != -ERESTARTSYS) |
563 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); |
564 | } |
565 | |
566 | /** |
567 | * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags |
568 | * |
569 | * @adev: amdgpu_device pointer |
570 | * @flags: GEM UAPI flags |
571 | * |
572 | * Returns the GEM UAPI flags mapped into hardware for the ASIC. |
573 | */ |
574 | uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) |
575 | { |
576 | uint64_t pte_flag = 0; |
577 | |
578 | if (flags & AMDGPU_VM_PAGE_EXECUTABLE) |
579 | pte_flag |= AMDGPU_PTE_EXECUTABLE; |
580 | if (flags & AMDGPU_VM_PAGE_READABLE) |
581 | pte_flag |= AMDGPU_PTE_READABLE; |
582 | if (flags & AMDGPU_VM_PAGE_WRITEABLE) |
583 | pte_flag |= AMDGPU_PTE_WRITEABLE; |
584 | if (flags & AMDGPU_VM_PAGE_PRT) |
585 | pte_flag |= AMDGPU_PTE_PRT; |
586 | |
587 | if (adev->gmc.gmc_funcs->map_mtype) |
588 | pte_flag |= amdgpu_gmc_map_mtype(adev, |
589 | flags & AMDGPU_VM_MTYPE_MASK); |
590 | |
591 | return pte_flag; |
592 | } |
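As a worked example of the translation above (bit values per amdgpu_drm.h and the PTE definitions): a request with AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE, i.e. (1 << 1) | (1 << 2), yields AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, i.e. (1ULL << 5) | (1ULL << 6) = 0x60, plus whatever MTYPE bits the ASIC's map_mtype callback adds:

    uint64_t pte = amdgpu_gem_va_map_flags(adev,
        AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE);
    /* pte == 0x60 on an ASIC whose map_mtype callback adds nothing here */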
593 | |
594 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
595 | struct drm_file *filp) |
596 | { |
597 | const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE | |
598 | AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | |
599 | AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK; |
600 | const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE | |
601 | AMDGPU_VM_PAGE_PRT; |
602 | |
603 | struct drm_amdgpu_gem_va *args = data; |
604 | struct drm_gem_object *gobj; |
605 | struct amdgpu_device *adev = drm_to_adev(dev); |
606 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
607 | struct amdgpu_bo *abo; |
608 | struct amdgpu_bo_va *bo_va; |
609 | struct amdgpu_bo_list_entry vm_pd; |
610 | struct ttm_validate_buffer tv; |
611 | struct ww_acquire_ctx ticket; |
612 | struct list_head list, duplicates; |
613 | uint64_t va_flags; |
614 | uint64_t vm_size; |
615 | int r = 0; |
616 | |
617 | if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { |
618 | dev_dbg(&dev->pdev->dev, |
619 | "va_address 0x%llX is in reserved area 0x%llX\n", |
620 | args->va_address, AMDGPU_VA_RESERVED_SIZE); |
621 | return -EINVAL; |
622 | } |
623 | |
624 | if (args->va_address >= AMDGPU_GMC_HOLE_START && |
625 | args->va_address < AMDGPU_GMC_HOLE_END) { |
626 | dev_dbg(&dev->pdev->dev, |
627 | "va_address 0x%llX is in VA hole 0x%llX-0x%llX\n", |
628 | args->va_address, AMDGPU_GMC_HOLE_START, |
629 | AMDGPU_GMC_HOLE_END); |
630 | return -EINVAL; |
631 | } |
632 | |
633 | args->va_address &= AMDGPU_GMC_HOLE_MASK; |
634 | |
635 | vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; |
636 | vm_size -= AMDGPU_VA_RESERVED_SIZE; |
637 | if (args->va_address + args->map_size > vm_size) { |
638 | dev_dbg(&dev->pdev->dev, |
639 | "va_address 0x%llx is in top reserved area 0x%llx\n", |
640 | args->va_address + args->map_size, vm_size); |
641 | return -EINVAL; |
642 | } |
643 | |
644 | if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { |
645 | dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", |
646 | args->flags); |
647 | return -EINVAL; |
648 | } |
649 | |
650 | switch (args->operation) { |
651 | case AMDGPU_VA_OP_MAP: |
652 | case AMDGPU_VA_OP_UNMAP: |
653 | case AMDGPU_VA_OP_CLEAR: |
654 | case AMDGPU_VA_OP_REPLACE: |
655 | break; |
656 | default: |
657 | dev_dbg(&dev->pdev->dev, "unsupported operation %d\n", |
658 | args->operation); |
659 | return -EINVAL; |
660 | } |
661 | |
662 | INIT_LIST_HEAD(&list); |
663 | INIT_LIST_HEAD(&duplicates); |
664 | if ((args->operation != AMDGPU_VA_OP_CLEAR) && |
665 | !(args->flags & AMDGPU_VM_PAGE_PRT)) { |
666 | gobj = drm_gem_object_lookup(filp, args->handle); |
667 | if (gobj == NULL) |
668 | return -ENOENT; |
669 | abo = gem_to_amdgpu_bo(gobj); |
670 | tv.bo = &abo->tbo; |
671 | if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) |
672 | tv.num_shared = 1; |
673 | else |
674 | tv.num_shared = 0; |
675 | list_add(&tv.head, &list); |
676 | } else { |
677 | gobj = NULL; |
678 | abo = NULL; |
679 | } |
680 | |
681 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); |
682 | |
683 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); |
684 | if (r) |
685 | goto error_unref; |
686 | |
687 | if (abo) { |
688 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); |
689 | if (!bo_va) { |
690 | r = -ENOENT; |
691 | goto error_backoff; |
692 | } |
693 | } else if (args->operation != AMDGPU_VA_OP_CLEAR) { |
694 | bo_va = fpriv->prt_va; |
695 | } else { |
696 | bo_va = NULL; |
697 | } |
698 | |
699 | switch (args->operation) { |
700 | case AMDGPU_VA_OP_MAP: |
701 | va_flags = amdgpu_gem_va_map_flags(adev, args->flags); |
702 | r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, |
703 | args->offset_in_bo, args->map_size, |
704 | va_flags); |
705 | break; |
706 | case AMDGPU_VA_OP_UNMAP: |
707 | r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); |
708 | break; |
709 | |
710 | case AMDGPU_VA_OP_CLEAR: |
711 | r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm, |
712 | args->va_address, |
713 | args->map_size); |
714 | break; |
715 | case AMDGPU_VA_OP_REPLACE: |
716 | va_flags = amdgpu_gem_va_map_flags(adev, args->flags); |
717 | r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, |
718 | args->offset_in_bo, args->map_size, |
719 | va_flags); |
720 | break; |
721 | default: |
722 | break; |
723 | } |
724 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) |
725 | amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, |
726 | args->operation); |
727 | |
728 | error_backoff: |
729 | ttm_eu_backoff_reservation(&ticket, &list); |
730 | |
731 | error_unref: |
732 | drm_gem_object_put(gobj); |
733 | return r; |
734 | } |
735 | |
736 | int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, |
737 | struct drm_file *filp) |
738 | { |
739 | struct amdgpu_device *adev = drm_to_adev(dev); |
740 | struct drm_amdgpu_gem_op *args = data; |
741 | struct drm_gem_object *gobj; |
742 | struct amdgpu_vm_bo_base *base; |
743 | struct amdgpu_bo *robj; |
744 | int r; |
745 | |
746 | gobj = drm_gem_object_lookup(filp, args->handle); |
747 | if (gobj == NULL) { |
748 | return -ENOENT; |
749 | } |
750 | robj = gem_to_amdgpu_bo(gobj); |
751 | |
752 | r = amdgpu_bo_reserve(robj, false); |
753 | if (unlikely(r)) |
754 | goto out; |
755 | |
756 | switch (args->op) { |
757 | case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { |
758 | struct drm_amdgpu_gem_create_in info; |
759 | void __user *out = u64_to_user_ptr(args->value); |
760 | |
761 | info.bo_size = robj->tbo.base.size; |
762 | info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; |
763 | info.domains = robj->preferred_domains; |
764 | info.domain_flags = robj->flags; |
765 | amdgpu_bo_unreserve(robj); |
766 | if (copy_to_user(out, &info, sizeof(info))) |
767 | r = -EFAULT; |
768 | break; |
769 | } |
770 | case AMDGPU_GEM_OP_SET_PLACEMENT: |
771 | if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) { |
772 | r = -EINVAL; |
773 | amdgpu_bo_unreserve(robj); |
774 | break; |
775 | } |
776 | if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { |
777 | r = -EPERM; |
778 | amdgpu_bo_unreserve(robj); |
779 | break; |
780 | } |
781 | for (base = robj->vm_bo; base; base = base->next) |
782 | if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), |
783 | amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { |
784 | r = -EINVAL; |
785 | amdgpu_bo_unreserve(robj); |
786 | goto out; |
787 | } |
788 | |
789 | |
790 | robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | |
791 | AMDGPU_GEM_DOMAIN_GTT | |
792 | AMDGPU_GEM_DOMAIN_CPU); |
793 | robj->allowed_domains = robj->preferred_domains; |
794 | if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) |
795 | robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; |
796 | |
797 | if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) |
798 | amdgpu_vm_bo_invalidate(adev, robj, true); |
799 | |
800 | amdgpu_bo_unreserve(robj); |
801 | break; |
802 | default: |
803 | amdgpu_bo_unreserve(robj); |
804 | r = -EINVAL; |
805 | } |
806 | |
807 | out: |
808 | drm_gem_object_put(gobj); |
809 | return r; |
810 | } |
811 | |
812 | int amdgpu_mode_dumb_create(struct drm_file *file_priv, |
813 | struct drm_device *dev, |
814 | struct drm_mode_create_dumb *args) |
815 | { |
816 | struct amdgpu_device *adev = drm_to_adev(dev); |
817 | struct drm_gem_object *gobj; |
818 | uint32_t handle; |
819 | u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
820 | AMDGPU_GEM_CREATE_CPU_GTT_USWC; |
821 | u32 domain; |
822 | int r; |
823 | |
824 | /* |
825 | * The buffer returned from this function should be cleared, but |
826 | * it can only be done if the ring is enabled or we'll fail to |
827 | * create the buffer. |
828 | */ |
829 | if (adev->mman.buffer_funcs_enabled) |
830 | flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; |
831 | |
832 | args->pitch = amdgpu_align_pitch(adev, args->width, |
833 | DIV_ROUND_UP(args->bpp, 8), 0); |
834 | args->size = (u64)args->pitch * args->height; |
835 | args->size = roundup2(args->size, PAGE_SIZE); |
836 | domain = amdgpu_bo_get_preferred_pin_domain(adev, |
837 | amdgpu_display_supported_domains(adev, flags)); |
838 | r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, |
839 | ttm_bo_type_device, NULL, &gobj); |
840 | if (r) |
841 | return -ENOMEM; |
842 | |
843 | r = drm_gem_handle_create(file_priv, gobj, &handle); |
844 | /* drop reference from allocate - handle holds it now */ |
845 | drm_gem_object_put(gobj); |
846 | if (r) { |
847 | return r; |
848 | } |
849 | args->handle = handle; |
850 | return 0; |
851 | } |
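For a concrete feel of the pitch/size math above, a 1920x1080, 32 bpp dumb buffer works out as follows (illustrative, assuming amdgpu_align_pitch keeps the already-aligned value):

    /* pitch = align(1920 * DIV_ROUND_UP(32, 8)) = 7680 bytes         */
    /* size  = 7680 * 1080                       = 8294400 bytes      */
    /* size  = roundup2(8294400, PAGE_SIZE)      = 8294400 (aligned)  */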
852 | |
853 | #if defined(CONFIG_DEBUG_FS) |
854 | |
855 | #define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \ |
856 | if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \ |
857 | seq_printf((m), " " #flag); \ |
858 | } |
859 | |
860 | static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) |
861 | { |
862 | struct drm_gem_object *gobj = ptr; |
863 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); |
864 | struct seq_file *m = data; |
865 | |
866 | struct dma_buf_attachment *attachment; |
867 | struct dma_buf *dma_buf; |
868 | unsigned domain; |
869 | const char *placement; |
870 | unsigned pin_count; |
871 | |
872 | domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
873 | switch (domain) { |
874 | case AMDGPU_GEM_DOMAIN_VRAM: |
875 | placement = "VRAM"; |
876 | break; |
877 | case AMDGPU_GEM_DOMAIN_GTT: |
878 | placement = " GTT"; |
879 | break; |
880 | case AMDGPU_GEM_DOMAIN_CPU: |
881 | default: |
882 | placement = " CPU"; |
883 | break; |
884 | } |
885 | seq_printf(m, "\t0x%08x: %12ld byte %s", |
886 | id, amdgpu_bo_size(bo), placement); |
887 | |
888 | pin_count = READ_ONCE(bo->pin_count); |
889 | if (pin_count) |
890 | seq_printf(m, " pin count %d", pin_count); |
891 | |
892 | dma_buf = READ_ONCE(bo->tbo.base.dma_buf); |
893 | attachment = READ_ONCE(bo->tbo.base.import_attach); |
894 | |
895 | if (attachment) |
896 | seq_printf(m, " imported from %p%s", dma_buf, |
897 | attachment->peer2peer ? " P2P" : ""); |
898 | else if (dma_buf) |
899 | seq_printf(m, " exported as %p", dma_buf); |
900 | |
901 | amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); |
902 | amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS); |
903 | amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC); |
904 | amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED); |
905 | amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW); |
906 | amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS); |
907 | amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID); |
908 | amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC); |
909 | |
910 | seq_printf(m, "\n"); |
911 | |
912 | return 0; |
913 | } |
914 | |
915 | static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data) |
916 | { |
917 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
918 | struct drm_device *dev = node->minor->dev; |
919 | struct drm_file *file; |
920 | int r; |
921 | |
922 | r = mutex_lock_interruptible(&dev->filelist_mutex); |
923 | if (r) |
924 | return r; |
925 | |
926 | list_for_each_entry(file, &dev->filelist, lhead) { |
927 | struct task_struct *task; |
928 | |
929 | /* |
930 | * Although we have a valid reference on file->pid, that does |
931 | * not guarantee that the task_struct who called get_pid() is |
932 | * still alive (e.g. get_pid(current) => fork() => exit()). |
933 | * Therefore, we need to protect this ->comm access using RCU. |
934 | */ |
935 | rcu_read_lock(); |
936 | task = pid_task(file->pid, PIDTYPE_PID); |
937 | seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), |
938 | task ? task->comm : "<unknown>"); |
939 | rcu_read_unlock(); |
940 | |
941 | spin_lock(&file->table_lock); |
942 | idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m); |
943 | spin_unlock(&file->table_lock); |
944 | } |
945 | |
946 | mutex_unlock(&dev->filelist_mutex); |
947 | return 0; |
948 | } |
949 | |
950 | static const struct drm_info_list amdgpu_debugfs_gem_list[] = { |
951 | {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL((void *)0)}, |
952 | }; |
953 | #endif |
954 | |
955 | int amdgpu_debugfs_gem_init(struct amdgpu_device *adev) |
956 | { |
957 | #if defined(CONFIG_DEBUG_FS) |
958 | return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, |
959 | ARRAY_SIZE(amdgpu_debugfs_gem_list)); |
960 | #endif |
961 | return 0; |
962 | } |