File: dev/pci/drm/amd/amdgpu/amdgpu_object.c
Warning: line 58, column 24: Value stored to 'adev' during its initialization is never read
1 | /* |
2 | * Copyright 2009 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse <glisse@freedesktop.org> |
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
30 | * Dave Airlie |
31 | */ |
32 | #include <linux/list.h> |
33 | #include <linux/slab.h> |
34 | #include <linux/dma-buf.h> |
35 | |
36 | #include <drm/drm_drv.h> |
37 | #include <drm/amdgpu_drm.h> |
38 | #include <drm/drm_cache.h> |
39 | #include "amdgpu.h" |
40 | #include "amdgpu_trace.h" |
41 | #include "amdgpu_amdkfd.h" |
42 | |
43 | /** |
44 | * DOC: amdgpu_object |
45 | * |
46 | * This defines the interfaces to operate on an &amdgpu_bo buffer object which |
47 | * represents memory used by the driver (VRAM, system memory, etc.). The |
48 | * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these |
49 | * interfaces to create/destroy/set buffer objects, which are then managed by |
50 | * the kernel TTM memory manager. |
51 | * The interfaces are also used internally by kernel clients, including gfx, |
52 | * uvd, etc. for kernel managed allocations used by the GPU. |
53 | * |
54 | */ |
55 | |
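As a minimal sketch of the lifecycle these interfaces imply (the helper names my_scratch_init/my_scratch_fini and the choice of domain and size are illustrative, not part of this file), a kernel client typically pairs amdgpu_bo_create_kernel() with amdgpu_bo_free_kernel():

	/* Hypothetical helper: create, pin and map one page of VRAM. */
	static int my_scratch_init(struct amdgpu_device *adev,
				   struct amdgpu_bo **bo,
				   u64 *gpu_addr, void **cpu_addr)
	{
		return amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       bo, gpu_addr, cpu_addr);
	}

	/* Teardown: unmaps, unpins and drops the reference. */
	static void my_scratch_fini(struct amdgpu_bo **bo,
				    u64 *gpu_addr, void **cpu_addr)
	{
		amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
	}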
56 | static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) |
57 | { |
58 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
Value stored to 'adev' during its initialization is never read | |
59 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); |
60 | |
61 | amdgpu_bo_kunmap(bo); |
62 | |
63 | if (bo->tbo.base.import_attach) |
64 | drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); |
65 | drm_gem_object_release(&bo->tbo.base); |
66 | amdgpu_bo_unref(&bo->parent); |
67 | kvfree(bo); |
68 | } |
69 | |
70 | static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) |
71 | { |
72 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); |
73 | struct amdgpu_bo_user *ubo; |
74 | |
75 | ubo = to_amdgpu_bo_user(bo); |
76 | kfree(ubo->metadata); |
77 | amdgpu_bo_destroy(tbo); |
78 | } |
79 | |
80 | static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) |
81 | { |
82 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
83 | struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; |
84 | struct amdgpu_bo_vm *vmbo; |
85 | |
86 | bo = shadow_bo->parent; |
87 | vmbo = to_amdgpu_bo_vm(bo); |
88 | /* in case amdgpu_device_recover_vram got a NULL bo->parent */ |
89 | if (!list_empty(&vmbo->shadow_list)) { |
90 | mutex_lock(&adev->shadow_list_lock); |
91 | list_del_init(&vmbo->shadow_list); |
92 | mutex_unlock(&adev->shadow_list_lock); |
93 | } |
94 | |
95 | amdgpu_bo_destroy(tbo); |
96 | } |
97 | |
98 | /** |
99 | * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo |
100 | * @bo: buffer object to be checked |
101 | * |
102 | * Uses destroy function associated with the object to determine if this is |
103 | * an &amdgpu_bo. |
104 | * |
105 | * Returns: |
106 | * true if the object belongs to &amdgpu_bo, false if not. |
107 | */ |
108 | bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) |
109 | { |
110 | if (bo->destroy == &amdgpu_bo_destroy || |
111 | bo->destroy == &amdgpu_bo_user_destroy || |
112 | bo->destroy == &amdgpu_bo_vm_destroy) |
113 | return true; |
114 | |
115 | return false; |
116 | } |
117 | |
118 | /** |
119 | * amdgpu_bo_placement_from_domain - set buffer's placement |
120 | * @abo: &amdgpu_bo buffer object whose placement is to be set |
121 | * @domain: requested domain |
122 | * |
123 | * Sets buffer's placement according to requested domain and the buffer's |
124 | * flags. |
125 | */ |
126 | void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) |
127 | { |
128 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
129 | struct ttm_placement *placement = &abo->placement; |
130 | struct ttm_place *places = abo->placements; |
131 | u64 flags = abo->flags; |
132 | u32 c = 0; |
133 | |
134 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { |
135 | unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; |
136 | |
137 | places[c].fpfn = 0; |
138 | places[c].lpfn = 0; |
139 | places[c].mem_type = TTM_PL_VRAM; |
140 | places[c].flags = 0; |
141 | |
142 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) |
143 | places[c].lpfn = visible_pfn; |
144 | else |
145 | places[c].flags |= TTM_PL_FLAG_TOPDOWN; |
146 | |
147 | if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) |
148 | places[c].flags |= TTM_PL_FLAG_CONTIGUOUS; |
149 | c++; |
150 | } |
151 | |
152 | if (domain & AMDGPU_GEM_DOMAIN_GTT) { |
153 | places[c].fpfn = 0; |
154 | places[c].lpfn = 0; |
155 | places[c].mem_type = |
156 | abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ? |
157 | AMDGPU_PL_PREEMPT : TTM_PL_TT; |
158 | places[c].flags = 0; |
159 | c++; |
160 | } |
161 | |
162 | if (domain & AMDGPU_GEM_DOMAIN_CPU) { |
163 | places[c].fpfn = 0; |
164 | places[c].lpfn = 0; |
165 | places[c].mem_type = TTM_PL_SYSTEM; |
166 | places[c].flags = 0; |
167 | c++; |
168 | } |
169 | |
170 | if (domain & AMDGPU_GEM_DOMAIN_GDS) { |
171 | places[c].fpfn = 0; |
172 | places[c].lpfn = 0; |
173 | places[c].mem_type = AMDGPU_PL_GDS; |
174 | places[c].flags = 0; |
175 | c++; |
176 | } |
177 | |
178 | if (domain & AMDGPU_GEM_DOMAIN_GWS) { |
179 | places[c].fpfn = 0; |
180 | places[c].lpfn = 0; |
181 | places[c].mem_type = AMDGPU_PL_GWS; |
182 | places[c].flags = 0; |
183 | c++; |
184 | } |
185 | |
186 | if (domain & AMDGPU_GEM_DOMAIN_OA) { |
187 | places[c].fpfn = 0; |
188 | places[c].lpfn = 0; |
189 | places[c].mem_type = AMDGPU_PL_OA; |
190 | places[c].flags = 0; |
191 | c++; |
192 | } |
193 | |
194 | if (!c) { |
195 | places[c].fpfn = 0; |
196 | places[c].lpfn = 0; |
197 | places[c].mem_type = TTM_PL_SYSTEM; |
198 | places[c].flags = 0; |
199 | c++; |
200 | } |
201 | |
202 | BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS); |
203 | |
204 | placement->num_placement = c; |
205 | placement->placement = places; |
206 | |
207 | placement->num_busy_placement = c; |
208 | placement->busy_placement = places; |
209 | } |
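Because the if-blocks above fill places[] in a fixed order, a multi-domain request yields a priority-ordered list; this call (illustrative only) asks for VRAM first with a GTT fallback:

	/* places[0] becomes TTM_PL_VRAM and places[1] TTM_PL_TT; TTM then
	 * tries the placements in array order. */
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					     AMDGPU_GEM_DOMAIN_GTT);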
210 | |
211 | /** |
212 | * amdgpu_bo_create_reserved - create reserved BO for kernel use |
213 | * |
214 | * @adev: amdgpu device object |
215 | * @size: size for the new BO |
216 | * @align: alignment for the new BO |
217 | * @domain: where to place it |
218 | * @bo_ptr: used to initialize BOs in structures |
219 | * @gpu_addr: GPU addr of the pinned BO |
220 | * @cpu_addr: optional CPU address mapping |
221 | * |
222 | * Allocates and pins a BO for kernel internal use, and returns it still |
223 | * reserved. |
224 | * |
225 | * Note: a new BO is created only if *bo_ptr is NULL. |
226 | * |
227 | * Returns: |
228 | * 0 on success, negative error code otherwise. |
229 | */ |
230 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, |
231 | unsigned long size, int align, |
232 | u32 domain, struct amdgpu_bo **bo_ptr, |
233 | u64 *gpu_addr, void **cpu_addr) |
234 | { |
235 | struct amdgpu_bo_param bp; |
236 | bool free = false; |
237 | int r; |
238 | |
239 | if (!size) { |
240 | amdgpu_bo_unref(bo_ptr); |
241 | return 0; |
242 | } |
243 | |
244 | memset(&bp, 0, sizeof(bp)); |
245 | bp.size = size; |
246 | bp.byte_align = align; |
247 | bp.domain = domain; |
248 | bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
249 | : AMDGPU_GEM_CREATE_NO_CPU_ACCESS; |
250 | bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
251 | bp.type = ttm_bo_type_kernel; |
252 | bp.resv = NULL; |
253 | bp.bo_ptr_size = sizeof(struct amdgpu_bo); |
254 | |
255 | if (!*bo_ptr) { |
256 | r = amdgpu_bo_create(adev, &bp, bo_ptr); |
257 | if (r) { |
258 | dev_err(adev->dev, |
259 | "(%d) failed to allocate kernel bo\n", r); |
260 | return r; |
261 | } |
262 | free = true; |
263 | } |
264 | |
265 | r = amdgpu_bo_reserve(*bo_ptr, false); |
266 | if (r) { |
267 | dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r); |
268 | goto error_free; |
269 | } |
270 | |
271 | r = amdgpu_bo_pin(*bo_ptr, domain); |
272 | if (r) { |
273 | dev_err(adev->dev, "(%d) kernel bo pin failed\n", r); |
274 | goto error_unreserve; |
275 | } |
276 | |
277 | r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo); |
278 | if (r) { |
279 | dev_err(adev->dev, "%p bind failed\n", *bo_ptr); |
280 | goto error_unpin; |
281 | } |
282 | |
283 | if (gpu_addr) |
284 | *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr); |
285 | |
286 | if (cpu_addr) { |
287 | r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); |
288 | if (r) { |
289 | dev_err(adev->dev, "(%d) kernel bo map failed\n", r); |
290 | goto error_unpin; |
291 | } |
292 | } |
293 | |
294 | return 0; |
295 | |
296 | error_unpin: |
297 | amdgpu_bo_unpin(*bo_ptr); |
298 | error_unreserve: |
299 | amdgpu_bo_unreserve(*bo_ptr); |
300 | |
301 | error_free: |
302 | if (free) |
303 | amdgpu_bo_unref(bo_ptr); |
304 | |
305 | return r; |
306 | } |
307 | |
308 | /** |
309 | * amdgpu_bo_create_kernel - create BO for kernel use |
310 | * |
311 | * @adev: amdgpu device object |
312 | * @size: size for the new BO |
313 | * @align: alignment for the new BO |
314 | * @domain: where to place it |
315 | * @bo_ptr: used to initialize BOs in structures |
316 | * @gpu_addr: GPU addr of the pinned BO |
317 | * @cpu_addr: optional CPU address mapping |
318 | * |
319 | * Allocates and pins a BO for kernel internal use. |
320 | * |
321 | * Note: a new BO is created only if *bo_ptr is NULL. |
322 | * |
323 | * Returns: |
324 | * 0 on success, negative error code otherwise. |
325 | */ |
326 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
327 | unsigned long size, int align, |
328 | u32 domain, struct amdgpu_bo **bo_ptr, |
329 | u64 *gpu_addr, void **cpu_addr) |
330 | { |
331 | int r; |
332 | |
333 | r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr, |
334 | gpu_addr, cpu_addr); |
335 | |
336 | if (r) |
337 | return r; |
338 | |
339 | if (*bo_ptr) |
340 | amdgpu_bo_unreserve(*bo_ptr); |
341 | |
342 | return 0; |
343 | } |
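The difference from amdgpu_bo_create_reserved() is only the final unreserve; a caller that needs to initialize the buffer under the reservation can use the reserved variant directly (a sketch, with an assumed 4 KiB size and GTT domain):

	struct amdgpu_bo *bo = NULL;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_reserved(adev, 4096, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT, &bo,
				      &gpu_addr, &cpu_ptr);
	if (r)
		return r;
	memset(cpu_ptr, 0, 4096);	/* safe: still reserved and pinned */
	amdgpu_bo_unreserve(bo);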
344 | |
345 | /** |
346 | * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location |
347 | * |
348 | * @adev: amdgpu device object |
349 | * @offset: offset of the BO |
350 | * @size: size of the BO |
351 | * @bo_ptr: used to initialize BOs in structures |
352 | * @cpu_addr: optional CPU address mapping |
353 | * |
354 | * Creates a kernel BO at a specific offset in VRAM. |
355 | * |
356 | * Returns: |
357 | * 0 on success, negative error code otherwise. |
358 | */ |
359 | int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, |
360 | uint64_t offset, uint64_t size, |
361 | struct amdgpu_bo **bo_ptr, void **cpu_addr) |
362 | { |
363 | struct ttm_operation_ctx ctx = { false, false }; |
364 | unsigned int i; |
365 | int r; |
366 | |
367 | offset &= LINUX_PAGE_MASK; |
368 | size = roundup2(size, PAGE_SIZE); |
369 | |
370 | r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, |
371 | AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL, |
372 | cpu_addr); |
373 | if (r) |
374 | return r; |
375 | |
376 | if ((*bo_ptr) == NULL) |
377 | return 0; |
378 | |
379 | /* |
380 | * Remove the original mem node and create a new one at the requested |
381 | * position. |
382 | */ |
383 | if (cpu_addr) |
384 | amdgpu_bo_kunmap(*bo_ptr); |
385 | |
386 | ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource); |
387 | |
388 | for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { |
389 | (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; |
390 | (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
391 | } |
392 | r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, |
393 | &(*bo_ptr)->tbo.resource, &ctx); |
394 | if (r) |
395 | goto error; |
396 | |
397 | if (cpu_addr) { |
398 | r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); |
399 | if (r) |
400 | goto error; |
401 | } |
402 | |
403 | amdgpu_bo_unreserve(*bo_ptr); |
404 | return 0; |
405 | |
406 | error: |
407 | amdgpu_bo_unreserve(*bo_ptr); |
408 | amdgpu_bo_unref(bo_ptr); |
409 | return r; |
410 | } |
411 | |
412 | /** |
413 | * amdgpu_bo_free_kernel - free BO for kernel use |
414 | * |
415 | * @bo: amdgpu BO to free |
416 | * @gpu_addr: pointer to where the BO's GPU memory space address was stored |
417 | * @cpu_addr: pointer to where the BO's CPU memory space address was stored |
418 | * |
419 | * Unmaps and unpins a BO for kernel internal use. |
420 | */ |
421 | void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, |
422 | void **cpu_addr) |
423 | { |
424 | if (*bo == NULL) |
425 | return; |
426 | |
427 | if (likely(amdgpu_bo_reserve(*bo, true) == 0)) { |
428 | if (cpu_addr) |
429 | amdgpu_bo_kunmap(*bo); |
430 | |
431 | amdgpu_bo_unpin(*bo); |
432 | amdgpu_bo_unreserve(*bo); |
433 | } |
434 | amdgpu_bo_unref(bo); |
435 | |
436 | if (gpu_addr) |
437 | *gpu_addr = 0; |
438 | |
439 | if (cpu_addr) |
440 | *cpu_addr = NULL; |
441 | } |
442 | |
443 | /* Validate that the BO size fits within the requested domain */ |
444 | static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, |
445 | unsigned long size, u32 domain) |
446 | { |
447 | struct ttm_resource_manager *man = NULL; |
448 | |
449 | /* |
450 | * If GTT is part of requested domains the check must succeed to |
451 | * allow fall back to GTT. |
452 | */ |
453 | if (domain & AMDGPU_GEM_DOMAIN_GTT) { |
454 | man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); |
455 | |
456 | if (man && size < man->size) |
457 | return true; |
458 | else if (!man) |
459 | WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized"); |
460 | goto fail; |
461 | } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) { |
462 | man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); |
463 | |
464 | if (man && size < man->size) |
465 | return true; |
466 | goto fail; |
467 | } |
468 | |
469 | /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */ |
470 | return true; |
471 | |
472 | fail: |
473 | if (man) |
474 | DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, |
475 | man->size); |
476 | return false; |
477 | } |
478 | |
479 | bool amdgpu_bo_support_uswc(u64 bo_flags) |
480 | { |
481 | |
482 | #ifdef CONFIG_X86_32 |
483 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
484 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
485 | */ |
486 | return false; |
487 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) |
488 | /* Don't try to enable write-combining when it can't work, or things |
489 | * may be slow |
490 | * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 |
491 | */ |
492 | |
493 | #ifndef CONFIG_COMPILE_TEST |
494 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ |
495 | thanks to write-combining |
496 | #endif |
497 | |
498 | if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) |
499 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " |
500 | "better performance thanks to write-combining\n"); |
501 | return false; |
502 | #else |
503 | /* For architectures that don't support WC memory, |
504 | * mask out the WC flag from the BO |
505 | */ |
506 | if (!drm_arch_can_wc_memory()) |
507 | return false; |
508 | |
509 | return true; |
510 | #endif |
511 | } |
512 | |
513 | /** |
514 | * amdgpu_bo_create - create an &amdgpu_bo buffer object |
515 | * @adev: amdgpu device object |
516 | * @bp: parameters to be used for the buffer object |
517 | * @bo_ptr: pointer to the buffer object pointer |
518 | * |
519 | * Creates an &amdgpu_bo buffer object. |
520 | * |
521 | * Returns: |
522 | * 0 for success or a negative error code on failure. |
523 | */ |
524 | int amdgpu_bo_create(struct amdgpu_device *adev, |
525 | struct amdgpu_bo_param *bp, |
526 | struct amdgpu_bo **bo_ptr) |
527 | { |
528 | struct ttm_operation_ctx ctx = { |
529 | .interruptible = (bp->type != ttm_bo_type_kernel), |
530 | .no_wait_gpu = bp->no_wait_gpu, |
531 | /* We opt to avoid OOM on system pages allocations */ |
532 | .gfp_retry_mayfail = true, |
533 | .allow_res_evict = bp->type != ttm_bo_type_kernel, |
534 | .resv = bp->resv |
535 | }; |
536 | struct amdgpu_bo *bo; |
537 | unsigned long page_align, size = bp->size; |
538 | int r; |
539 | |
540 | /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */ |
541 | if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { |
542 | /* GWS and OA don't need any alignment. */ |
543 | page_align = bp->byte_align; |
544 | size <<= PAGE_SHIFT; |
545 | } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) { |
546 | /* Both size and alignment must be a multiple of 4. */ |
547 | page_align = roundup2(bp->byte_align, 4); |
548 | size = roundup2(size, 4) << PAGE_SHIFT; |
549 | } else { |
550 | /* Memory should be aligned at least to a page size. */ |
551 | page_align = roundup2(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT; |
552 | size = roundup2(size, PAGE_SIZE); |
553 | } |
554 | |
555 | if (!amdgpu_bo_validate_size(adev, size, bp->domain)) |
556 | return -ENOMEM; |
557 | |
558 | BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo)); |
559 | |
560 | *bo_ptr = NULL; |
561 | bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL); |
562 | if (bo == NULL) |
563 | return -ENOMEM; |
564 | drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); |
565 | bo->adev = adev; |
566 | bo->vm_bo = NULL; |
567 | bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : |
568 | bp->domain; |
569 | bo->allowed_domains = bo->preferred_domains; |
570 | if (bp->type != ttm_bo_type_kernel && |
571 | !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) && |
572 | bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) |
573 | bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; |
574 | |
575 | bo->flags = bp->flags; |
576 | |
577 | if (!amdgpu_bo_support_uswc(bo->flags)) |
578 | bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; |
579 | |
580 | if (adev->ras_enabled) |
581 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; |
582 | |
583 | bo->tbo.bdev = &adev->mman.bdev; |
584 | if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | |
585 | AMDGPU_GEM_DOMAIN_GDS)) |
586 | amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); |
587 | else |
588 | amdgpu_bo_placement_from_domain(bo, bp->domain); |
589 | if (bp->type == ttm_bo_type_kernel) |
590 | bo->tbo.priority = 1; |
591 | |
592 | if (!bp->destroy) |
593 | bp->destroy = &amdgpu_bo_destroy; |
594 | |
595 | r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type, |
596 | &bo->placement, page_align, &ctx, NULL, |
597 | bp->resv, bp->destroy); |
598 | if (unlikely(r != 0)) |
599 | return r; |
600 | |
601 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && |
602 | bo->tbo.resource->mem_type == TTM_PL_VRAM && |
603 | bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) |
604 | amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, |
605 | ctx.bytes_moved); |
606 | else |
607 | amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); |
608 | |
609 | if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && |
610 | bo->tbo.resource->mem_type == TTM_PL_VRAM) { |
611 | struct dma_fence *fence; |
612 | |
613 | r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); |
614 | if (unlikely(r)) |
615 | goto fail_unreserve; |
616 | |
617 | dma_resv_add_fence(bo->tbo.base.resv, fence, |
618 | DMA_RESV_USAGE_KERNEL); |
619 | dma_fence_put(fence); |
620 | } |
621 | if (!bp->resv) |
622 | amdgpu_bo_unreserve(bo); |
623 | *bo_ptr = bo; |
624 | |
625 | trace_amdgpu_bo_create(bo); |
626 | |
627 | /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ |
628 | if (bp->type == ttm_bo_type_device) |
629 | bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
630 | |
631 | return 0; |
632 | |
633 | fail_unreserve: |
634 | if (!bp->resv) |
635 | dma_resv_unlock(bo->tbo.base.resv); |
636 | amdgpu_bo_unref(&bo); |
637 | return r; |
638 | } |
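Callers drive this function through &amdgpu_bo_param; a representative parameter block (field values are illustrative, the fields themselves all appear in this file) looks like:

	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;				/* rounded up internally */
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;				/* use the BO's own resv */
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	r = amdgpu_bo_create(adev, &bp, &bo);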
639 | |
640 | /** |
641 | * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object |
642 | * @adev: amdgpu device object |
643 | * @bp: parameters to be used for the buffer object |
644 | * @ubo_ptr: pointer to the buffer object pointer |
645 | * |
646 | * Create a BO to be used by a userspace application. |
647 | * |
648 | * Returns: |
649 | * 0 for success or a negative error code on failure. |
650 | */ |
651 | |
652 | int amdgpu_bo_create_user(struct amdgpu_device *adev, |
653 | struct amdgpu_bo_param *bp, |
654 | struct amdgpu_bo_user **ubo_ptr) |
655 | { |
656 | struct amdgpu_bo *bo_ptr; |
657 | int r; |
658 | |
659 | bp->bo_ptr_size = sizeof(struct amdgpu_bo_user); |
660 | bp->destroy = &amdgpu_bo_user_destroy; |
661 | r = amdgpu_bo_create(adev, bp, &bo_ptr); |
662 | if (r) |
663 | return r; |
664 | |
665 | *ubo_ptr = to_amdgpu_bo_user(bo_ptr); |
666 | return r; |
667 | } |
668 | |
669 | /** |
670 | * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object |
671 | * @adev: amdgpu device object |
672 | * @bp: parameters to be used for the buffer object |
673 | * @vmbo_ptr: pointer to the buffer object pointer |
674 | * |
675 | * Create a BO to be used by GPUVM. |
676 | * |
677 | * Returns: |
678 | * 0 for success or a negative error code on failure. |
679 | */ |
680 | |
681 | int amdgpu_bo_create_vm(struct amdgpu_device *adev, |
682 | struct amdgpu_bo_param *bp, |
683 | struct amdgpu_bo_vm **vmbo_ptr) |
684 | { |
685 | struct amdgpu_bo *bo_ptr; |
686 | int r; |
687 | |
688 | /* bo_ptr_size will be determined by the caller and it depends on |
689 | * num of amdgpu_vm_pt entries. |
690 | */ |
691 | BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm)); |
692 | r = amdgpu_bo_create(adev, bp, &bo_ptr); |
693 | if (r) |
694 | return r; |
695 | |
696 | *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); |
697 | return r; |
698 | } |
699 | |
700 | /** |
701 | * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list |
702 | * |
703 | * @vmbo: BO that will be inserted into the shadow list |
704 | * |
705 | * Insert a BO to the shadow list. |
706 | */ |
707 | void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) |
708 | { |
709 | struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); |
710 | |
711 | mutex_lock(&adev->shadow_list_lock); |
712 | list_add_tail(&vmbo->shadow_list, &adev->shadow_list); |
713 | vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); |
714 | vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; |
715 | mutex_unlock(&adev->shadow_list_lock); |
716 | } |
717 | |
718 | /** |
719 | * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow |
720 | * |
721 | * @shadow: &amdgpu_bo shadow to be restored |
722 | * @fence: dma_fence associated with the operation |
723 | * |
724 | * Copies a buffer object's shadow content back to the object. |
725 | * This is used for recovering a buffer from its shadow in case of a gpu |
726 | * reset where vram context may be lost. |
727 | * |
728 | * Returns: |
729 | * 0 for success or a negative error code on failure. |
730 | */ |
731 | int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) |
732 | |
733 | { |
734 | struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev); |
735 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
736 | uint64_t shadow_addr, parent_addr; |
737 | |
738 | shadow_addr = amdgpu_bo_gpu_offset(shadow); |
739 | parent_addr = amdgpu_bo_gpu_offset(shadow->parent); |
740 | |
741 | return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, |
742 | amdgpu_bo_size(shadow), NULL, fence, |
743 | true, false, false); |
744 | } |
745 | |
746 | /** |
747 | * amdgpu_bo_kmap - map an &amdgpu_bo buffer object |
748 | * @bo: &amdgpu_bo buffer object to be mapped |
749 | * @ptr: kernel virtual address to be returned |
750 | * |
751 | * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls |
752 | * amdgpu_bo_kptr() to get the kernel virtual address. |
753 | * |
754 | * Returns: |
755 | * 0 for success or a negative error code on failure. |
756 | */ |
757 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) |
758 | { |
759 | void *kptr; |
760 | long r; |
761 | |
762 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
763 | return -EPERM; |
764 | |
765 | r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL, |
766 | false, MAX_SCHEDULE_TIMEOUT); |
767 | if (r < 0) |
768 | return r; |
769 | |
770 | kptr = amdgpu_bo_kptr(bo); |
771 | if (kptr) { |
772 | if (ptr) |
773 | *ptr = kptr; |
774 | return 0; |
775 | } |
776 | |
777 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); |
778 | if (r) |
779 | return r; |
780 | |
781 | if (ptr) |
782 | *ptr = amdgpu_bo_kptr(bo); |
783 | |
784 | return 0; |
785 | } |
786 | |
787 | /** |
788 | * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object |
789 | * @bo: &amdgpu_bo buffer object |
790 | * |
791 | * Calls ttm_kmap_obj_virtual() to get the kernel virtual address |
792 | * |
793 | * Returns: |
794 | * the virtual address of a buffer object area. |
795 | */ |
796 | void *amdgpu_bo_kptr(struct amdgpu_bo *bo) |
797 | { |
798 | bool is_iomem; |
799 | |
800 | return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); |
801 | } |
802 | |
803 | /** |
804 | * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object |
805 | * @bo: &amdgpu_bo buffer object to be unmapped |
806 | * |
807 | * Unmaps a kernel map set up by amdgpu_bo_kmap(). |
808 | */ |
809 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo) |
810 | { |
811 | if (bo->kmap.bo) |
812 | ttm_bo_kunmap(&bo->kmap); |
813 | } |
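Taken together, the three helpers above give kernel code CPU access to a BO that was not created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS; a minimal sketch (the written value is arbitrary, and the BO is assumed to be reserved by the caller):

	void *ptr;
	int r;

	r = amdgpu_bo_kmap(bo, &ptr);		/* waits for kernel fences */
	if (r)
		return r;
	((u32 *)ptr)[0] = 0xcafebabe;		/* arbitrary CPU write */
	amdgpu_bo_kunmap(bo);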
814 | |
815 | /** |
816 | * amdgpu_bo_ref - reference an &amdgpu_bo buffer object |
817 | * @bo: &amdgpu_bo buffer object |
818 | * |
819 | * References the contained &ttm_buffer_object. |
820 | * |
821 | * Returns: |
822 | * a refcounted pointer to the &amdgpu_bo buffer object. |
823 | */ |
824 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) |
825 | { |
826 | if (bo == NULL) |
827 | return NULL; |
828 | |
829 | ttm_bo_get(&bo->tbo); |
830 | return bo; |
831 | } |
832 | |
833 | /** |
834 | * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object |
835 | * @bo: &amdgpu_bo buffer object |
836 | * |
837 | * Unreferences the contained &ttm_buffer_object and clears the pointer. |
838 | */ |
839 | void amdgpu_bo_unref(struct amdgpu_bo **bo) |
840 | { |
841 | struct ttm_buffer_object *tbo; |
842 | |
843 | if ((*bo) == NULL) |
844 | return; |
845 | |
846 | tbo = &((*bo)->tbo); |
847 | ttm_bo_put(tbo); |
848 | *bo = NULL; |
849 | } |
850 | |
851 | /** |
852 | * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object |
853 | * @bo: &amdgpu_bo buffer object to be pinned |
854 | * @domain: domain to be pinned to |
855 | * @min_offset: the start of requested address range |
856 | * @max_offset: the end of requested address range |
857 | * |
858 | * Pins the buffer object according to requested domain and address range. If |
859 | * the memory is unbound gart memory, binds the pages into gart table. Adjusts |
860 | * pin_count and pin_size accordingly. |
861 | * |
862 | * Pinning means to lock pages in memory along with keeping them at a fixed |
863 | * offset. It is required when a buffer can not be moved, for example, when |
864 | * a display buffer is being scanned out. |
865 | * |
866 | * Compared with amdgpu_bo_pin(), this function gives more flexibility on |
867 | * where to pin a buffer if there are specific restrictions on where a buffer |
868 | * must be located. |
869 | * |
870 | * Returns: |
871 | * 0 for success or a negative error code on failure. |
872 | */ |
873 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, |
874 | u64 min_offset, u64 max_offset) |
875 | { |
876 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
877 | struct ttm_operation_ctx ctx = { false, false }; |
878 | int r, i; |
879 | |
880 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) |
881 | return -EPERM; |
882 | |
883 | if (WARN_ON_ONCE(min_offset > max_offset)) |
884 | return -EINVAL; |
885 | |
886 | /* Check domain to be pinned to against preferred domains */ |
887 | if (bo->preferred_domains & domain) |
888 | domain = bo->preferred_domains & domain; |
889 | |
890 | /* A shared bo cannot be migrated to VRAM */ |
891 | if (bo->tbo.base.import_attach) { |
892 | if (domain & AMDGPU_GEM_DOMAIN_GTT) |
893 | domain = AMDGPU_GEM_DOMAIN_GTT; |
894 | else |
895 | return -EINVAL; |
896 | } |
897 | |
898 | if (bo->tbo.pin_count) { |
899 | uint32_t mem_type = bo->tbo.resource->mem_type; |
900 | uint32_t mem_flags = bo->tbo.resource->placement; |
901 | |
902 | if (!(domain & amdgpu_mem_type_to_domain(mem_type))) |
903 | return -EINVAL; |
904 | |
905 | if ((mem_type == TTM_PL_VRAM) && |
906 | (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) && |
907 | !(mem_flags & TTM_PL_FLAG_CONTIGUOUS)) |
908 | return -EINVAL; |
909 | |
910 | ttm_bo_pin(&bo->tbo); |
911 | |
912 | if (max_offset != 0) { |
913 | u64 domain_start = amdgpu_ttm_domain_start(adev, |
914 | mem_type); |
915 | WARN_ON_ONCE(max_offset < |
916 | (amdgpu_bo_gpu_offset(bo) - domain_start)); |
917 | } |
918 | |
919 | return 0; |
920 | } |
921 | |
922 | /* This assumes only APU display buffers are pinned with (VRAM|GTT). |
923 | * See function amdgpu_display_supported_domains() |
924 | */ |
925 | domain = amdgpu_bo_get_preferred_domain(adev, domain); |
926 | |
927 | #ifdef notyet |
928 | if (bo->tbo.base.import_attach) |
929 | dma_buf_pin(bo->tbo.base.import_attach); |
930 | #endif |
931 | |
932 | /* force to pin into visible video ram */ |
933 | if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) |
934 | bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
935 | amdgpu_bo_placement_from_domain(bo, domain); |
936 | for (i = 0; i < bo->placement.num_placement; i++) { |
937 | unsigned fpfn, lpfn; |
938 | |
939 | fpfn = min_offset >> PAGE_SHIFT; |
940 | lpfn = max_offset >> PAGE_SHIFT; |
941 | |
942 | if (fpfn > bo->placements[i].fpfn) |
943 | bo->placements[i].fpfn = fpfn; |
944 | if (!bo->placements[i].lpfn || |
945 | (lpfn && lpfn < bo->placements[i].lpfn)) |
946 | bo->placements[i].lpfn = lpfn; |
947 | } |
948 | |
949 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
950 | if (unlikely(r)) { |
951 | dev_err(adev->dev, "%p pin failed\n", bo); |
952 | goto error; |
953 | } |
954 | |
955 | ttm_bo_pin(&bo->tbo); |
956 | |
957 | domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); |
958 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { |
959 | atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size); |
960 | atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo), |
961 | &adev->visible_pin_size); |
962 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
963 | atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size); |
964 | } |
965 | |
966 | error: |
967 | return r; |
968 | } |
969 | |
970 | /** |
971 | * amdgpu_bo_pin - pin an &amdgpu_bo buffer object |
972 | * @bo: &amdgpu_bo buffer object to be pinned |
973 | * @domain: domain to be pinned to |
974 | * |
975 | * A simple wrapper to amdgpu_bo_pin_restricted(). |
976 | * Provides a simpler API for buffers that do not have any strict restrictions |
977 | * on where a buffer must be located. |
978 | * |
979 | * Returns: |
980 | * 0 for success or a negative error code on failure. |
981 | */ |
982 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) |
983 | { |
984 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
985 | return amdgpu_bo_pin_restricted(bo, domain, 0, 0); |
986 | } |
987 | |
988 | /** |
989 | * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object |
990 | * @bo: &amdgpu_bo buffer object to be unpinned |
991 | * |
992 | * Decreases the pin_count, and clears the flags if pin_count reaches 0. |
993 | * Changes placement and pin size accordingly. |
994 | * |
995 | * Note: unlike pinning, unpinning cannot fail, so this function |
996 | * returns void. |
997 | */ |
998 | void amdgpu_bo_unpin(struct amdgpu_bo *bo) |
999 | { |
1000 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1001 | |
1002 | ttm_bo_unpin(&bo->tbo); |
1003 | if (bo->tbo.pin_count) |
1004 | return; |
1005 | |
1006 | #ifdef notyet |
1007 | if (bo->tbo.base.import_attach) |
1008 | dma_buf_unpin(bo->tbo.base.import_attach); |
1009 | #endif |
1010 | |
1011 | if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { |
1012 | atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); |
1013 | atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), |
1014 | &adev->visible_pin_size); |
1015 | } else if (bo->tbo.resource->mem_type == TTM_PL_TT) { |
1016 | atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); |
1017 | } |
1018 | } |
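A typical pin/unpin round trip (illustrative; scanout is the usual reason, and both calls assume the caller holds the reservation) looks like:

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	/* keep the buffer in CPU-visible VRAM while it is scanned out */
	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
				     0, adev->gmc.visible_vram_size);
	amdgpu_bo_unreserve(bo);

	/* ... later, again under the reservation: */
	amdgpu_bo_unpin(bo);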
1019 | |
1020 | static const char *amdgpu_vram_names[] = { |
1021 | "UNKNOWN", |
1022 | "GDDR1", |
1023 | "DDR2", |
1024 | "GDDR3", |
1025 | "GDDR4", |
1026 | "GDDR5", |
1027 | "HBM", |
1028 | "DDR3", |
1029 | "DDR4", |
1030 | "GDDR6", |
1031 | "DDR5", |
1032 | "LPDDR4", |
1033 | "LPDDR5" |
1034 | }; |
1035 | |
1036 | /** |
1037 | * amdgpu_bo_init - initialize memory manager |
1038 | * @adev: amdgpu device object |
1039 | * |
1040 | * Calls amdgpu_ttm_init() to initialize amdgpu memory manager. |
1041 | * |
1042 | * Returns: |
1043 | * 0 for success or a negative error code on failure. |
1044 | */ |
1045 | int amdgpu_bo_init(struct amdgpu_device *adev) |
1046 | { |
1047 | /* On A+A platform, VRAM can be mapped as WB */ |
1048 | if (!adev->gmc.xgmi.connected_to_cpu) { |
1049 | #ifdef __linux__ |
1050 | /* reserve PAT memory space to WC for VRAM */ |
1051 | int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base, |
1052 | adev->gmc.aper_size); |
1053 | |
1054 | if (r) { |
1055 | DRM_ERROR("Unable to set WC memtype for the aperture base\n")__drm_err("Unable to set WC memtype for the aperture base\n"); |
1056 | return r; |
1057 | } |
1058 | |
1059 | /* Add an MTRR for the VRAM */ |
1060 | adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, |
1061 | adev->gmc.aper_size); |
1062 | #else |
1063 | paddr_t start, end; |
1064 | |
1065 | drm_mtrr_add(adev->gmc.aper_base, adev->gmc.aper_size, DRM_MTRR_WC); |
1066 | |
1067 | start = atop(bus_space_mmap(adev->memt, adev->gmc.aper_base, 0, 0, 0)); |
1068 | end = start + atop(adev->gmc.aper_size); |
1069 | uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE); |
1070 | #endif |
1071 | } |
1072 | |
1073 | DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",printk("\0016" "[" "drm" "] " "Detected VRAM RAM=%lluM, BAR=%lluM\n" , adev->gmc.mc_vram_size >> 20, (unsigned long long) adev->gmc.aper_size >> 20) |
1074 | adev->gmc.mc_vram_size >> 20,printk("\0016" "[" "drm" "] " "Detected VRAM RAM=%lluM, BAR=%lluM\n" , adev->gmc.mc_vram_size >> 20, (unsigned long long) adev->gmc.aper_size >> 20) |
1075 | (unsigned long long)adev->gmc.aper_size >> 20)printk("\0016" "[" "drm" "] " "Detected VRAM RAM=%lluM, BAR=%lluM\n" , adev->gmc.mc_vram_size >> 20, (unsigned long long) adev->gmc.aper_size >> 20); |
1076 | DRM_INFO("RAM width %dbits %s\n",printk("\0016" "[" "drm" "] " "RAM width %dbits %s\n", adev-> gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]) |
1077 | adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type])printk("\0016" "[" "drm" "] " "RAM width %dbits %s\n", adev-> gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]); |
1078 | return amdgpu_ttm_init(adev); |
1079 | } |
1080 | |
1081 | /** |
1082 | * amdgpu_bo_fini - tear down memory manager |
1083 | * @adev: amdgpu device object |
1084 | * |
1085 | * Reverses amdgpu_bo_init() to tear down memory manager. |
1086 | */ |
1087 | void amdgpu_bo_fini(struct amdgpu_device *adev) |
1088 | { |
1089 | int idx; |
1090 | |
1091 | amdgpu_ttm_fini(adev); |
1092 | |
1093 | if (drm_dev_enter(adev_to_drm(adev), &idx)) { |
1094 | |
1095 | if (!adev->gmc.xgmi.connected_to_cpu) { |
1096 | #ifdef __linux__ |
1097 | arch_phys_wc_del(adev->gmc.vram_mtrr); |
1098 | arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); |
1099 | #else |
1100 | drm_mtrr_del(0, adev->gmc.aper_base, adev->gmc.aper_size, DRM_MTRR_WC); |
1101 | |
1102 | #endif |
1103 | } |
1104 | drm_dev_exit(idx); |
1105 | } |
1106 | } |
1107 | |
1108 | /** |
1109 | * amdgpu_bo_set_tiling_flags - set tiling flags |
1110 | * @bo: &amdgpu_bo buffer object |
1111 | * @tiling_flags: new flags |
1112 | * |
1113 | * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or |
1114 | * kernel driver to set the tiling flags on a buffer. |
1115 | * |
1116 | * Returns: |
1117 | * 0 for success or a negative error code on failure. |
1118 | */ |
1119 | int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) |
1120 | { |
1121 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1122 | struct amdgpu_bo_user *ubo; |
1123 | |
1124 | BUG_ON(bo->tbo.type == ttm_bo_type_kernel); |
1125 | if (adev->family <= AMDGPU_FAMILY_CZ && |
1126 | AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6) |
1127 | return -EINVAL; |
1128 | |
1129 | ubo = to_amdgpu_bo_user(bo); |
1130 | ubo->tiling_flags = tiling_flags; |
1131 | return 0; |
1132 | } |
1133 | |
1134 | /** |
1135 | * amdgpu_bo_get_tiling_flags - get tiling flags |
1136 | * @bo: &amdgpu_bo buffer object |
1137 | * @tiling_flags: returned flags |
1138 | * |
1139 | * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to |
1140 | * get the tiling flags of a buffer. |
1141 | */ |
1142 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags) |
1143 | { |
1144 | struct amdgpu_bo_user *ubo; |
1145 | |
1146 | BUG_ON(bo->tbo.type == ttm_bo_type_kernel); |
1147 | dma_resv_assert_held(bo->tbo.base.resv); |
1148 | ubo = to_amdgpu_bo_user(bo); |
1149 | |
1150 | if (tiling_flags) |
1151 | *tiling_flags = ubo->tiling_flags; |
1152 | } |
1153 | |
1154 | /** |
1155 | * amdgpu_bo_set_metadata - set metadata |
1156 | * @bo: &amdgpu_bo buffer object |
1157 | * @metadata: new metadata |
1158 | * @metadata_size: size of the new metadata |
1159 | * @flags: flags of the new metadata |
1160 | * |
1161 | * Sets buffer object's metadata, its size and flags. |
1162 | * Used via GEM ioctl. |
1163 | * |
1164 | * Returns: |
1165 | * 0 for success or a negative error code on failure. |
1166 | */ |
1167 | int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata, |
1168 | uint32_t metadata_size, uint64_t flags) |
1169 | { |
1170 | struct amdgpu_bo_user *ubo; |
1171 | void *buffer; |
1172 | |
1173 | BUG_ON(bo->tbo.type == ttm_bo_type_kernel); |
1174 | ubo = to_amdgpu_bo_user(bo); |
1175 | if (!metadata_size) { |
1176 | if (ubo->metadata_size) { |
1177 | kfree(ubo->metadata); |
1178 | ubo->metadata = NULL; |
1179 | ubo->metadata_size = 0; |
1180 | } |
1181 | return 0; |
1182 | } |
1183 | |
1184 | if (metadata == NULL) |
1185 | return -EINVAL; |
1186 | |
1187 | buffer = kmemdup(metadata, metadata_size, GFP_KERNEL); |
1188 | if (buffer == NULL) |
1189 | return -ENOMEM; |
1190 | |
1191 | kfree(ubo->metadata); |
1192 | ubo->metadata_flags = flags; |
1193 | ubo->metadata = buffer; |
1194 | ubo->metadata_size = metadata_size; |
1195 | |
1196 | return 0; |
1197 | } |
1198 | |
1199 | /** |
1200 | * amdgpu_bo_get_metadata - get metadata |
1201 | * @bo: &amdgpu_bo buffer object |
1202 | * @buffer: returned metadata |
1203 | * @buffer_size: size of the buffer |
1204 | * @metadata_size: size of the returned metadata |
1205 | * @flags: flags of the returned metadata |
1206 | * |
1207 | * Gets buffer object's metadata, its size and flags. buffer_size shall not be |
1208 | * less than metadata_size. |
1209 | * Used via GEM ioctl. |
1210 | * |
1211 | * Returns: |
1212 | * 0 for success or a negative error code on failure. |
1213 | */ |
1214 | int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, |
1215 | size_t buffer_size, uint32_t *metadata_size, |
1216 | uint64_t *flags) |
1217 | { |
1218 | struct amdgpu_bo_user *ubo; |
1219 | |
1220 | if (!buffer && !metadata_size) |
1221 | return -EINVAL; |
1222 | |
1223 | BUG_ON(bo->tbo.type == ttm_bo_type_kernel); |
1224 | ubo = to_amdgpu_bo_user(bo); |
1225 | if (metadata_size) |
1226 | *metadata_size = ubo->metadata_size; |
1227 | |
1228 | if (buffer) { |
1229 | if (buffer_size < ubo->metadata_size) |
1230 | return -EINVAL; |
1231 | |
1232 | if (ubo->metadata_size) |
1233 | memcpy(buffer, ubo->metadata, ubo->metadata_size); |
1234 | } |
1234 | } |
1235 | |
1236 | if (flags) |
1237 | *flags = ubo->metadata_flags; |
1238 | |
1239 | return 0; |
1240 | } |
1241 | |
1242 | /** |
1243 | * amdgpu_bo_move_notify - notification about a memory move |
1244 | * @bo: pointer to a buffer object |
1245 | * @evict: if this move is evicting the buffer from the graphics address space |
1246 | * @new_mem: new information of the buffer object |
1247 | * |
1248 | * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs |
1249 | * bookkeeping. |
1250 | * TTM driver callback which is called when ttm moves a buffer. |
1251 | */ |
1252 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
1253 | bool evict, |
1254 | struct ttm_resource *new_mem) |
1255 | { |
1256 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
1257 | struct amdgpu_bo *abo; |
1258 | struct ttm_resource *old_mem = bo->resource; |
1259 | |
1260 | if (!amdgpu_bo_is_amdgpu_bo(bo)) |
1261 | return; |
1262 | |
1263 | abo = ttm_to_amdgpu_bo(bo); |
1264 | amdgpu_vm_bo_invalidate(adev, abo, evict); |
1265 | |
1266 | amdgpu_bo_kunmap(abo); |
1267 | |
1268 | #ifdef notyet |
1269 | if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && |
1270 | bo->resource->mem_type != TTM_PL_SYSTEM) |
1271 | dma_buf_move_notify(abo->tbo.base.dma_buf); |
1272 | #endif |
1273 | |
1274 | /* remember the eviction */ |
1275 | if (evict) |
1276 | atomic64_inc(&adev->num_evictions); |
1277 | |
1278 | /* update statistics */ |
1279 | if (!new_mem) |
1280 | return; |
1281 | |
1282 | /* move_notify is called before move happens */ |
1283 | trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); |
1284 | } |
1285 | |
1286 | void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, |
1287 | uint64_t *gtt_mem, uint64_t *cpu_mem) |
1288 | { |
1289 | unsigned int domain; |
1290 | |
1291 | domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); |
1292 | switch (domain) { |
1293 | case AMDGPU_GEM_DOMAIN_VRAM: |
1294 | *vram_mem += amdgpu_bo_size(bo); |
1295 | break; |
1296 | case AMDGPU_GEM_DOMAIN_GTT: |
1297 | *gtt_mem += amdgpu_bo_size(bo); |
1298 | break; |
1299 | case AMDGPU_GEM_DOMAIN_CPU: |
1300 | default: |
1301 | *cpu_mem += amdgpu_bo_size(bo); |
1302 | break; |
1303 | } |
1304 | } |
1305 | |
1306 | /** |
1307 | * amdgpu_bo_release_notify - notification about a BO being released |
1308 | * @bo: pointer to a buffer object |
1309 | * |
1310 | * Wipes VRAM buffers whose contents should not be leaked before the |
1311 | * memory is released. |
1312 | */ |
1313 | void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) |
1314 | { |
1315 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
1316 | struct dma_fence *fence = NULL; |
1317 | struct amdgpu_bo *abo; |
1318 | int r; |
1319 | |
1320 | if (!amdgpu_bo_is_amdgpu_bo(bo)) |
1321 | return; |
1322 | |
1323 | abo = ttm_to_amdgpu_bo(bo); |
1324 | |
1325 | if (abo->kfd_bo) |
1326 | amdgpu_amdkfd_release_notify(abo); |
1327 | |
1328 | /* We only remove the fence if the resv has individualized. */ |
1329 | WARN_ON_ONCE(bo->type == ttm_bo_type_kernel |
1330 | && bo->base.resv != &bo->base._resv); |
1331 | if (bo->base.resv == &bo->base._resv) |
1332 | amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); |
1333 | |
1334 | if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM || |
1335 | !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) || |
1336 | adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev))) |
1337 | return; |
1338 | |
1339 | if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) |
1340 | return; |
1341 | |
1342 | r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); |
1343 | if (!WARN_ON(r)) { |
1344 | amdgpu_bo_fence(abo, fence, false); |
1345 | dma_fence_put(fence); |
1346 | } |
1347 | |
1348 | dma_resv_unlock(bo->base.resv); |
1349 | } |
1350 | |
1351 | /** |
1352 | * amdgpu_bo_fault_reserve_notify - notification about a memory fault |
1353 | * @bo: pointer to a buffer object |
1354 | * |
1355 | * Notifies the driver we are taking a fault on this BO and have reserved it, |
1356 | * also performs bookkeeping. |
1357 | * TTM driver callback for dealing with vm faults. |
1358 | * |
1359 | * Returns: |
1360 | * 0 for success or a negative error code on failure. |
1361 | */ |
1362 | vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
1363 | { |
1364 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
1365 | struct ttm_operation_ctx ctx = { false, false }; |
1366 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
1367 | unsigned long offset; |
1368 | int r; |
1369 | |
1370 | /* Remember that this BO was accessed by the CPU */ |
1371 | abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
1372 | |
1373 | if (bo->resource->mem_type != TTM_PL_VRAM) |
1374 | return 0; |
1375 | |
1376 | offset = bo->resource->start << PAGE_SHIFT; |
1377 | if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) |
1378 | return 0; |
1379 | |
1380 | /* Can't move a pinned BO to visible VRAM */ |
1381 | if (abo->tbo.pin_count > 0) |
1382 | return VM_FAULT_SIGBUS; |
1383 | |
1384 | /* hurrah the memory is not visible ! */ |
1385 | atomic64_inc(&adev->num_vram_cpu_page_faults); |
1386 | amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | |
1387 | AMDGPU_GEM_DOMAIN_GTT); |
1388 | |
1389 | /* Avoid costly evictions; only set GTT as a busy placement */ |
1390 | abo->placement.num_busy_placement = 1; |
1391 | abo->placement.busy_placement = &abo->placements[1]; |
1392 | |
1393 | r = ttm_bo_validate(bo, &abo->placement, &ctx); |
1394 | if (unlikely(r == -EBUSY || r == -ERESTARTSYS)) |
1395 | return VM_FAULT_NOPAGE; |
1396 | else if (unlikely(r)) |
1397 | return VM_FAULT_SIGBUS; |
1398 | |
1399 | offset = bo->resource->start << PAGE_SHIFT; |
1400 | /* this should never happen */ |
1401 | if (bo->resource->mem_type == TTM_PL_VRAM && |
1402 | (offset + bo->base.size) > adev->gmc.visible_vram_size) |
1403 | return VM_FAULT_SIGBUS; |
1404 | |
1405 | ttm_bo_move_to_lru_tail_unlocked(bo); |
1406 | return 0; |
1407 | } |
1408 | |
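For context, the CPU page-fault handler (amdgpu_gem_fault() in amdgpu_gem.c)
reserves the BO and then calls this notify callback before inserting PTEs. A
simplified sketch of that flow, with dma_resv locking and TTM's fault helpers
omitted (the function name is illustrative):

    static vm_fault_t example_fault_flow(struct ttm_buffer_object *bo)
    {
            vm_fault_t ret;

            /* Let the driver migrate the BO into CPU-visible VRAM. */
            ret = amdgpu_bo_fault_reserve_notify(bo);
            if (ret)
                    return ret;     /* NOPAGE: retry; SIGBUS: fail hard */

            /* ...insert the now CPU-visible pages into the VMA... */
            return 0;
    }
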
1409 | /** |
1410 | * amdgpu_bo_fence - add fence to buffer object |
1411 | * |
1412 | * @bo: buffer object in question |
1413 | * @fence: fence to add |
1414 | * @shared: true if the fence should be added as shared
1415 | * |
1416 | */ |
1417 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
1418 | bool shared)
1419 | { |
1420 | struct dma_resv *resv = bo->tbo.base.resv; |
1421 | int r; |
1422 | |
1423 | r = dma_resv_reserve_fences(resv, 1); |
1424 | if (r) { |
1425 | /* As last resort on OOM we block for the fence */ |
1426 | dma_fence_wait(fence, false);
1427 | return; |
1428 | } |
1429 | |
1430 | dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ : |
1431 | DMA_RESV_USAGE_WRITE); |
1432 | } |
1433 | |
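dma_resv_add_fence() requires the BO's reservation lock to be held, so callers
follow the pattern of the release path above: lock the resv, publish the
fence, then drop the local reference. A hedged sketch (the helper name is
illustrative):

    /* Publish a write fence, e.g. one returned by a GPU fill or copy,
     * so later users of the BO synchronize against it. Assumes the
     * caller already holds bo->tbo.base.resv. */
    static void example_publish_write(struct amdgpu_bo *bo,
                                      struct dma_fence *fence)
    {
            amdgpu_bo_fence(bo, fence, false);  /* DMA_RESV_USAGE_WRITE */
            dma_fence_put(fence);               /* the resv holds its own ref */
    }
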
1434 | /** |
1435 | * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences |
1436 | * |
1437 | * @adev: amdgpu device pointer |
1438 | * @resv: reservation object to sync to |
1439 | * @sync_mode: synchronization mode |
1440 | * @owner: fence owner |
1441 | * @intr: Whether the wait is interruptible |
1442 | * |
1443 | * Extracts the fences from the reservation object and waits for them to finish.
1444 | * |
1445 | * Returns: |
1446 | * 0 on success, errno otherwise. |
1447 | */ |
1448 | int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, |
1449 | enum amdgpu_sync_mode sync_mode, void *owner, |
1450 | bool intr)
1451 | { |
1452 | struct amdgpu_sync sync; |
1453 | int r; |
1454 | |
1455 | amdgpu_sync_create(&sync); |
1456 | amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner); |
1457 | r = amdgpu_sync_wait(&sync, intr); |
1458 | amdgpu_sync_free(&sync); |
1459 | return r; |
1460 | } |
1461 | |
1462 | /** |
1463 | * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv |
1464 | * @bo: buffer object to wait for |
1465 | * @owner: fence owner |
1466 | * @intr: Whether the wait is interruptible |
1467 | * |
1468 | * Wrapper to wait for fences in a BO. |
1469 | * Returns: |
1470 | * 0 on success, errno otherwise. |
1471 | */ |
1472 | int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1473 | { |
1474 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1475 | |
1476 | return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv, |
1477 | AMDGPU_SYNC_NE_OWNER, owner, intr); |
1478 | } |
1479 | |
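A typical use is draining foreign fences before the CPU touches a BO: with
AMDGPU_SYNC_NE_OWNER, fences tagged with the caller's own owner token are
skipped. A sketch under those assumptions (names illustrative):

    static int example_cpu_update(struct amdgpu_bo *bo, void *owner)
    {
            int r;

            /* Interruptible wait for every fence not owned by us. */
            r = amdgpu_bo_sync_wait(bo, owner, true);
            if (r)
                    return r;       /* e.g. -ERESTARTSYS on a signal */

            /* ...BO contents may now be updated safely from the CPU... */
            return 0;
    }
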
1480 | /** |
1481 | * amdgpu_bo_gpu_offset - return GPU offset of bo |
1482 | * @bo: amdgpu object for which we query the offset |
1483 | * |
1484 | * Note: the object should either be pinned or reserved when calling this
1485 | * function; it might be useful to add a check for this for debugging.
1486 | * |
1487 | * Returns: |
1488 | * current GPU offset of the object. |
1489 | */ |
1490 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) |
1491 | { |
1492 | WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
1493 | WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1494 | !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
1495 | WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
1496 | WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
1497 | !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1498 | |
1499 | return amdgpu_bo_gpu_offset_no_check(bo); |
1500 | } |
1501 | |
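The usual way to satisfy the pinned-or-reserved requirement is the
reserve/pin/query idiom used throughout the driver; a sketch with error
handling trimmed (the helper name is illustrative):

    static int example_get_gpu_addr(struct amdgpu_bo *bo, u64 *gpu_addr)
    {
            int r;

            r = amdgpu_bo_reserve(bo, false);
            if (r)
                    return r;

            r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
            if (!r)
                    *gpu_addr = amdgpu_bo_gpu_offset(bo);

            amdgpu_bo_unreserve(bo);
            return r;
    }
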
1502 | /** |
1503 | * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo |
1504 | * @bo: amdgpu object for which we query the offset |
1505 | * |
1506 | * Returns: |
1507 | * current GPU offset of the object without raising warnings. |
1508 | */ |
1509 | u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) |
1510 | { |
1511 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1512 | uint64_t offset; |
1513 | |
1514 | offset = (bo->tbo.resource->start << PAGE_SHIFT) +
1515 | amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); |
1516 | |
1517 | return amdgpu_gmc_sign_extend(offset); |
1518 | } |
1519 | |
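The computation is a page-to-byte conversion plus the base GPU address of the
backing domain. A worked example with illustrative numbers:

    /*
     * A BO at page 0x100 of a domain whose aperture begins at GPU
     * address 0x0000008000000000:
     *
     *   offset = (0x100 << PAGE_SHIFT) + 0x0000008000000000
     *          = 0x0000008000100000
     *
     * amdgpu_gmc_sign_extend() then canonicalizes the result for GMCs
     * using 48-bit sign-extended addressing; addresses below the hole
     * pass through unchanged.
     */
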
1520 | /** |
1521 | * amdgpu_bo_get_preferred_domain - get preferred domain |
1522 | * @adev: amdgpu device object |
1523 | * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>` |
1524 | * |
1525 | * Returns: |
1526 | * Which of the allowed domains is preferred for allocating the BO. |
1527 | */ |
1528 | uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, |
1529 | uint32_t domain) |
1530 | { |
1531 | if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
1532 | ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
1533 | domain = AMDGPU_GEM_DOMAIN_VRAM;
1534 | if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1535 | domain = AMDGPU_GEM_DOMAIN_GTT;
1536 | } |
1537 | return domain; |
1538 | } |
1539 | |
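For example, on Carrizo or Stoney (APUs whose "VRAM" is a carve-out of system
memory), a request for VRAM|GTT resolves to GTT whenever the carve-out is at
most AMDGPU_SG_THRESHOLD (256 MiB):

    uint32_t domain;

    domain = amdgpu_bo_get_preferred_domain(adev,
                    AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
    /* domain now holds the single domain to try first. */
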
1540 | #if defined(CONFIG_DEBUG_FS) |
1541 | #define amdgpu_bo_print_flag(m, bo, flag) \ |
1542 | do { \ |
1543 | if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \ |
1544 | seq_printf((m), " " #flag); \ |
1545 | } \ |
1546 | } while (0) |
1547 | |
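The macro relies on token pasting and stringification:
amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC), for instance, expands to

    if (bo->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC)) {
            seq_printf((m), " CPU_GTT_USWC");
    }

so each short flag name is tested against its AMDGPU_GEM_CREATE_ counterpart
and echoed verbatim when the bit is set.
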
1548 | /** |
1549 | * amdgpu_bo_print_info - print BO info in debugfs file |
1550 | * |
1551 | * @id: Index or Id of the BO |
1552 | * @bo: Requested BO for printing info |
1553 | * @m: debugfs file |
1554 | * |
1555 | * Prints BO information to the given debugfs file.
1556 | * |
1557 | * Returns: |
1558 | * Size of the BO in bytes. |
1559 | */ |
1560 | u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) |
1561 | { |
1562 | struct dma_buf_attachment *attachment; |
1563 | struct dma_buf *dma_buf; |
1564 | unsigned int domain; |
1565 | const char *placement; |
1566 | unsigned int pin_count; |
1567 | u64 size; |
1568 | |
1569 | domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); |
1570 | switch (domain) { |
1571 | case AMDGPU_GEM_DOMAIN_VRAM:
1572 | placement = "VRAM"; |
1573 | break; |
1574 | case AMDGPU_GEM_DOMAIN_GTT:
1575 | placement = " GTT"; |
1576 | break; |
1577 | case AMDGPU_GEM_DOMAIN_CPU:
1578 | default: |
1579 | placement = " CPU"; |
1580 | break; |
1581 | } |
1582 | |
1583 | size = amdgpu_bo_size(bo); |
1584 | seq_printf(m, "\t\t0x%08x: %12lld byte %s", |
1585 | id, size, placement); |
1586 | |
1587 | pin_count = READ_ONCE(bo->tbo.pin_count);
1588 | if (pin_count) |
1589 | seq_printf(m, " pin count %d", pin_count); |
1590 | |
1591 | dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
1592 | attachment = READ_ONCE(bo->tbo.base.import_attach);
1593 | |
1594 | if (attachment) |
1595 | seq_printf(m, " imported from %p", dma_buf); |
1596 | else if (dma_buf) |
1597 | seq_printf(m, " exported as %p", dma_buf); |
1598 | |
1599 | amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); |
1600 | amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS); |
1601 | amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC); |
1602 | amdgpu_bo_print_flag(m, bo, VRAM_CLEARED); |
1603 | amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS); |
1604 | amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID); |
1605 | amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC); |
1606 | |
1607 | seq_puts(m, "\n"); |
1608 | |
1609 | return size; |
1610 | } |
1611 | #endif |
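Putting the format strings above together, a single debugfs line might look
like this (values purely illustrative):

    0x00000001:      2097152 byte VRAM pin count 1 CPU_ACCESS_REQUIRED VRAM_CONTIGUOUS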