File: dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
Warning: line 881, column 4: Value stored to 'attach' is never read
1 | /*
2 |  * Copyright 2009 Jerome Glisse.
3 |  * All Rights Reserved.
4 |  *
5 |  * Permission is hereby granted, free of charge, to any person obtaining a
6 |  * copy of this software and associated documentation files (the
7 |  * "Software"), to deal in the Software without restriction, including
8 |  * without limitation the rights to use, copy, modify, merge, publish,
9 |  * distribute, sub license, and/or sell copies of the Software, and to
10 |  * permit persons to whom the Software is furnished to do so, subject to
11 |  * the following conditions:
12 |  *
13 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 |  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 |  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 |  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 |  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 |  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 |  *
21 |  * The above copyright notice and this permission notice (including the
22 |  * next paragraph) shall be included in all copies or substantial portions
23 |  * of the Software.
24 |  *
25 |  */
26 | /*
27 |  * Authors:
28 |  *    Jerome Glisse <glisse@freedesktop.org>
29 |  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 |  *    Dave Airlie
31 |  */
32 |
33 | #include <linux/dma-mapping.h>
34 | #include <linux/iommu.h>
35 | #include <linux/pagemap.h>
36 | #include <linux/sched/task.h>
37 | #include <linux/sched/mm.h>
38 | #include <linux/seq_file.h>
39 | #include <linux/slab.h>
40 | #include <linux/swap.h>
41 | #include <linux/swiotlb.h>
42 | #include <linux/dma-buf.h>
43 | #include <linux/sizes.h>
44 | #include <linux/module.h>
45 |
46 | #include <drm/drm_drv.h>
47 | #include <drm/ttm/ttm_bo_api.h>
48 | #include <drm/ttm/ttm_bo_driver.h>
49 | #include <drm/ttm/ttm_placement.h>
50 | #include <drm/ttm/ttm_range_manager.h>
51 |
52 | #include <drm/amdgpu_drm.h>
53 | #include <drm/drm_drv.h>
54 |
55 | #include "amdgpu.h"
56 | #include "amdgpu_object.h"
57 | #include "amdgpu_trace.h"
58 | #include "amdgpu_amdkfd.h"
59 | #include "amdgpu_sdma.h"
60 | #include "amdgpu_ras.h"
61 | #include "amdgpu_atomfirmware.h"
62 | #include "amdgpu_res_cursor.h"
63 | #include "bif/bif_4_1_d.h"
64 |
65 | MODULE_IMPORT_NS(DMA_BUF);
66 |
67 | #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
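/*
 * Annotation: AMDGPU_TTM_VRAM_MAX_DW_READ appears to cap how many dwords a
 * single iteration of the dword-wise VRAM access helpers may transfer; its
 * users (the debugfs VRAM read/write paths) would sit later in the file,
 * past the end of this excerpt.
 */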
68 |
69 | static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
70 | 				   struct ttm_tt *ttm,
71 | 				   struct ttm_resource *bo_mem);
72 | static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
73 | 				      struct ttm_tt *ttm);
74 |
75 | static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
76 | 				   unsigned int type,
77 | 				   uint64_t size_in_page)
78 | {
79 | 	return ttm_range_man_init(&adev->mman.bdev, type,
80 | 				  false, size_in_page);
81 | }
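/*
 * Annotation: this helper is used for the small GDS/GWS/OA on-chip domains;
 * the 'false' argument is ttm_range_man_init()'s use_tt parameter, asking
 * for a plain range allocator whose resources are not backed by ttm_tt
 * pages.
 */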
82 |
83 | /**
84 |  * amdgpu_evict_flags - Compute placement flags
85 |  *
86 |  * @bo: The buffer object to evict
87 |  * @placement: Possible destination(s) for evicted BO
88 |  *
89 |  * Fill in placement data when ttm_bo_evict() is called
90 |  */
91 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
92 | 			       struct ttm_placement *placement)
93 | {
94 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
95 | 	struct amdgpu_bo *abo;
96 | 	static const struct ttm_place placements = {
97 | 		.fpfn = 0,
98 | 		.lpfn = 0,
99 | 		.mem_type = TTM_PL_SYSTEM,
100 | 		.flags = 0
101 | 	};
102 |
103 | 	/* Don't handle scatter gather BOs */
104 | 	if (bo->type == ttm_bo_type_sg) {
105 | 		placement->num_placement = 0;
106 | 		placement->num_busy_placement = 0;
107 | 		return;
108 | 	}
109 |
110 | 	/* Object isn't an AMDGPU object so ignore */
111 | 	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
112 | 		placement->placement = &placements;
113 | 		placement->busy_placement = &placements;
114 | 		placement->num_placement = 1;
115 | 		placement->num_busy_placement = 1;
116 | 		return;
117 | 	}
118 |
119 | 	abo = ttm_to_amdgpu_bo(bo);
120 | 	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
121 | 		placement->num_placement = 0;
122 | 		placement->num_busy_placement = 0;
123 | 		return;
124 | 	}
125 |
126 | 	switch (bo->resource->mem_type) {
127 | 	case AMDGPU_PL_GDS:
128 | 	case AMDGPU_PL_GWS:
129 | 	case AMDGPU_PL_OA:
130 | 		placement->num_placement = 0;
131 | 		placement->num_busy_placement = 0;
132 | 		return;
133 |
134 | 	case TTM_PL_VRAM:
135 | 		if (!adev->mman.buffer_funcs_enabled) {
136 | 			/* Move to system memory */
137 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
138 | 		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
139 | 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
140 | 			   amdgpu_bo_in_cpu_visible_vram(abo)) {
141 |
142 | 			/* Try evicting to the CPU inaccessible part of VRAM
143 | 			 * first, but only set GTT as busy placement, so this
144 | 			 * BO will be evicted to GTT rather than causing other
145 | 			 * BOs to be evicted from VRAM
146 | 			 */
147 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
148 | 							 AMDGPU_GEM_DOMAIN_GTT |
149 | 							 AMDGPU_GEM_DOMAIN_CPU);
150 | 			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
151 | 			abo->placements[0].lpfn = 0;
152 | 			abo->placement.busy_placement = &abo->placements[1];
153 | 			abo->placement.num_busy_placement = 1;
154 | 		} else {
155 | 			/* Move to GTT memory */
156 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
157 | 							 AMDGPU_GEM_DOMAIN_CPU);
158 | 		}
159 | 		break;
160 | 	case TTM_PL_TT:
161 | 	case AMDGPU_PL_PREEMPT:
162 | 	default:
163 | 		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
164 | 		break;
165 | 	}
166 | 	*placement = abo->placement;
167 | }
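/*
 * Annotation on the TTM_PL_VRAM branch above: amdgpu_bo_placement_from_domain()
 * fills abo->placements[] in domain order (VRAM, then GTT, then CPU), which
 * is why &abo->placements[1] selects the GTT entry as the sole busy
 * placement.
 */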
168 |
169 | /**
170 |  * amdgpu_ttm_map_buffer - Map memory into the GART windows
171 |  * @bo: buffer object to map
172 |  * @mem: memory object to map
173 |  * @mm_cur: range to map
174 |  * @window: which GART window to use
175 |  * @ring: DMA ring to use for the copy
176 |  * @tmz: if we should setup a TMZ enabled mapping
177 |  * @size: in number of bytes to map, out number of bytes mapped
178 |  * @addr: resulting address inside the MC address space
179 |  *
180 |  * Setup one of the GART windows to access a specific piece of memory or return
181 |  * the physical address for local memory.
182 |  */
183 | static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
184 | 				 struct ttm_resource *mem,
185 | 				 struct amdgpu_res_cursor *mm_cur,
186 | 				 unsigned window, struct amdgpu_ring *ring,
187 | 				 bool tmz, uint64_t *size, uint64_t *addr)
188 | {
189 | 	struct amdgpu_device *adev = ring->adev;
190 | 	unsigned offset, num_pages, num_dw, num_bytes;
191 | 	uint64_t src_addr, dst_addr;
192 | 	struct dma_fence *fence;
193 | 	struct amdgpu_job *job;
194 | 	void *cpu_addr;
195 | 	uint64_t flags;
196 | 	unsigned int i;
197 | 	int r;
198 |
199 | 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
200 | 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
201 |
202 | 	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
203 | 		return -EINVAL;
204 |
205 | 	/* Map only what can't be accessed directly */
206 | 	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
207 | 		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
208 | 			mm_cur->start;
209 | 		return 0;
210 | 	}
211 |
212 |
213 | 	/*
214 | 	 * If start begins at an offset inside the page, then adjust the size
215 | 	 * and addr accordingly
216 | 	 */
217 | 	offset = mm_cur->start & ~LINUX_PAGE_MASK;
218 |
219 | 	num_pages = PFN_UP(*size + offset);
220 | 	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
221 |
222 | 	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
223 |
224 | 	*addr = adev->gmc.gart_start;
225 | 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
226 | 		AMDGPU_GPU_PAGE_SIZE;
227 | 	*addr += offset;
228 |
229 | 	num_dw = roundup2(adev->mman.buffer_funcs->copy_num_dw, 8);
230 | 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
231 |
232 | 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
233 | 				     AMDGPU_IB_POOL_DELAYED, &job);
234 | 	if (r)
235 | 		return r;
236 |
237 | 	src_addr = num_dw * 4;
238 | 	src_addr += job->ibs[0].gpu_addr;
239 |
240 | 	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
241 | 	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
242 | 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
243 | 				dst_addr, num_bytes, false);
244 |
245 | 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
246 | 	WARN_ON(job->ibs[0].length_dw > num_dw);
247 |
248 | 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
249 | 	if (tmz)
250 | 		flags |= AMDGPU_PTE_TMZ;
251 |
252 | 	cpu_addr = &job->ibs[0].ptr[num_dw];
253 |
254 | 	if (mem->mem_type == TTM_PL_TT) {
255 | 		dma_addr_t *dma_addr;
256 |
257 | 		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
258 | 		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
259 | 	} else {
260 | 		dma_addr_t dma_address;
261 |
262 | 		dma_address = mm_cur->start;
263 | 		dma_address += adev->vm_manager.vram_base_offset;
264 |
265 | 		for (i = 0; i < num_pages; ++i) {
266 | 			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
267 | 					flags, cpu_addr);
268 | 			dma_address += PAGE_SIZE;
269 | 		}
270 | 	}
271 |
272 | 	r = amdgpu_job_submit(job, &adev->mman.entity,
273 | 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
274 | 	if (r)
275 | 		goto error_free;
276 |
277 | 	dma_fence_put(fence);
278 |
279 | 	return r;
280 |
281 | error_free:
282 | 	amdgpu_job_free(job);
283 | 	return r;
284 | }
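/*
 * Annotation on the window arithmetic, using the expanded constants from
 * this build: AMDGPU_GTT_MAX_TRANSFER_SIZE = 512 GPU pages of
 * AMDGPU_GPU_PAGE_SIZE = 4096 bytes, so each window covers 512 * 4096 =
 * 2 MiB of GART address space, window N starting at gart_start + N * 2 MiB.
 * The matching PTEs (8 bytes per GPU page) land at offset N * 512 * 8 in
 * the GART table BO, carried in the tail of the copy job's IB (cpu_addr).
 */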
285 |
286 | /**
287 |  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
288 |  * @adev: amdgpu device
289 |  * @src: buffer/address where to read from
290 |  * @dst: buffer/address where to write to
291 |  * @size: number of bytes to copy
292 |  * @tmz: if a secure copy should be used
293 |  * @resv: resv object to sync to
294 |  * @f: Returns the last fence if multiple jobs are submitted.
295 |  *
296 |  * The function copies @size bytes from {src->mem + src->offset} to
297 |  * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
298 |  * move, and different for a BO to BO copy.
299 |  *
300 |  */
301 | int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
302 | 			       const struct amdgpu_copy_mem *src,
303 | 			       const struct amdgpu_copy_mem *dst,
304 | 			       uint64_t size, bool tmz,
305 | 			       struct dma_resv *resv,
306 | 			       struct dma_fence **f)
307 | {
308 | 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
309 | 	struct amdgpu_res_cursor src_mm, dst_mm;
310 | 	struct dma_fence *fence = NULL;
311 | 	int r = 0;
312 |
313 | 	if (!adev->mman.buffer_funcs_enabled) {
314 | 		DRM_ERROR("Trying to move memory with ring turned off.\n");
315 | 		return -EINVAL;
316 | 	}
317 |
318 | 	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
319 | 	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
320 |
321 | 	mutex_lock(&adev->mman.gtt_window_lock);
322 | 	while (src_mm.remaining) {
323 | 		uint64_t from, to, cur_size;
324 | 		struct dma_fence *next;
325 |
326 | 		/* Never copy more than 256MiB at once to avoid a timeout */
327 | 		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
328 |
329 | 		/* Map src to window 0 and dst to window 1. */
330 | 		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
331 | 					  0, ring, tmz, &cur_size, &from);
332 | 		if (r)
333 | 			goto error;
334 |
335 | 		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
336 | 					  1, ring, tmz, &cur_size, &to);
337 | 		if (r)
338 | 			goto error;
339 |
340 | 		r = amdgpu_copy_buffer(ring, from, to, cur_size,
341 | 				       resv, &next, false, true, tmz);
342 | 		if (r)
343 | 			goto error;
344 |
345 | 		dma_fence_put(fence);
346 | 		fence = next;
347 |
348 | 		amdgpu_res_next(&src_mm, cur_size);
349 | 		amdgpu_res_next(&dst_mm, cur_size);
350 | 	}
351 | error:
352 | 	mutex_unlock(&adev->mman.gtt_window_lock);
353 | 	if (f)
354 | 		*f = dma_fence_get(fence);
355 | 	dma_fence_put(fence);
356 | 	return r;
357 | }
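/*
 * Annotation on chunking: cur_size starts capped at 256 MiB, but when a
 * side has to go through a GART window, amdgpu_ttm_map_buffer() shrinks it
 * further to the window size (at most 512 pages); directly addressable
 * memory keeps the larger chunks.  gtt_window_lock serialises users of the
 * two shared windows.
 */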
358 |
359 | /*
360 |  * amdgpu_move_blit - Copy an entire buffer to another buffer
361 |  *
362 |  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
363 |  * help move buffers to and from VRAM.
364 |  */
365 | static int amdgpu_move_blit(struct ttm_buffer_object *bo,
366 | 			    bool evict,
367 | 			    struct ttm_resource *new_mem,
368 | 			    struct ttm_resource *old_mem)
369 | {
370 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
371 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
372 | 	struct amdgpu_copy_mem src, dst;
373 | 	struct dma_fence *fence = NULL;
374 | 	int r;
375 |
376 | 	src.bo = bo;
377 | 	dst.bo = bo;
378 | 	src.mem = old_mem;
379 | 	dst.mem = new_mem;
380 | 	src.offset = 0;
381 | 	dst.offset = 0;
382 |
383 | 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
384 | 				       new_mem->num_pages << PAGE_SHIFT,
385 | 				       amdgpu_bo_encrypted(abo),
386 | 				       bo->base.resv, &fence);
387 | 	if (r)
388 | 		goto error;
389 |
390 | 	/* clear the space being freed */
391 | 	if (old_mem->mem_type == TTM_PL_VRAM &&
392 | 	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
393 | 		struct dma_fence *wipe_fence = NULL;
394 |
395 | 		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
396 | 		if (r) {
397 | 			goto error;
398 | 		} else if (wipe_fence) {
399 | 			dma_fence_put(fence);
400 | 			fence = wipe_fence;
401 | 		}
402 | 	}
403 |
404 | 	/* Always block for VM page tables before committing the new location */
405 | 	if (bo->type == ttm_bo_type_kernel)
406 | 		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
407 | 	else
408 | 		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
409 | 	dma_fence_put(fence);
410 | 	return r;
411 |
412 | error:
413 | 	if (fence)
414 | 		dma_fence_wait(fence, false);
415 | 	dma_fence_put(fence);
416 | 	return r;
417 | }
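/*
 * Annotation: the wipe-on-release path overwrites the vacated VRAM with the
 * AMDGPU_POISON pattern and then substitutes the fill fence for the copy
 * fence, so the move cannot signal completion before the freed VRAM has
 * actually been cleared.
 */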
418 |
419 | /*
420 |  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
421 |  *
422 |  * Called by amdgpu_bo_move()
423 |  */
424 | static bool amdgpu_mem_visible(struct amdgpu_device *adev,
425 | 			       struct ttm_resource *mem)
426 | {
427 | 	u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
428 | 	struct amdgpu_res_cursor cursor;
429 | 	u64 end;
430 |
431 | 	if (mem->mem_type == TTM_PL_SYSTEM ||
432 | 	    mem->mem_type == TTM_PL_TT)
433 | 		return true;
434 | 	if (mem->mem_type != TTM_PL_VRAM)
435 | 		return false;
436 |
437 | 	amdgpu_res_first(mem, 0, mem_size, &cursor);
438 | 	end = cursor.start + cursor.size;
439 | 	while (cursor.remaining) {
440 | 		amdgpu_res_next(&cursor, cursor.size);
441 |
442 | 		if (!cursor.remaining)
443 | 			break;
444 |
445 | 		/* ttm_resource_ioremap only supports contiguous memory */
446 | 		if (end != cursor.start)
447 | 			return false;
448 |
449 | 		end = cursor.start + cursor.size;
450 | 	}
451 |
452 | 	return end <= adev->gmc.visible_vram_size;
453 | }
454 |
455 | /*
456 |  * amdgpu_bo_move - Move a buffer object to a new memory location
457 |  *
458 |  * Called by ttm_bo_handle_move_mem()
459 |  */
460 | static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
461 | 			  struct ttm_operation_ctx *ctx,
462 | 			  struct ttm_resource *new_mem,
463 | 			  struct ttm_place *hop)
464 | {
465 | 	struct amdgpu_device *adev;
466 | 	struct amdgpu_bo *abo;
467 | 	struct ttm_resource *old_mem = bo->resource;
468 | 	int r;
469 |
470 | 	if (new_mem->mem_type == TTM_PL_TT ||
471 | 	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
472 | 		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
473 | 		if (r)
474 | 			return r;
475 | 	}
476 |
477 | 	/* Can't move a pinned BO */
478 | 	abo = ttm_to_amdgpu_bo(bo);
479 | 	if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
480 | 		return -EINVAL;
481 |
482 | 	adev = amdgpu_ttm_adev(bo->bdev);
483 |
484 | 	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
485 | 			 bo->ttm == NULL)) {
486 | 		ttm_bo_move_null(bo, new_mem);
487 | 		goto out;
488 | 	}
489 | 	if (old_mem->mem_type == TTM_PL_SYSTEM &&
490 | 	    (new_mem->mem_type == TTM_PL_TT ||
491 | 	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
492 | 		ttm_bo_move_null(bo, new_mem);
493 | 		goto out;
494 | 	}
495 | 	if ((old_mem->mem_type == TTM_PL_TT ||
496 | 	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
497 | 	    new_mem->mem_type == TTM_PL_SYSTEM) {
498 | 		r = ttm_bo_wait_ctx(bo, ctx);
499 | 		if (r)
500 | 			return r;
501 |
502 | 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
503 | 		ttm_resource_free(bo, &bo->resource);
504 | 		ttm_bo_assign_mem(bo, new_mem);
505 | 		goto out;
506 | 	}
507 |
508 | 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
509 | 	    old_mem->mem_type == AMDGPU_PL_GWS ||
510 | 	    old_mem->mem_type == AMDGPU_PL_OA ||
511 | 	    new_mem->mem_type == AMDGPU_PL_GDS ||
512 | 	    new_mem->mem_type == AMDGPU_PL_GWS ||
513 | 	    new_mem->mem_type == AMDGPU_PL_OA) {
514 | 		/* Nothing to save here */
515 | 		ttm_bo_move_null(bo, new_mem);
516 | 		goto out;
517 | 	}
518 |
519 | 	if (bo->type == ttm_bo_type_device &&
520 | 	    new_mem->mem_type == TTM_PL_VRAM &&
521 | 	    old_mem->mem_type != TTM_PL_VRAM) {
522 | 		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
523 | 		 * accesses the BO after it's moved.
524 | 		 */
525 | 		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
526 | 	}
527 |
528 | 	if (adev->mman.buffer_funcs_enabled) {
529 | 		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
530 | 		      new_mem->mem_type == TTM_PL_VRAM) ||
531 | 		     (old_mem->mem_type == TTM_PL_VRAM &&
532 | 		      new_mem->mem_type == TTM_PL_SYSTEM))) {
533 | 			hop->fpfn = 0;
534 | 			hop->lpfn = 0;
535 | 			hop->mem_type = TTM_PL_TT;
536 | 			hop->flags = TTM_PL_FLAG_TEMPORARY;
537 | 			return -EMULTIHOP;
538 | 		}
539 |
540 | 		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
541 | 	} else {
542 | 		r = -ENODEV;
543 | 	}
544 |
545 | 	if (r) {
546 | 		/* Check that all memory is CPU accessible */
547 | 		if (!amdgpu_mem_visible(adev, old_mem) ||
548 | 		    !amdgpu_mem_visible(adev, new_mem)) {
549 | 			pr_err("Move buffer fallback to memcpy unavailable\n");
550 | 			return r;
551 | 		}
552 |
553 | 		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
554 | 		if (r)
555 | 			return r;
556 | 	}
557 |
558 | out:
559 | 	/* update statistics */
560 | 	atomic64_add(bo->base.size, &adev->num_bytes_moved);
561 | 	amdgpu_bo_move_notify(bo, evict, new_mem);
562 | 	return 0;
563 | }
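/*
 * Annotation: direct SYSTEM <-> VRAM moves are rejected with -EMULTIHOP and
 * a temporary TTM_PL_TT hop placement; TTM then performs the move in two
 * steps (SYSTEM -> GTT -> VRAM, or the reverse), since the copy engine
 * needs GPU-addressable pages on both ends of each hop.
 */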
564 |
565 | /*
566 |  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
567 |  *
568 |  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
569 |  */
570 | static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
571 | 				     struct ttm_resource *mem)
572 | {
573 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
574 | 	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
575 |
576 | 	switch (mem->mem_type) {
577 | 	case TTM_PL_SYSTEM:
578 | 		/* system memory */
579 | 		return 0;
580 | 	case TTM_PL_TT:
581 | 	case AMDGPU_PL_PREEMPT:
582 | 		break;
583 | 	case TTM_PL_VRAM:
584 | 		mem->bus.offset = mem->start << PAGE_SHIFT;
585 | 		/* check if it's visible */
586 | 		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
587 | 			return -EINVAL;
588 |
589 | 		if (adev->mman.aper_base_kaddr &&
590 | 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
591 | 			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
592 | 					mem->bus.offset;
593 |
594 | 		mem->bus.offset += adev->gmc.aper_base;
595 | 		mem->bus.is_iomem = true;
596 | 		break;
597 | 	default:
598 | 		return -EINVAL;
599 | 	}
600 | 	return 0;
601 | }
602 |
603 | static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
604 | 					   unsigned long page_offset)
605 | {
606 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
607 | 	struct amdgpu_res_cursor cursor;
608 |
609 | 	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
610 | 			 &cursor);
611 | 	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
612 | }
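/*
 * Annotation: io_mem_reserve() publishes CPU-side bus addresses for a
 * fault.  VRAM is only reachable through the PCI aperture, so anything past
 * visible_vram_size is refused and must be migrated first.  io_mem_pfn()
 * then resolves an individual BO page to its aperture PFN via the resource
 * cursor, which also handles non-contiguous VRAM placements.
 */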
613 |
614 | /**
615 |  * amdgpu_ttm_domain_start - Returns GPU start address
616 |  * @adev: amdgpu device object
617 |  * @type: type of the memory
618 |  *
619 |  * Returns:
620 |  * GPU start address of a memory domain
621 |  */
622 |
623 | uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
624 | {
625 | 	switch (type) {
626 | 	case TTM_PL_TT:
627 | 		return adev->gmc.gart_start;
628 | 	case TTM_PL_VRAM:
629 | 		return adev->gmc.vram_start;
630 | 	}
631 |
632 | 	return 0;
633 | }
634 |
635 | /*
636 |  * TTM backend functions.
637 |  */
638 | struct amdgpu_ttm_tt {
639 | 	struct ttm_tt ttm;
640 | 	struct drm_gem_object *gobj;
641 | 	u64 offset;
642 | 	uint64_t userptr;
643 | 	struct task_struct *usertask;
644 | 	uint32_t userflags;
645 | 	bool bound;
646 | };
647 |
648 | #define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)
649 |
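/*
 * Annotation on field usage throughout this file: 'offset' is the BO's byte
 * offset inside the GART once bound (AMDGPU_BO_INVALID_OFFSET otherwise),
 * 'userptr' is the CPU virtual address backing a userptr BO, 'userflags'
 * carries the AMDGPU_GEM_USERPTR_* bits, and 'bound' tracks whether GART
 * PTEs are currently installed for this ttm_tt.
 */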
650 | #ifdef CONFIG_DRM_AMDGPU_USERPTR
651 | /*
652 |  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
653 |  * memory and start HMM tracking CPU page table update
654 |  *
655 |  * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once
656 |  * and only once afterwards to stop HMM tracking
657 |  */
658 | int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct vm_page **pages,
659 | 				 struct hmm_range **range)
660 | {
661 | 	struct ttm_tt *ttm = bo->tbo.ttm;
662 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
663 | 	unsigned long start = gtt->userptr;
664 | 	struct vm_area_struct *vma;
665 | 	struct mm_struct *mm;
666 | 	bool readonly;
667 | 	int r = 0;
668 |
669 | 	/* Make sure get_user_pages_done() can cleanup gracefully */
670 | 	*range = NULL;
671 |
672 | 	mm = bo->notifier.mm;
673 | 	if (unlikely(!mm)) {
674 | 		DRM_DEBUG_DRIVER("BO is not registered?\n");
675 | 		return -EFAULT;
676 | 	}
677 |
678 | 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
679 | 		return -ESRCH;
680 |
681 | 	mmap_read_lock(mm);
682 | 	vma = vma_lookup(mm, start);
683 | 	if (unlikely(!vma)) {
684 | 		r = -EFAULT;
685 | 		goto out_unlock;
686 | 	}
687 | 	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
688 | 		     vma->vm_file)) {
689 | 		r = -EPERM;
690 | 		goto out_unlock;
691 | 	}
692 |
693 | 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
694 | 	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
695 | 				       ttm->num_pages, range, readonly,
696 | 				       true, NULL);
697 | out_unlock:
698 | 	mmap_read_unlock(mm);
699 | 	if (r)
700 | 		pr_debug("failed %d to get user pages 0x%lx\n", r, start);
701 |
702 | 	mmput(mm);
703 |
704 | 	return r;
705 | }
706 |
707 | /*
708 |  * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table
709 |  * changes, and check whether the pages backing this ttm range have been
710 |  * invalidated
711 |  *
712 |  * Returns: true if pages are still valid
713 |  */
714 | bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
715 | 				       struct hmm_range *range)
716 | {
717 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
718 |
719 | 	if (!gtt || !gtt->userptr || !range)
720 | 		return false;
721 |
722 | 	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
723 | 			 gtt->userptr, ttm->num_pages);
724 |
725 | 	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
726 |
727 | 	/*
728 | 	 * FIXME: Must always hold notifier_lock for this, and must
729 | 	 * not ignore the return code.
730 | 	 */
731 | 	return !amdgpu_hmm_range_get_pages_done(range);
732 | }
733 | #endif
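/*
 * Annotation: the AMDGPU_GEM_USERPTR_ANONONLY check above rejects
 * file-backed VMAs, so only anonymous memory may back such a userptr BO;
 * pages belonging to a file mapping can be written back and replaced
 * independently of the GPU mapping.
 */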
733 |
734 | /*
735 |  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
736 |  *
737 |  * Called by amdgpu_cs_list_validate(). This creates the page list
738 |  * that backs user memory and will ultimately be mapped into the device
739 |  * address space.
740 |  */
741 | void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct vm_page **pages)
742 | {
743 | 	unsigned long i;
744 |
745 | 	for (i = 0; i < ttm->num_pages; ++i)
746 | 		ttm->pages[i] = pages ? pages[i] : NULL;
747 | }
748 |
749 | /*
750 |  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
751 |  *
752 |  * Called by amdgpu_ttm_backend_bind()
753 |  */
754 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
755 | 				     struct ttm_tt *ttm)
756 | {
757 | 	STUB();
758 | 	return -ENOSYS;
759 | #ifdef notyet
760 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
761 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
762 | 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
763 | 	enum dma_data_direction direction = write ?
764 | 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
765 | 	int r;
766 |
767 | 	/* Allocate an SG array and squash pages into it */
768 | 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
769 | 				      (u64)ttm->num_pages << PAGE_SHIFT,
770 | 				      GFP_KERNEL);
771 | 	if (r)
772 | 		goto release_sg;
773 |
774 | 	/* Map SG to device */
775 | 	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
776 | 	if (r)
777 | 		goto release_sg;
778 |
779 | 	/* convert SG to linear array of pages and dma addresses */
780 | 	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
781 | 				       ttm->num_pages);
782 |
783 | 	return 0;
784 |
785 | release_sg:
786 | 	kfree(ttm->sg);
787 | 	ttm->sg = NULL;
788 | 	return r;
789 | #endif
790 | }
791 |
792 | /*
793 |  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
794 |  */
795 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
796 | 					struct ttm_tt *ttm)
797 | {
798 | 	STUB();
799 | #ifdef notyet
800 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
801 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
802 | 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
803 | 	enum dma_data_direction direction = write ?
804 | 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
805 |
806 | 	/* double check that we don't free the table twice */
807 | 	if (!ttm->sg || !ttm->sg->sgl)
808 | 		return;
809 |
810 | 	/* unmap the pages mapped to the device */
811 | 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
812 | 	sg_free_table(ttm->sg);
813 | #endif
814 | }
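/*
 * Annotation: both userptr pin/unpin paths are stubbed out in this port
 * (STUB() plus "#ifdef notyet").  The Linux code they wrap builds an
 * sg_table from the resolved user pages, DMA-maps it, and flattens it into
 * the ttm_tt's dma_address array; the reverse path unmaps and frees the
 * table.
 */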
815 |
816 | static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
817 | 				 struct ttm_buffer_object *tbo,
818 | 				 uint64_t flags)
819 | {
820 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
821 | 	struct ttm_tt *ttm = tbo->ttm;
822 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
823 |
824 | 	if (amdgpu_bo_encrypted(abo))
825 | 		flags |= AMDGPU_PTE_TMZ;
826 |
827 | 	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
828 | 		uint64_t page_idx = 1;
829 |
830 | 		amdgpu_gart_bind(adev, gtt->offset, page_idx,
831 | 				 gtt->ttm.dma_address, flags);
832 |
833 | 		/* The memory type of the first page defaults to UC. Now
834 | 		 * modify the memory type to NC from the second page of
835 | 		 * the BO onward.
836 | 		 */
837 | 		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
838 | 		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
839 |
840 | 		amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
841 | 				 ttm->num_pages - page_idx,
842 | 				 &(gtt->ttm.dma_address[page_idx]), flags);
843 | 	} else {
844 | 		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
845 | 				 gtt->ttm.dma_address, flags);
846 | 	}
847 | }
848 |
849 | /*
850 |  * amdgpu_ttm_backend_bind - Bind GTT memory
851 |  *
852 |  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
853 |  * This handles binding GTT memory to the device address space.
854 |  */
855 | static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
856 | 				   struct ttm_tt *ttm,
857 | 				   struct ttm_resource *bo_mem)
858 | {
859 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
860 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
861 | 	uint64_t flags;
862 | 	int r;
863 |
864 | 	if (!bo_mem)
865 | 		return -EINVAL;
866 |
867 | 	if (gtt->bound)
868 | 		return 0;
869 |
870 | 	if (gtt->userptr) {
871 | 		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
872 | 		if (r) {
873 | 			DRM_ERROR("failed to pin userptr\n");
874 | 			return r;
875 | 		}
876 | 	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
877 | 		if (!ttm->sg) {
878 | 			struct dma_buf_attachment *attach;
879 | 			struct sg_table *sgt;
880 |
881 | 			attach = gtt->gobj->import_attach;
      ^ Value stored to 'attach' is never read
882 | #ifdef notyet
883 | 			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
884 | 			if (IS_ERR(sgt))
885 | 				return PTR_ERR(sgt);
886 | #else
887 | 			STUB();
888 | 			return -ENOSYS;
889 | #endif
890 |
891 | 			ttm->sg = sgt;
892 | 		}
893 |
894 | 		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
895 | 					       ttm->num_pages);
896 | 	}
897 |
898 | 	if (!ttm->num_pages) {
899 | 		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
900 | 		     ttm->num_pages, bo_mem, ttm);
901 | 	}
902 |
903 | 	if (bo_mem->mem_type != TTM_PL_TT ||
904 | 	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
905 | 		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
906 | 		return 0;
907 | 	}
908 |
909 | 	/* compute PTE flags relevant to this BO memory */
910 | 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
911 |
912 | 	/* bind pages into GART page tables */
913 | 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
914 | 	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
915 | 			 gtt->ttm.dma_address, flags);
916 | 	gtt->bound = true;
917 | 	return 0;
918 | }
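/*
 * Annotation on the analyzer finding at line 881: 'attach' is assigned from
 * gtt->gobj->import_attach, but its only consumer, the
 * dma_buf_map_attachment() call, is compiled out behind "#ifdef notyet" in
 * this port, so the branch always takes the STUB()/-ENOSYS path and the
 * store is dead in this configuration.  The assignment becomes live again
 * once the dma-buf import path is enabled.
 */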
919 |
920 | /*
921 |  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
922 |  * through AGP or GART aperture.
923 |  *
924 |  * If bo is accessible through AGP aperture, then use AGP aperture
925 |  * to access bo; otherwise allocate logical space in GART aperture
926 |  * and map bo to GART aperture.
927 |  */
928 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
929 | {
930 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
931 | 	struct ttm_operation_ctx ctx = { false, false };
932 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
933 | 	struct ttm_placement placement;
934 | 	struct ttm_place placements;
935 | 	struct ttm_resource *tmp;
936 | 	uint64_t addr, flags;
937 | 	int r;
938 |
939 | 	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
940 | 		return 0;
941 |
942 | 	addr = amdgpu_gmc_agp_addr(bo);
943 | 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
944 | 		bo->resource->start = addr >> PAGE_SHIFT;
945 | 		return 0;
946 | 	}
947 |
948 | 	/* allocate GART space */
949 | 	placement.num_placement = 1;
950 | 	placement.placement = &placements;
951 | 	placement.num_busy_placement = 1;
952 | 	placement.busy_placement = &placements;
953 | 	placements.fpfn = 0;
954 | 	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
955 | 	placements.mem_type = TTM_PL_TT;
956 | 	placements.flags = bo->resource->placement;
957 |
958 | 	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
959 | 	if (unlikely(r))
960 | 		return r;
961 |
962 | 	/* compute PTE flags for this buffer object */
963 | 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
964 |
965 | 	/* Bind pages */
966 | 	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
967 | 	amdgpu_ttm_gart_bind(adev, bo, flags);
968 | 	amdgpu_gart_invalidate_tlb(adev);
969 | 	ttm_resource_free(bo, &bo->resource);
970 | 	ttm_bo_assign_mem(bo, tmp);
971 |
972 | 	return 0;
973 | }
974 |
975 | /*
976 |  * amdgpu_ttm_recover_gart - Rebind GTT pages
977 |  *
978 |  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
979 |  * rebind GTT pages during a GPU reset.
980 |  */
981 | void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
982 | {
983 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
984 | 	uint64_t flags;
985 |
986 | 	if (!tbo->ttm)
987 | 		return;
988 |
989 | 	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
990 | 	amdgpu_ttm_gart_bind(adev, tbo, flags);
991 | }
992 |
993 | /*
994 |  * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
995 |  *
996 |  * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
997 |  * ttm_tt_destroy().
998 |  */
999 | static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1000 | 				      struct ttm_tt *ttm)
1001 | {
1002 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1003 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1004 |
1005 | 	/* if the pages have userptr pinning then clear that first */
1006 | 	if (gtt->userptr) {
1007 | 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1008 | 	} else if (ttm->sg && gtt->gobj->import_attach) {
1009 | 		struct dma_buf_attachment *attach;
1010 |
1011 | 		attach = gtt->gobj->import_attach;
1012 | #ifdef notyet
1013 | 		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1014 | #else
1015 | 		STUB();
1016 | #endif
1017 | 		ttm->sg = NULL;
1018 | 	}
1019 |
1020 | 	if (!gtt->bound)
1021 | 		return;
1022 |
1023 | 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1024 | 		return;
1025 |
1026 | 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1027 | 	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1028 | 	gtt->bound = false;
1029 | }
1030 |
1031 | static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1032 | 				       struct ttm_tt *ttm)
1033 | {
1034 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1035 |
1036 | #ifdef notyet
1037 | 	if (gtt->usertask)
1038 | 		put_task_struct(gtt->usertask);
1039 | #endif
1040 |
1041 | 	ttm_tt_fini(&gtt->ttm);
1042 | 	kfree(gtt);
1043 | }
1044 |
1045 | /**
1046 |  * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1047 |  *
1048 |  * @bo: The buffer object to create a GTT ttm_tt object around
1049 |  * @page_flags: Page flags to be added to the ttm_tt object
1050 |  *
1051 |  * Called by ttm_tt_create().
1052 |  */
1053 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1054 | 					   uint32_t page_flags)
1055 | {
1056 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1057 | 	struct amdgpu_ttm_tt *gtt;
1058 | 	enum ttm_caching caching;
1059 |
1060 | 	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1061 | 	if (gtt == NULL) {
1062 | 		return NULL;
1063 | 	}
1064 | 	gtt->gobj = &bo->base;
1065 |
1066 | 	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1067 | 		caching = ttm_write_combined;
1068 | 	else
1069 | 		caching = ttm_cached;
1070 |
1071 | 	/* allocate space for the uninitialized page entries */
1072 | 	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1073 | 		kfree(gtt);
1074 | 		return NULL;
1075 | 	}
1076 | 	return &gtt->ttm;
1077 | }
1078 |
1079 | /*
1080 |  * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1081 |  *
1082 |  * Map the pages of a ttm_tt object to an address space visible
1083 |  * to the underlying device.
1084 |  */
1085 | static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1086 | 				  struct ttm_tt *ttm,
1087 | 				  struct ttm_operation_ctx *ctx)
1088 | {
1089 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1090 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1091 | 	pgoff_t i;
1092 | 	int ret;
1093 |
1094 | 	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1095 | 	if (gtt->userptr) {
1096 | 		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1097 | 		if (!ttm->sg)
1098 | 			return -ENOMEM;
1099 | 		return 0;
1100 | 	}
1101 |
1102 | 	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1103 | 		return 0;
1104 |
1105 | 	ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1106 | 	if (ret)
1107 | 		return ret;
1108 |
1109 | #ifdef notyet
1110 | 	for (i = 0; i < ttm->num_pages; ++i)
1111 | 		ttm->pages[i]->mapping = bdev->dev_mapping;
1112 | #endif
1113 |
1114 | 	return 0;
1115 | }
1116 |
1117 | /*
1118 |  * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1119 |  *
1120 |  * Unmaps pages of a ttm_tt object from the device address space and
1121 |  * unpopulates the page array backing it.
1122 |  */
1123 | static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1124 | 				     struct ttm_tt *ttm)
1125 | {
1126 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1127 | 	struct amdgpu_device *adev;
1128 | 	pgoff_t i;
1129 | 	struct vm_page *page;
1130 |
1131 | 	amdgpu_ttm_backend_unbind(bdev, ttm);
1132 |
1133 | 	if (gtt->userptr) {
1134 | 		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1135 | 		kfree(ttm->sg);
1136 | 		ttm->sg = NULL;
1137 | 		return;
1138 | 	}
1139 |
1140 | 	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1141 | 		return;
1142 |
1143 | 	for (i = 0; i < ttm->num_pages; ++i) {
1144 | 		page = ttm->pages[i];
1145 | 		if (unlikely(page == NULL))
1146 | 			continue;
1147 | 		pmap_page_protect(page, PROT_NONE);
1148 | 	}
1149 |
1150 | 	adev = amdgpu_ttm_adev(bdev);
1151 | 	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1152 | }
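/*
 * Annotation (an interpretation of this port): the pmap_page_protect(page,
 * PROT_NONE) loop appears to be the OpenBSD substitute for the Linux
 * page->mapping teardown compiled out above, revoking any remaining CPU
 * mappings of each page before the pages are handed back to the TTM pool.
 */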
1153 |
1154 | /**
1155 |  * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1156 |  * task
1157 |  *
1158 |  * @tbo: The ttm_buffer_object that contains the userptr
1159 |  * @user_addr: The returned value
1160 |  */
1161 | int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1162 | 			      uint64_t *user_addr)
1163 | {
1164 | 	struct amdgpu_ttm_tt *gtt;
1165 |
1166 | 	if (!tbo->ttm)
1167 | 		return -EINVAL;
1168 |
1169 | 	gtt = (void *)tbo->ttm;
1170 | 	*user_addr = gtt->userptr;
1171 | 	return 0;
1172 | }
1173 |
1174 | /**
1175 |  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1176 |  * task
1177 |  *
1178 |  * @bo: The ttm_buffer_object to bind this userptr to
1179 |  * @addr: The address in the current tasks VM space to use
1180 |  * @flags: Requirements of userptr object.
1181 |  *
1182 |  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1183 |  * to current task
1184 |  */
1185 | int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1186 | 			      uint64_t addr, uint32_t flags)
1187 | {
1188 | 	struct amdgpu_ttm_tt *gtt;
1189 |
1190 | 	if (!bo->ttm) {
1191 | 		/* TODO: We want a separate TTM object type for userptrs */
1192 | 		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1193 | 		if (bo->ttm == NULL)
1194 | 			return -ENOMEM;
1195 | 	}
1196 |
1197 | 	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1198 | 	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1199 |
1200 | 	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1201 | 	gtt->userptr = addr;
1202 | 	gtt->userflags = flags;
1203 |
1204 | #ifdef notyet
1205 | 	if (gtt->usertask)
1206 | 		put_task_struct(gtt->usertask);
1207 | 	gtt->usertask = current->group_leader;
1208 | 	get_task_struct(gtt->usertask);
1209 | #endif
1210 |
1211 | 	return 0;
1212 | }
1213 |
1214 | /*
1215 |  * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1216 |  */
1217 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1218 | {
1219 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1220 |
1221 | 	if (gtt == NULL)
1222 | 		return NULL;
1223 |
1224 | 	if (gtt->usertask == NULL)
1225 | 		return NULL;
1226 |
1227 | #ifdef notyet
1228 | 	return gtt->usertask->mm;
1229 | #else
1230 | 	STUB();
1231 | 	return NULL;
1232 | #endif
1233 | }
1234 |
1235 | /*
1236 |  * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
1237 |  * address range for the current task.
1238 |  *
1239 |  */
1240 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1241 | 				  unsigned long end, unsigned long *userptr)
1242 | {
1243 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1244 | 	unsigned long size;
1245 |
1246 | 	if (gtt == NULL || !gtt->userptr)
1247 | 		return false;
1248 |
1249 | 	/* Return false if no part of the ttm_tt object lies within
1250 | 	 * the range
1251 | 	 */
1252 | 	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1253 | 	if (gtt->userptr > end || gtt->userptr + size <= start)
1254 | 		return false;
1255 |
1256 | 	if (userptr)
1257 | 		*userptr = gtt->userptr;
1258 | 	return true;
1259 | }
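/*
 * Annotation on the overlap test above: the BO spans
 * [userptr, userptr + size), 'end' is treated as inclusive, so the two
 * ranges intersect exactly when userptr <= end and userptr + size > start.
 */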
1260 |
1261 | /*
1262 |  * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1263 |  */
1264 | bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1265 | {
1266 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1267 |
1268 | 	if (gtt == NULL || !gtt->userptr)
1269 | 		return false;
1270 |
1271 | 	return true;
1272 | }
1273 |
1274 | /*
1275 |  * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1276 |  */
1277 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1278 | {
1279 | 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1280 |
1281 | 	if (gtt == NULL)
1282 | 		return false;
1283 |
1284 | 	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1285 | }
1286 |
1287 | /**
1288 |  * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1289 |  *
1290 |  * @ttm: The ttm_tt object to compute the flags for
1291 |  * @mem: The memory registry backing this ttm_tt object
1292 |  *
1293 |  * Figure out the flags to use for a VM PDE (Page Directory Entry).
1294 |  */
1295 | uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1296 | {
1297 | 	uint64_t flags = 0;
1298 |
1299 | 	if (mem && mem->mem_type != TTM_PL_SYSTEM)
1300 | 		flags |= AMDGPU_PTE_VALID;
1301 |
1302 | 	if (mem && (mem->mem_type == TTM_PL_TT ||
1303 | 		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
1304 | 		flags |= AMDGPU_PTE_SYSTEM;
1305 |
1306 | 		if (ttm->caching == ttm_cached)
1307 | 			flags |= AMDGPU_PTE_SNOOPED;
1308 | 	}
1309 |
1310 | 	if (mem && mem->mem_type == TTM_PL_VRAM &&
1311 | 	    mem->bus.caching == ttm_cached)
1312 | 		flags |= AMDGPU_PTE_SNOOPED;
1313 |
1314 | 	return flags;
1315 | }
1316 |
1317 | /**
1318 |  * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1319 |  *
1320 |  * @adev: amdgpu_device pointer
1321 |  * @ttm: The ttm_tt object to compute the flags for
1322 |  * @mem: The memory registry backing this ttm_tt object
1323 |  *
1324 |  * Figure out the flags to use for a VM PTE (Page Table Entry).
1325 |  */
1326 | uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1327 | 				 struct ttm_resource *mem)
1328 | {
1329 | 	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1330 |
1331 | 	flags |= adev->gart.gart_pte_flags;
1332 | 	flags |= AMDGPU_PTE_READABLE;
1333 |
1334 | 	if (!amdgpu_ttm_tt_is_readonly(ttm))
1335 | 		flags |= AMDGPU_PTE_WRITEABLE;
1336 |
1337 | 	return flags;
1338 | }
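/*
 * Worked example: a cache-coherent, writable GTT BO gets AMDGPU_PTE_VALID |
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED from the PDE helper, and
 * amdgpu_ttm_tt_pte_flags() then adds AMDGPU_PTE_READABLE |
 * AMDGPU_PTE_WRITEABLE plus the per-ASIC gart_pte_flags.
 */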
1339 |
1340 | /*
1341 |  * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1342 |  * object.
1343 |  *
1344 |  * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1345 |  * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1346 |  * it can find space for a new object and by ttm_bo_force_list_clean() which is
1347 |  * used to clean out a memory space.
1348 |  */
1349 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1350 | 					    const struct ttm_place *place)
1351 | {
1352 | 	struct dma_resv_iter resv_cursor;
1353 | 	struct dma_fence *f;
1354 |
1355 | 	if (!amdgpu_bo_is_amdgpu_bo(bo))
1356 | 		return ttm_bo_eviction_valuable(bo, place);
1357 |
1358 | 	/* Swapout? */
1359 | 	if (bo->resource->mem_type == TTM_PL_SYSTEM)
1360 | 		return true;
1361 |
1362 | 	if (bo->type == ttm_bo_type_kernel &&
1363 | 	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1364 | 		return false;
1365 |
1366 | 	/* If bo is a KFD BO, check if the bo belongs to the current process.
1367 | 	 * If true, then return false as any KFD process needs all its BOs to
1368 | 	 * be resident to run successfully
1369 | 	 */
1370 | 	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1371 | 				DMA_RESV_USAGE_BOOKKEEP, f) {
1372 | #ifdef notyet
1373 | 		if (amdkfd_fence_check_mm(f, current->mm))
1374 | 			return false;
1375 | #endif
1376 | 	}
1377 |
1378 | 	/* Preemptible BOs don't own system resources managed by the
1379 | 	 * driver (pages, VRAM, GART space). They point to resources
1380 | 	 * owned by someone else (e.g. pageable memory in user mode
1381 | 	 * or a DMABuf). They are used in a preemptible context so we
1382 | 	 * can guarantee no deadlocks and good QoS in case of MMU
1383 | 	 * notifiers or DMABuf move notifiers from the resource owner.
1384 | 	 */
1385 | 	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1386 | 		return false;
1387 |
1388 | 	if (bo->resource->mem_type == TTM_PL_TT &&
1389 | 	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1390 | 		return false;
1391 |
1392 | 	return ttm_bo_eviction_valuable(bo, place);
1393 | }
1394 |
1395 | static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1396 | 				      void *buf, size_t size, bool write)
1397 | {
1398 | 	STUB();
1399 | #ifdef notyet
1400 | 	while (size) {
1401 | 		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1402 | 		uint64_t bytes = 4 - (pos & 0x3);
1403 | 		uint32_t shift = (pos & 0x3) * 8;
1404 | 		uint32_t mask = 0xffffffff << shift;
1405 | 		uint32_t value = 0;
1406 |
1407 | 		if (size < bytes) {
1408 | 			mask &= 0xffffffff >> (bytes - size) * 8;
1409 | 			bytes = size;
1410 | 		}
1411 |
1412 | 		if (mask != 0xffffffff) {
1413 | 			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1414 | 			if (write) {
1415 | 				value &= ~mask;
1416 | 				value |= (*(uint32_t *)buf << shift) & mask;
1417 | 				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1418 | 			} else {
1419 | 				value = (value & mask) >> shift;
1420 | 				memcpy(buf, &value, bytes);
1421 | 			}
1422 | 		} else {
1423 | 			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1424 | 		}
1425 |
1426 | 		pos += bytes;
1427 | 		buf += bytes;
1428 | 		size -= bytes;
1429 | 	}
1430 | #endif
1431 | }
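/*
 * Worked example of the sub-word read-modify-write above: reading one byte
 * at pos = ...2 gives aligned_pos = ...0, shift = 16 and an initial mask of
 * 0xffff0000, which the size clamp narrows to 0x00ff0000; the dword is
 * fetched whole and the wanted byte extracted as (value & mask) >> 16.
 * Writes follow the same pattern but merge the new bits into the dword
 * before storing it back.
 */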
1432 | |
1433 | static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, |
1434 | unsigned long offset, void *buf, int len, int write) |
1435 | { |
1436 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
1437 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
1438 | struct amdgpu_res_cursor src_mm; |
1439 | struct amdgpu_job *job; |
1440 | struct dma_fence *fence; |
1441 | uint64_t src_addr, dst_addr; |
1442 | unsigned int num_dw; |
1443 | int r, idx; |
1444 | |
1445 | if (len != PAGE_SIZE(1 << 12)) |
1446 | return -EINVAL22; |
1447 | |
1448 | if (!adev->mman.sdma_access_ptr) |
1449 | return -EACCES13; |
1450 | |
1451 | if (!drm_dev_enter(adev_to_drm(adev), &idx)) |
1452 | return -ENODEV19; |
1453 | |
1454 | if (write) |
1455 | memcpy(adev->mman.sdma_access_ptr, buf, len)__builtin_memcpy((adev->mman.sdma_access_ptr), (buf), (len )); |
1456 | |
1457 | num_dw = roundup2(adev->mman.buffer_funcs->copy_num_dw, 8)(((adev->mman.buffer_funcs->copy_num_dw) + ((8) - 1)) & (~((__typeof(adev->mman.buffer_funcs->copy_num_dw))(8) - 1))); |
1458 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); |
1459 | if (r) |
1460 | goto out; |
1461 | |
1462 | amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); |
1463 | src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; |
1464 | dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); |
1465 | if (write)
1466 | swap(src_addr, dst_addr);
1467 |
1468 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
1469 |
1470 | amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
1471 | WARN_ON(job->ibs[0].length_dw > num_dw);
1472 |
1473 | r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
1474 | if (r) { |
1475 | amdgpu_job_free(job); |
1476 | goto out; |
1477 | } |
1478 | |
1479 | if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1480 | r = -ETIMEDOUT;
1481 | dma_fence_put(fence);
1482 |
1483 | if (!(r || write))
1484 | memcpy(buf, adev->mman.sdma_access_ptr, len);
1485 | out: |
1486 | drm_dev_exit(idx); |
1487 | return r; |
1488 | } |
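/*
 * Example (editor's sketch): the bounce-buffer pattern above, reduced to its
 * control flow. dma_copy_and_wait() is a hypothetical stand-in for the
 * job-alloc/emit/submit/fence-wait sequence; the key point is that a write
 * stages the data first and then swaps the DMA direction:
 *
 *	uint64_t src = vram_addr, dst = bounce_addr;    // default: VRAM -> bounce
 *	if (write) {
 *		memcpy(bounce_cpu_ptr, buf, len);       // stage user data
 *		swap(src, dst);                         // bounce -> VRAM instead
 *	}
 *	int r = dma_copy_and_wait(src, dst, len);       // one PAGE_SIZE SDMA copy
 *	if (!r && !write)
 *		memcpy(buf, bounce_cpu_ptr, len);       // hand VRAM contents back
 */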
1489 | |
1490 | /** |
1491 | * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. |
1492 | * |
1493 | * @bo: The buffer object to read/write |
1494 | * @offset: Offset into buffer object |
1495 | * @buf: Secondary buffer to write/read from |
1496 | * @len: Length in bytes of access |
1497 | * @write: true if writing |
1498 | * |
1499 | * This is used to access VRAM that backs a buffer object via MMIO |
1500 | * access for debugging purposes. |
1501 | */ |
1502 | static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, |
1503 | unsigned long offset, void *buf, int len, |
1504 | int write) |
1505 | { |
1506 | struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); |
1507 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
1508 | struct amdgpu_res_cursor cursor; |
1509 | int ret = 0; |
1510 | |
1511 | if (bo->resource->mem_type != TTM_PL_VRAM)
1512 | return -EIO;
1513 | |
1514 | if (amdgpu_device_has_timeouts_enabled(adev) && |
1515 | !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write)) |
1516 | return len; |
1517 | |
1518 | amdgpu_res_first(bo->resource, offset, len, &cursor); |
1519 | while (cursor.remaining) { |
1520 | size_t count, size = cursor.size; |
1521 | loff_t pos = cursor.start; |
1522 | |
1523 | count = amdgpu_device_aper_access(adev, pos, buf, size, write); |
1524 | size -= count; |
1525 | if (size) { |
1526 | /* use the MM path to access the rest of VRAM and handle unaligned addresses */
1527 | pos += count; |
1528 | buf += count; |
1529 | amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write); |
1530 | } |
1531 | |
1532 | ret += cursor.size; |
1533 | buf += cursor.size; |
1534 | amdgpu_res_next(&cursor, cursor.size); |
1535 | } |
1536 | |
1537 | return ret; |
1538 | } |
1539 | |
1540 | static void |
1541 | amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) |
1542 | { |
1543 | amdgpu_bo_move_notify(bo, false, NULL);
1544 | } |
1545 | |
1546 | static struct ttm_device_funcs amdgpu_bo_driver = { |
1547 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
1548 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
1549 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
1550 | .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, |
1551 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, |
1552 | .evict_flags = &amdgpu_evict_flags, |
1553 | .move = &amdgpu_bo_move, |
1554 | .delete_mem_notify = &amdgpu_bo_delete_mem_notify, |
1555 | .release_notify = &amdgpu_bo_release_notify, |
1556 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, |
1557 | .io_mem_pfn = amdgpu_ttm_io_mem_pfn, |
1558 | .access_memory = &amdgpu_ttm_access_memory, |
1559 | }; |
1560 | |
1561 | /* |
1562 | * Firmware Reservation functions |
1563 | */ |
1564 | /** |
1565 | * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram |
1566 | * |
1567 | * @adev: amdgpu_device pointer |
1568 | * |
1569 | * free fw reserved vram if it has been reserved. |
1570 | */ |
1571 | static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) |
1572 | { |
1573 | amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo, |
1574 | NULL, &adev->mman.fw_vram_usage_va);
1575 | } |
1576 | |
1577 | /* |
1578 | * Driver Reservation functions |
1579 | */ |
1580 | /** |
1581 | * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram |
1582 | * |
1583 | * @adev: amdgpu_device pointer |
1584 | * |
1585 | * free drv reserved vram if it has been reserved. |
1586 | */ |
1587 | static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev) |
1588 | { |
1589 | amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo, |
1590 | NULL,
1591 | NULL);
1592 | } |
1593 | |
1594 | /** |
1595 | * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw |
1596 | * |
1597 | * @adev: amdgpu_device pointer |
1598 | * |
1599 | * create bo vram reservation from fw. |
1600 | */ |
1601 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) |
1602 | { |
1603 | uint64_t vram_size = adev->gmc.visible_vram_size; |
1604 | |
1605 | adev->mman.fw_vram_usage_va = NULL;
1606 | adev->mman.fw_vram_usage_reserved_bo = NULL;
1607 | |
1608 | if (adev->mman.fw_vram_usage_size == 0 || |
1609 | adev->mman.fw_vram_usage_size > vram_size) |
1610 | return 0; |
1611 | |
1612 | return amdgpu_bo_create_kernel_at(adev, |
1613 | adev->mman.fw_vram_usage_start_offset, |
1614 | adev->mman.fw_vram_usage_size, |
1615 | &adev->mman.fw_vram_usage_reserved_bo, |
1616 | &adev->mman.fw_vram_usage_va); |
1617 | } |
1618 | |
1619 | /** |
1620 | * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver |
1621 | * |
1622 | * @adev: amdgpu_device pointer |
1623 | * |
1624 | * create bo vram reservation from driver.
1625 | */ |
1626 | static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev) |
1627 | { |
1628 | uint64_t vram_size = adev->gmc.visible_vram_size; |
1629 | |
1630 | adev->mman.drv_vram_usage_reserved_bo = NULL;
1631 | |
1632 | if (adev->mman.drv_vram_usage_size == 0 || |
1633 | adev->mman.drv_vram_usage_size > vram_size) |
1634 | return 0; |
1635 | |
1636 | return amdgpu_bo_create_kernel_at(adev, |
1637 | adev->mman.drv_vram_usage_start_offset, |
1638 | adev->mman.drv_vram_usage_size, |
1639 | &adev->mman.drv_vram_usage_reserved_bo, |
1640 | NULL);
1641 | } |
1642 | |
1643 | /* |
1644 | * Memory training reservation functions
1645 | */ |
1646 | |
1647 | /** |
1648 | * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram |
1649 | * |
1650 | * @adev: amdgpu_device pointer |
1651 | * |
1652 | * free memory training reserved vram if it has been reserved. |
1653 | */ |
1654 | static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) |
1655 | { |
1656 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; |
1657 | |
1658 | ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; |
1659 | amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1660 | ctx->c2p_bo = NULL;
1661 | |
1662 | return 0; |
1663 | } |
1664 | |
1665 | static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev, |
1666 | uint32_t reserve_size) |
1667 | { |
1668 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; |
1669 | |
1670 | memset(ctx, 0, sizeof(*ctx));
1671 |
1672 | ctx->c2p_train_data_offset =
1673 | roundup2((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1674 | ctx->p2c_train_data_offset =
1675 | (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1676 | ctx->train_data_size =
1677 | GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1678 |
1679 | DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1680 | ctx->train_data_size,
1681 | ctx->p2c_train_data_offset,
1682 | ctx->c2p_train_data_offset);
1683 | } |
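/*
 * Example (editor's sketch): the offset math above for a hypothetical 8 GiB
 * board with reserve_size = 4 MiB. roundup2(x, SZ_1M) rounds x up to the
 * next 1 MiB boundary (a no-op here, since all inputs are MiB multiples):
 *
 *	mc_vram_size          = 0x200000000;           // 8 GiB
 *	c2p_train_data_offset = roundup2(0x200000000 - 0x400000 - 0x100000,
 *	                                 0x100000);    // 0x1FFB00000
 *	p2c_train_data_offset = 0x200000000 - 0x8000;  // GDDR6_MEM_TRAINING_OFFSET
 *	train_data_size       = 0x1000;                // 4 KiB
 */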
1684 | |
1685 | /* |
1686 | * reserve TMR memory at the top of VRAM which holds |
1687 | * IP Discovery data and is protected by PSP. |
1688 | */ |
1689 | static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) |
1690 | { |
1691 | struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; |
1692 | bool mem_train_support = false;
1693 | uint32_t reserve_size = 0; |
1694 | int ret; |
1695 | |
1696 | if (!amdgpu_sriov_vf(adev)) {
1697 | if (amdgpu_atomfirmware_mem_training_supported(adev))
1698 | mem_train_support = true;
1699 | else
1700 | DRM_DEBUG("memory training is not supported!\n");
1701 | } |
1702 | |
1703 | /*
1704 | * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and
1705 | * onwards for all use cases (IP discovery, G6 memory training, profiling,
1706 | * diagnostic data, etc.)
1707 | *
1708 | * Otherwise, fall back to the legacy approach of checking and reserving a TMR
1709 | * block for IP discovery data and G6 memory training data respectively.
1710 | */
1710 | if (adev->bios) |
1711 | reserve_size = |
1712 | amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); |
1713 | if (!reserve_size) |
1714 | reserve_size = DISCOVERY_TMR_OFFSET;
1715 | |
1716 | if (mem_train_support) { |
1717 | /* reserve vram for mem train according to TMR location */ |
1718 | amdgpu_ttm_training_data_block_init(adev, reserve_size); |
1719 | ret = amdgpu_bo_create_kernel_at(adev, |
1720 | ctx->c2p_train_data_offset, |
1721 | ctx->train_data_size, |
1722 | &ctx->c2p_bo, |
1723 | NULL);
1724 | if (ret) {
1725 | DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1726 | amdgpu_ttm_training_reserve_vram_fini(adev); |
1727 | return ret; |
1728 | } |
1729 | ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; |
1730 | } |
1731 | |
1732 | ret = amdgpu_bo_create_kernel_at(adev, |
1733 | adev->gmc.real_vram_size - reserve_size, |
1734 | reserve_size, |
1735 | &adev->mman.fw_reserved_memory, |
1736 | NULL);
1737 | if (ret) {
1738 | DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1739 | amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
1740 | NULL, NULL);
1741 | return ret; |
1742 | } |
1743 | |
1744 | return 0; |
1745 | } |
1746 | |
1747 | /* |
1748 | * amdgpu_ttm_init - Init the memory management (ttm) as well as various |
1749 | * gtt/vram related fields. |
1750 | * |
1751 | * This initializes all of the memory space pools that the TTM layer |
1752 | * will need such as the GTT space (system memory mapped to the device), |
1753 | * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which |
1754 | * can be mapped per VMID. |
1755 | */ |
1756 | int amdgpu_ttm_init(struct amdgpu_device *adev) |
1757 | { |
1758 | uint64_t gtt_size; |
1759 | int r; |
1760 | u64 vis_vram_limit; |
1761 | |
1762 | rw_init(&adev->mman.gtt_window_lock, "gttwin");
1763 | |
1764 | /* No other users of the address space, so set it to 0 */
1765 | #ifdef notyet |
1766 | r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, |
1767 | adev_to_drm(adev)->anon_inode->i_mapping, |
1768 | adev_to_drm(adev)->vma_offset_manager, |
1769 | adev->need_swiotlb, |
1770 | dma_addressing_limited(adev->dev)); |
1771 | #else |
1772 | r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, |
1773 | /*adev_to_drm(adev)->anon_inode->i_mapping*/NULL,
1774 | adev_to_drm(adev)->vma_offset_manager, |
1775 | adev->need_swiotlb, |
1776 | dma_addressing_limited(adev->dev)); |
1777 | #endif |
1778 | if (r) { |
1779 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1780 | return r; |
1781 | } |
1782 | adev->mman.bdev.iot = adev->iot; |
1783 | adev->mman.bdev.memt = adev->memt; |
1784 | adev->mman.bdev.dmat = adev->dmat; |
1785 | adev->mman.initialized = true;
1786 | |
1787 | /* Initialize VRAM pool with all of VRAM divided into pages */ |
1788 | r = amdgpu_vram_mgr_init(adev); |
1789 | if (r) { |
1790 | DRM_ERROR("Failed initializing VRAM heap.\n");
1791 | return r; |
1792 | } |
1793 | |
1794 | /* Reduce size of CPU-visible VRAM if requested */ |
1795 | vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; |
1796 | if (amdgpu_vis_vram_limit > 0 && |
1797 | vis_vram_limit <= adev->gmc.visible_vram_size) |
1798 | adev->gmc.visible_vram_size = vis_vram_limit; |
1799 | |
1800 | /* Change the size here instead of the init above so only lpfn is affected */ |
1801 | amdgpu_ttm_set_buffer_funcs_status(adev, false);
1802 | #if defined(CONFIG_64BIT) && defined(__linux__)
1803 | #ifdef CONFIG_X86
1804 | if (adev->gmc.xgmi.connected_to_cpu) |
1805 | adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base, |
1806 | adev->gmc.visible_vram_size); |
1807 | |
1808 | else |
1809 | #endif |
1810 | adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base, |
1811 | adev->gmc.visible_vram_size); |
1812 | #else |
1813 | if (bus_space_map(adev->memt, adev->gmc.aper_base, |
1814 | adev->gmc.visible_vram_size, |
1815 | BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
1816 | &adev->mman.aper_bsh)) { |
1817 | adev->mman.aper_base_kaddr = NULL;
1818 | } else {
1819 | adev->mman.aper_base_kaddr = bus_space_vaddr(adev->memt,
1820 | adev->mman.aper_bsh);
1821 | } |
1822 | #endif |
1823 | |
1824 | /* |
1825 | * The reserved VRAM for firmware must be pinned to the specified
1826 | * place in VRAM, so reserve it early.
1827 | */ |
1828 | r = amdgpu_ttm_fw_reserve_vram_init(adev); |
1829 | if (r) { |
1830 | return r; |
1831 | } |
1832 | |
1833 | /* |
1834 | * The reserved VRAM for the driver must be pinned to the specified
1835 | * place in VRAM, so reserve it early.
1836 | */ |
1837 | r = amdgpu_ttm_drv_reserve_vram_init(adev); |
1838 | if (r) |
1839 | return r; |
1840 | |
1841 | /*
1842 | * Only NAVI10 and onwards ASICs support IP discovery.
1843 | * If IP discovery is enabled, a block of memory should be
1844 | * reserved for IP discovery.
1845 | */
1846 | if (adev->mman.discovery_bin) { |
1847 | r = amdgpu_ttm_reserve_tmr(adev); |
1848 | if (r) |
1849 | return r; |
1850 | } |
1851 | |
1852 | /* allocate memory as required for VGA |
1853 | * This is used for VGA emulation and pre-OS scanout buffers to |
1854 | * avoid display artifacts while transitioning between pre-OS |
1855 | * and driver. */ |
1856 | r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, |
1857 | &adev->mman.stolen_vga_memory, |
1858 | NULL);
1859 | if (r) |
1860 | return r; |
1861 | r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, |
1862 | adev->mman.stolen_extended_size, |
1863 | &adev->mman.stolen_extended_memory, |
1864 | NULL);
1865 | if (r) |
1866 | return r; |
1867 | r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, |
1868 | adev->mman.stolen_reserved_size, |
1869 | &adev->mman.stolen_reserved_memory, |
1870 | NULL);
1871 | if (r) |
1872 | return r; |
1873 | |
1874 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1875 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1876 | |
1877 | /* Compute GTT size, either based on 1/2 the size of system RAM
1878 | * or whatever the user passed on module init */
1879 | if (amdgpu_gtt_size == -1) { |
1880 | #ifdef __linux__ |
1881 | struct sysinfo si; |
1882 | |
1883 | si_meminfo(&si); |
1884 | /* Certain GL unit tests for large textures can cause problems |
1885 | * with the OOM killer since there is no way to link this memory |
1886 | * to a process. This was originally mitigated (but not necessarily |
1887 | * eliminated) by limiting the GTT size. The problem is this limit |
1888 | * is often too low for many modern games so just make the limit 1/2 |
1889 | * of system memory which aligns with TTM. The OOM accounting needs |
1890 | * to be addressed, but we shouldn't prevent common 3D applications |
1891 | * from being usable just to potentially mitigate that corner case. |
1892 | */ |
1893 | gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1894 | (u64)si.totalram * si.mem_unit / 2);
1895 | #else
1896 | gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1897 | (u64)ptoa(physmem) / 2);
1898 | #endif |
1899 | } else { |
1900 | gtt_size = (uint64_t)amdgpu_gtt_size << 20; |
1901 | } |
1902 | |
1903 | /* Initialize GTT memory pool */ |
1904 | r = amdgpu_gtt_mgr_init(adev, gtt_size); |
1905 | if (r) { |
1906 | DRM_ERROR("Failed initializing GTT heap.\n");
1907 | return r; |
1908 | } |
1909 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1910 | (unsigned)(gtt_size / (1024 * 1024)));
1911 | |
1912 | /* Initialize preemptible memory pool */ |
1913 | r = amdgpu_preempt_mgr_init(adev); |
1914 | if (r) { |
1915 | DRM_ERROR("Failed initializing PREEMPT heap.\n");
1916 | return r; |
1917 | } |
1918 | |
1919 | /* Initialize various on-chip memory pools */ |
1920 | r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1921 | if (r) {
1922 | DRM_ERROR("Failed initializing GDS heap.\n");
1923 | return r;
1924 | }
1925 |
1926 | r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1927 | if (r) {
1928 | DRM_ERROR("Failed initializing gws heap.\n");
1929 | return r;
1930 | }
1931 |
1932 | r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1933 | if (r) {
1934 | DRM_ERROR("Failed initializing oa heap.\n");
1935 | return r;
1936 | }
1937 | |
1938 | if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
1939 | AMDGPU_GEM_DOMAIN_GTT,
1940 | &adev->mman.sdma_access_bo, NULL,
1941 | &adev->mman.sdma_access_ptr))
1942 | DRM_WARN("Debug VRAM access will use slowpath MM access\n");
1943 | |
1944 | return 0; |
1945 | } |
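/*
 * Example (editor's sketch): the default GTT sizing in amdgpu_ttm_init() as
 * plain arithmetic. AMDGPU_DEFAULT_GTT_SIZE_MB is 3072, so the pool is the
 * larger of 3 GiB and half of system RAM; with a hypothetical 32 GiB host:
 *
 *	uint64_t floor = 3072ULL << 20;                  // 3 GiB default
 *	uint64_t half_ram = (32ULL << 30) / 2;           // 16 GiB
 *	uint64_t gtt_size = half_ram > floor ? half_ram : floor;  // 16 GiB wins
 */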
1946 | |
1947 | /* |
1948 | * amdgpu_ttm_fini - De-initialize the TTM memory pools |
1949 | */ |
1950 | void amdgpu_ttm_fini(struct amdgpu_device *adev) |
1951 | { |
1952 | int idx; |
1953 | if (!adev->mman.initialized) |
1954 | return; |
1955 | |
1956 | amdgpu_ttm_training_reserve_vram_fini(adev); |
1957 | /* return the stolen vga memory back to VRAM */ |
1958 | amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1959 | amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1960 | /* return the FW reserved memory back to VRAM */
1961 | amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
1962 | NULL);
1963 | if (adev->mman.stolen_reserved_size)
1964 | amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
1965 | NULL, NULL);
1966 | amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
1967 | &adev->mman.sdma_access_ptr);
1968 | amdgpu_ttm_fw_reserve_vram_fini(adev); |
1969 | amdgpu_ttm_drv_reserve_vram_fini(adev); |
1970 | |
1971 | if (drm_dev_enter(adev_to_drm(adev), &idx)) { |
1972 | |
1973 | #ifdef __linux__ |
1974 | if (adev->mman.aper_base_kaddr) |
1975 | iounmap(adev->mman.aper_base_kaddr); |
1976 | #else |
1977 | if (adev->mman.aper_base_kaddr) |
1978 | bus_space_unmap(adev->memt, adev->mman.aper_bsh, |
1979 | adev->gmc.visible_vram_size); |
1980 | #endif |
1981 | adev->mman.aper_base_kaddr = NULL;
1982 | |
1983 | drm_dev_exit(idx); |
1984 | } |
1985 | |
1986 | amdgpu_vram_mgr_fini(adev); |
1987 | amdgpu_gtt_mgr_fini(adev); |
1988 | amdgpu_preempt_mgr_fini(adev); |
1989 | ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1990 | ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1991 | ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1992 | ttm_device_fini(&adev->mman.bdev);
1993 | adev->mman.initialized = false;
1994 | DRM_INFO("amdgpu: ttm finalized\n");
1995 | } |
1996 | |
1997 | /** |
1998 | * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions |
1999 | * |
2000 | * @adev: amdgpu_device pointer |
2001 | * @enable: true when we can use buffer functions. |
2002 | * |
2003 | * Enable/disable use of buffer functions during suspend/resume. This should |
2004 | * only be called at bootup or when userspace isn't running. |
2005 | */ |
2006 | void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2007 | { |
2008 | struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2009 | uint64_t size; |
2010 | int r; |
2011 | |
2012 | if (!adev->mman.initialized || amdgpu_in_reset(adev) || |
2013 | adev->mman.buffer_funcs_enabled == enable) |
2014 | return; |
2015 | |
2016 | if (enable) { |
2017 | struct amdgpu_ring *ring; |
2018 | struct drm_gpu_scheduler *sched; |
2019 | |
2020 | ring = adev->mman.buffer_funcs_ring; |
2021 | sched = &ring->sched; |
2022 | r = drm_sched_entity_init(&adev->mman.entity, |
2023 | DRM_SCHED_PRIORITY_KERNEL, &sched, |
2024 | 1, NULL);
2025 | if (r) {
2026 | DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
2027 | r);
2028 | return; |
2029 | } |
2030 | } else { |
2031 | drm_sched_entity_destroy(&adev->mman.entity); |
2032 | dma_fence_put(man->move); |
2033 | man->move = NULL;
2034 | } |
2035 | |
2036 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2037 | if (enable) |
2038 | size = adev->gmc.real_vram_size; |
2039 | else |
2040 | size = adev->gmc.visible_vram_size; |
2041 | man->size = size; |
2042 | adev->mman.buffer_funcs_enabled = enable; |
2043 | } |
2044 | |
2045 | static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, |
2046 | bool direct_submit,
2047 | unsigned int num_dw,
2048 | struct dma_resv *resv,
2049 | bool vm_needs_flush,
2050 | struct amdgpu_job **job) |
2051 | { |
2052 | enum amdgpu_ib_pool_type pool = direct_submit ? |
2053 | AMDGPU_IB_POOL_DIRECT : |
2054 | AMDGPU_IB_POOL_DELAYED; |
2055 | int r; |
2056 | |
2057 | r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job); |
2058 | if (r) |
2059 | return r; |
2060 | |
2061 | if (vm_needs_flush) { |
2062 | (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ? |
2063 | adev->gmc.pdb0_bo : |
2064 | adev->gart.bo); |
2065 | (*job)->vm_needs_flush = true;
2066 | } |
2067 | if (resv) { |
2068 | r = amdgpu_sync_resv(adev, &(*job)->sync, resv, |
2069 | AMDGPU_SYNC_ALWAYS, |
2070 | AMDGPU_FENCE_OWNER_UNDEFINED);
2071 | if (r) {
2072 | DRM_ERROR("sync failed (%d).\n", r);
2073 | amdgpu_job_free(*job); |
2074 | return r; |
2075 | } |
2076 | } |
2077 | return 0; |
2078 | } |
2079 | |
2080 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, |
2081 | uint64_t dst_offset, uint32_t byte_count, |
2082 | struct dma_resv *resv, |
2083 | struct dma_fence **fence, bool direct_submit,
2084 | bool vm_needs_flush, bool tmz)
2085 | { |
2086 | struct amdgpu_device *adev = ring->adev; |
2087 | unsigned num_loops, num_dw; |
2088 | struct amdgpu_job *job; |
2089 | uint32_t max_bytes; |
2090 | unsigned i; |
2091 | int r; |
2092 | |
2093 | if (!direct_submit && !ring->sched.ready) { |
2094 | DRM_ERROR("Trying to move memory with ring turned off.\n");
2095 | return -EINVAL;
2096 | } |
2097 | |
2098 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; |
2099 | num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2100 | num_dw = roundup2(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2101 | r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, |
2102 | resv, vm_needs_flush, &job); |
2103 | if (r) |
2104 | return r; |
2105 | |
2106 | for (i = 0; i < num_loops; i++) { |
2107 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2108 |
2109 | amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2110 | dst_offset, cur_size_in_bytes, tmz);
2111 | |
2112 | src_offset += cur_size_in_bytes; |
2113 | dst_offset += cur_size_in_bytes; |
2114 | byte_count -= cur_size_in_bytes; |
2115 | } |
2116 | |
2117 | amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2118 | WARN_ON(job->ibs[0].length_dw > num_dw);
2119 | if (direct_submit) |
2120 | r = amdgpu_job_submit_direct(job, ring, fence); |
2121 | else |
2122 | r = amdgpu_job_submit(job, &adev->mman.entity, |
2123 | AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2124 | if (r) |
2125 | goto error_free; |
2126 | |
2127 | return r; |
2128 | |
2129 | error_free: |
2130 | amdgpu_job_free(job); |
2131 | DRM_ERROR("Error scheduling IBs (%d)\n", r);
2132 | return r; |
2133 | } |
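/*
 * Example (editor's sketch): how amdgpu_copy_buffer() splits one request into
 * hardware-sized packets. max_bytes comes from the ASIC's buffer_funcs; the
 * value below is only illustrative:
 *
 *	uint32_t max_bytes = 0x3fffe0;         // per-packet HW limit (example)
 *	uint32_t byte_count = 10 << 20;        // 10 MiB request
 *	unsigned int num_loops =
 *		(byte_count + max_bytes - 1) / max_bytes;   // DIV_ROUND_UP -> 3
 *	while (byte_count) {
 *		uint32_t cur = byte_count < max_bytes ? byte_count : max_bytes;
 *		// emit one copy packet for cur bytes, then advance
 *		src_offset += cur; dst_offset += cur; byte_count -= cur;
 *	}
 */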
2134 | |
2135 | static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, |
2136 | uint64_t dst_addr, uint32_t byte_count, |
2137 | struct dma_resv *resv, |
2138 | struct dma_fence **fence, |
2139 | bool vm_needs_flush)
2140 | { |
2141 | struct amdgpu_device *adev = ring->adev; |
2142 | unsigned int num_loops, num_dw; |
2143 | struct amdgpu_job *job; |
2144 | uint32_t max_bytes; |
2145 | unsigned int i; |
2146 | int r; |
2147 | |
2148 | max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
2149 | num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2150 | num_dw = roundup2(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2151 | r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
2152 | &job); |
2153 | if (r) |
2154 | return r; |
2155 | |
2156 | for (i = 0; i < num_loops; i++) { |
2157 | uint32_t cur_size = min(byte_count, max_bytes);
2158 |
2159 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2160 | cur_size);
2161 | |
2162 | dst_addr += cur_size; |
2163 | byte_count -= cur_size; |
2164 | } |
2165 | |
2166 | amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2167 | WARN_ON(job->ibs[0].length_dw > num_dw);
2168 | r = amdgpu_job_submit(job, &adev->mman.entity,
2169 | AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2170 | if (r) |
2171 | goto error_free; |
2172 | |
2173 | return 0; |
2174 | |
2175 | error_free: |
2176 | amdgpu_job_free(job); |
2177 | return r; |
2178 | } |
2179 | |
2180 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
2181 | uint32_t src_data, |
2182 | struct dma_resv *resv, |
2183 | struct dma_fence **f) |
2184 | { |
2185 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
2186 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
2187 | struct dma_fence *fence = NULL;
2188 | struct amdgpu_res_cursor dst; |
2189 | int r; |
2190 | |
2191 | if (!adev->mman.buffer_funcs_enabled) { |
2192 | DRM_ERROR("Trying to clear memory with ring turned off.\n");
2193 | return -EINVAL;
2194 | } |
2195 | |
2196 | amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst); |
2197 | |
2198 | mutex_lock(&adev->mman.gtt_window_lock);
2199 | while (dst.remaining) { |
2200 | struct dma_fence *next; |
2201 | uint64_t cur_size, to; |
2202 | |
2203 | /* Never fill more than 256MiB at once to avoid timeouts */ |
2204 | cur_size = min(dst.size, 256ULL << 20);
2205 |
2206 | r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
2207 | 1, ring, false, &cur_size, &to);
2208 | if (r) |
2209 | goto error; |
2210 | |
2211 | r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, |
2212 | &next, true);
2213 | if (r) |
2214 | goto error; |
2215 | |
2216 | dma_fence_put(fence); |
2217 | fence = next; |
2218 | |
2219 | amdgpu_res_next(&dst, cur_size); |
2220 | } |
2221 | error: |
2222 | mutex_unlock(&adev->mman.gtt_window_lock);
2223 | if (f) |
2224 | *f = dma_fence_get(fence); |
2225 | dma_fence_put(fence); |
2226 | return r; |
2227 | } |
2228 | |
2229 | /** |
2230 | * amdgpu_ttm_evict_resources - evict memory buffers |
2231 | * @adev: amdgpu device object |
2232 | * @mem_type: evicted BO's memory type |
2233 | * |
2234 | * Evicts all @mem_type buffers on the lru list of the memory type. |
2235 | * |
2236 | * Returns: |
2237 | * 0 for success or a negative error code on failure. |
2238 | */ |
2239 | int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) |
2240 | { |
2241 | struct ttm_resource_manager *man; |
2242 | |
2243 | switch (mem_type) { |
2244 | case TTM_PL_VRAM:
2245 | case TTM_PL_TT:
2246 | case AMDGPU_PL_GWS:
2247 | case AMDGPU_PL_GDS:
2248 | case AMDGPU_PL_OA:
2249 | man = ttm_manager_type(&adev->mman.bdev, mem_type); |
2250 | break; |
2251 | default: |
2252 | DRM_ERROR("Trying to evict invalid memory type\n");
2253 | return -EINVAL;
2254 | } |
2255 | |
2256 | return ttm_resource_manager_evict_all(&adev->mman.bdev, man); |
2257 | } |
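/*
 * Example (editor's sketch): a typical caller, e.g. device suspend code,
 * evicting everything out of VRAM. The call site shown here is assumed, not
 * part of this file:
 *
 *	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
 *		DRM_WARN("evicting device resources failed\n");
 */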
2258 | |
2259 | #if defined(CONFIG_DEBUG_FS) |
2260 | |
2261 | static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) |
2262 | { |
2263 | struct amdgpu_device *adev = (struct amdgpu_device *)m->private; |
2264 | |
2265 | return ttm_pool_debugfs(&adev->mman.bdev.pool, m); |
2266 | } |
2267 | |
2268 | DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); |
2269 | |
2270 | /* |
2271 | * amdgpu_ttm_vram_read - Linear read access to VRAM |
2272 | * |
2273 | * Accesses VRAM via MMIO for debugging purposes. |
2274 | */ |
2275 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, |
2276 | size_t size, loff_t *pos) |
2277 | { |
2278 | struct amdgpu_device *adev = file_inode(f)->i_private; |
2279 | ssize_t result = 0; |
2280 | |
2281 | if (size & 0x3 || *pos & 0x3)
2282 | return -EINVAL;
2283 |
2284 | if (*pos >= adev->gmc.mc_vram_size)
2285 | return -ENXIO;
2286 |
2287 | size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2288 | while (size) {
2289 | size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2290 | uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2291 |
2292 | amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2293 | if (copy_to_user(buf, value, bytes))
2294 | return -EFAULT;
2295 | |
2296 | result += bytes; |
2297 | buf += bytes; |
2298 | *pos += bytes; |
2299 | size -= bytes; |
2300 | } |
2301 | |
2302 | return result; |
2303 | } |
2304 | |
2305 | /* |
2306 | * amdgpu_ttm_vram_write - Linear write access to VRAM |
2307 | * |
2308 | * Accesses VRAM via MMIO for debugging purposes. |
2309 | */ |
2310 | static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, |
2311 | size_t size, loff_t *pos) |
2312 | { |
2313 | struct amdgpu_device *adev = file_inode(f)->i_private; |
2314 | ssize_t result = 0; |
2315 | int r; |
2316 | |
2317 | if (size & 0x3 || *pos & 0x3) |
2318 | return -EINVAL;
2319 |
2320 | if (*pos >= adev->gmc.mc_vram_size)
2321 | return -ENXIO;
2322 | |
2323 | while (size) { |
2324 | uint32_t value; |
2325 | |
2326 | if (*pos >= adev->gmc.mc_vram_size) |
2327 | return result; |
2328 | |
2329 | r = get_user(value, (uint32_t *)buf);
2330 | if (r) |
2331 | return r; |
2332 | |
2333 | amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2334 | |
2335 | result += 4; |
2336 | buf += 4; |
2337 | *pos += 4; |
2338 | size -= 4; |
2339 | } |
2340 | |
2341 | return result; |
2342 | } |
2343 | |
2344 | static const struct file_operations amdgpu_ttm_vram_fops = { |
2345 | .owner = THIS_MODULE,
2346 | .read = amdgpu_ttm_vram_read, |
2347 | .write = amdgpu_ttm_vram_write, |
2348 | .llseek = default_llseek, |
2349 | }; |
2350 | |
2351 | /* |
2352 | * amdgpu_iomem_read - Virtual read access to GPU mapped memory |
2353 | * |
2354 | * This function is used to read memory that has been mapped to the |
2355 | * GPU and the known addresses are not physical addresses but instead |
2356 | * bus addresses (e.g., what you'd put in an IB or ring buffer). |
2357 | */ |
2358 | static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, |
2359 | size_t size, loff_t *pos) |
2360 | { |
2361 | struct amdgpu_device *adev = file_inode(f)->i_private; |
2362 | struct iommu_domain *dom; |
2363 | ssize_t result = 0; |
2364 | int r; |
2365 | |
2366 | /* retrieve the IOMMU domain if any for this device */ |
2367 | dom = iommu_get_domain_for_dev(adev->dev); |
2368 | |
2369 | while (size) { |
2370 | phys_addr_t addr = *pos & LINUX_PAGE_MASK;
2371 | loff_t off = *pos & ~LINUX_PAGE_MASK;
2372 | size_t bytes = PAGE_SIZE - off;
2373 | unsigned long pfn; |
2374 | struct vm_page *p; |
2375 | void *ptr; |
2376 | |
2377 | bytes = bytes < size ? bytes : size; |
2378 | |
2379 | /* Translate the bus address to a physical address. If |
2380 | * the domain is NULL it means there is no IOMMU active |
2381 | * and the address translation is the identity |
2382 | */ |
2383 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
2384 | |
2385 | pfn = addr >> PAGE_SHIFT;
2386 | if (!pfn_valid(pfn))
2387 | return -EPERM;
2388 | |
2389 | p = pfn_to_page(pfn);
2390 | #ifdef notyet |
2391 | if (p->mapping != adev->mman.bdev.dev_mapping) |
2392 | return -EPERM;
2393 | #else
2394 | STUB();
2395 | #endif |
2396 | |
2397 | ptr = kmap(p); |
2398 | r = copy_to_user(buf, ptr + off, bytes); |
2399 | kunmap(p); |
2400 | if (r) |
2401 | return -EFAULT;
2402 | |
2403 | size -= bytes; |
2404 | *pos += bytes; |
2405 | result += bytes; |
2406 | } |
2407 | |
2408 | return result; |
2409 | } |
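/*
 * Example (editor's sketch): the per-iteration address split used by both
 * iomem helpers. LINUX_PAGE_MASK clears the in-page bits, and the identity
 * translation applies when no IOMMU domain is present:
 *
 *	phys_addr_t addr = *pos & LINUX_PAGE_MASK;   // page-aligned bus address
 *	loff_t off = *pos & ~LINUX_PAGE_MASK;        // offset within the page
 *	size_t bytes = PAGE_SIZE - off;              // never cross a page
 *	if (bytes > size)
 *		bytes = size;
 *	addr = dom ? iommu_iova_to_phys(dom, addr) : addr;  // IOVA -> physical
 */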
2410 | |
2411 | /* |
2412 | * amdgpu_iomem_write - Virtual write access to GPU mapped memory |
2413 | * |
2414 | * This function is used to write memory that has been mapped to the |
2415 | * GPU and the known addresses are not physical addresses but instead |
2416 | * bus addresses (e.g., what you'd put in an IB or ring buffer). |
2417 | */ |
2418 | static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, |
2419 | size_t size, loff_t *pos) |
2420 | { |
2421 | struct amdgpu_device *adev = file_inode(f)->i_private; |
2422 | struct iommu_domain *dom; |
2423 | ssize_t result = 0; |
2424 | int r; |
2425 | |
2426 | dom = iommu_get_domain_for_dev(adev->dev); |
2427 | |
2428 | while (size) { |
2429 | phys_addr_t addr = *pos & LINUX_PAGE_MASK;
2430 | loff_t off = *pos & ~LINUX_PAGE_MASK;
2431 | size_t bytes = PAGE_SIZE - off;
2432 | unsigned long pfn; |
2433 | struct vm_page *p; |
2434 | void *ptr; |
2435 | |
2436 | bytes = bytes < size ? bytes : size; |
2437 | |
2438 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; |
2439 | |
2440 | pfn = addr >> PAGE_SHIFT;
2441 | if (!pfn_valid(pfn))
2442 | return -EPERM;
2443 | |
2444 | p = pfn_to_page(pfn);
2445 | #ifdef notyet |
2446 | if (p->mapping != adev->mman.bdev.dev_mapping) |
2447 | return -EPERM;
2448 | #else
2449 | STUB();
2450 | #endif |
2451 | |
2452 | ptr = kmap(p); |
2453 | r = copy_from_user(ptr + off, buf, bytes); |
2454 | kunmap(p); |
2455 | if (r) |
2456 | return -EFAULT;
2457 | |
2458 | size -= bytes; |
2459 | *pos += bytes; |
2460 | result += bytes; |
2461 | } |
2462 | |
2463 | return result; |
2464 | } |
2465 | |
2466 | static const struct file_operations amdgpu_ttm_iomem_fops = { |
2467 | .owner = THIS_MODULE,
2468 | .read = amdgpu_iomem_read, |
2469 | .write = amdgpu_iomem_write, |
2470 | .llseek = default_llseek |
2471 | }; |
2472 | |
2473 | #endif |
2474 | |
2475 | void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) |
2476 | { |
2477 | #if defined(CONFIG_DEBUG_FS) |
2478 | struct drm_minor *minor = adev_to_drm(adev)->primary; |
2479 | struct dentry *root = minor->debugfs_root; |
2480 | |
2481 | debugfs_create_file_size("amdgpu_vram", 0444, root, adev, |
2482 | &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); |
2483 | debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2484 | &amdgpu_ttm_iomem_fops);
2485 | debugfs_create_file("ttm_page_pool", 0444, root, adev,
2486 | &amdgpu_ttm_page_pool_fops);
2487 | ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, |
2488 | TTM_PL_VRAM),
2489 | root, "amdgpu_vram_mm"); |
2490 | ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, |
2491 | TTM_PL_TT),
2492 | root, "amdgpu_gtt_mm"); |
2493 | ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, |
2494 | AMDGPU_PL_GDS),
2495 | root, "amdgpu_gds_mm"); |
2496 | ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, |
2497 | AMDGPU_PL_GWS),
2498 | root, "amdgpu_gws_mm"); |
2499 | ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, |
2500 | AMDGPU_PL_OA),
2501 | root, "amdgpu_oa_mm"); |
2502 | |
2503 | #endif |
2504 | } |