File: dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
Warning: line 195, column 6: Access to field 'start' results in a dereference of a null pointer (loaded from variable 'mm_node')
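The flagged dereference is in amdgpu_mm_node_addr() (line 195 below). On the path the analyzer reports, amdgpu_ttm_copy_mem_to_mem() sets src_mm/dst_mm to NULL when the corresponding resource has no mm_node (lines 375/384) and hands that pointer through amdgpu_ttm_map_buffer() into amdgpu_mm_node_addr(). The code relies on the invariant that such resources also have mem->start == AMDGPU_BO_INVALID_OFFSET, so the check at line 259 keeps the NULL pointer from ever reaching line 195 in practice; the analyzer cannot see that invariant. A minimal defensive sketch of the callee, assuming a NULL node may legitimately be passed (the hypothetical guard is the only change from the code below):

	static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
					    struct drm_mm_node *mm_node,
					    struct ttm_resource *mem)
	{
		/* hypothetical guard to satisfy the analyzer: treat a
		 * missing node like an invalid offset */
		if (!mm_node || mm_node->start == AMDGPU_BO_INVALID_OFFSET)
			return 0;

		return (mm_node->start << PAGE_SHIFT) +
		       amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
					       mem->mem_type);
	}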
1 | /*
2 |  * Copyright 2009 Jerome Glisse.
3 |  * All Rights Reserved.
4 |  *
5 |  * Permission is hereby granted, free of charge, to any person obtaining a
6 |  * copy of this software and associated documentation files (the
7 |  * "Software"), to deal in the Software without restriction, including
8 |  * without limitation the rights to use, copy, modify, merge, publish,
9 |  * distribute, sub license, and/or sell copies of the Software, and to
10 |  * permit persons to whom the Software is furnished to do so, subject to
11 |  * the following conditions:
12 |  *
13 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 |  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 |  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 |  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 |  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 |  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 |  *
21 |  * The above copyright notice and this permission notice (including the
22 |  * next paragraph) shall be included in all copies or substantial portions
23 |  * of the Software.
24 |  *
25 |  */
26 | /*
27 |  * Authors:
28 |  *    Jerome Glisse <glisse@freedesktop.org>
29 |  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 |  *    Dave Airlie
31 |  */
32 |
33 | #include <linux/dma-mapping.h>
34 | #include <linux/iommu.h>
35 | #include <linux/hmm.h>
36 | #include <linux/pagemap.h>
37 | #include <linux/sched/task.h>
38 | #include <linux/sched/mm.h>
39 | #include <linux/seq_file.h>
40 | #include <linux/slab.h>
41 | #include <linux/swap.h>
42 | #include <linux/swiotlb.h>
43 | #include <linux/dma-buf.h>
44 | #include <linux/sizes.h>
45 |
46 | #include <drm/ttm/ttm_bo_api.h>
47 | #include <drm/ttm/ttm_bo_driver.h>
48 | #include <drm/ttm/ttm_placement.h>
49 | #include <drm/ttm/ttm_module.h>
50 | #include <drm/ttm/ttm_page_alloc.h>
51 |
52 | #include <drm/drm_debugfs.h>
53 | #include <drm/amdgpu_drm.h>
54 |
55 | #include "amdgpu.h"
56 | #include "amdgpu_object.h"
57 | #include "amdgpu_trace.h"
58 | #include "amdgpu_amdkfd.h"
59 | #include "amdgpu_sdma.h"
60 | #include "amdgpu_ras.h"
61 | #include "amdgpu_atomfirmware.h"
62 | #include "bif/bif_4_1_d.h"
63 |
64 | #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
65 |
66 | static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
67 | 				   struct ttm_tt *ttm,
68 | 				   struct ttm_resource *bo_mem);
69 |
70 | static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
71 | 				   unsigned int type,
72 | 				   uint64_t size_in_page)
73 | {
74 | 	return ttm_range_man_init(&adev->mman.bdev, type,
75 | 				  false, size_in_page);
76 | }
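    | /* Annotation: this helper sets up the range managers for the on-chip
    |  * GDS/GWS/OA domains; the false argument is ttm_range_man_init()'s
    |  * use_tt flag (these domains are not backed by ttm_tt pages). */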
77 |
78 | /**
79 |  * amdgpu_evict_flags - Compute placement flags
80 |  *
81 |  * @bo: The buffer object to evict
82 |  * @placement: Possible destination(s) for evicted BO
83 |  *
84 |  * Fill in placement data when ttm_bo_evict() is called
85 |  */
86 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
87 | 			       struct ttm_placement *placement)
88 | {
89 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
90 | 	struct amdgpu_bo *abo;
91 | 	static const struct ttm_place placements = {
92 | 		.fpfn = 0,
93 | 		.lpfn = 0,
94 | 		.mem_type = TTM_PL_SYSTEM,
95 | 		.flags = TTM_PL_MASK_CACHING
96 | 	};
97 |
98 | 	/* Don't handle scatter gather BOs */
99 | 	if (bo->type == ttm_bo_type_sg) {
100 | 		placement->num_placement = 0;
101 | 		placement->num_busy_placement = 0;
102 | 		return;
103 | 	}
104 |
105 | 	/* Object isn't an AMDGPU object so ignore */
106 | 	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
107 | 		placement->placement = &placements;
108 | 		placement->busy_placement = &placements;
109 | 		placement->num_placement = 1;
110 | 		placement->num_busy_placement = 1;
111 | 		return;
112 | 	}
113 |
114 | 	abo = ttm_to_amdgpu_bo(bo);
115 | 	switch (bo->mem.mem_type) {
116 | 	case AMDGPU_PL_GDS:
117 | 	case AMDGPU_PL_GWS:
118 | 	case AMDGPU_PL_OA:
119 | 		placement->num_placement = 0;
120 | 		placement->num_busy_placement = 0;
121 | 		return;
122 |
123 | 	case TTM_PL_VRAM:
124 | 		if (!adev->mman.buffer_funcs_enabled) {
125 | 			/* Move to system memory */
126 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
127 | 		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
128 | 			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
129 | 			   amdgpu_bo_in_cpu_visible_vram(abo)) {
130 |
131 | 			/* Try evicting to the CPU inaccessible part of VRAM
132 | 			 * first, but only set GTT as busy placement, so this
133 | 			 * BO will be evicted to GTT rather than causing other
134 | 			 * BOs to be evicted from VRAM
135 | 			 */
136 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
137 | 							AMDGPU_GEM_DOMAIN_GTT);
138 | 			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
139 | 			abo->placements[0].lpfn = 0;
140 | 			abo->placement.busy_placement = &abo->placements[1];
141 | 			abo->placement.num_busy_placement = 1;
142 | 		} else {
143 | 			/* Move to GTT memory */
144 | 			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
145 | 		}
146 | 		break;
147 | 	case TTM_PL_TT:
148 | 	default:
149 | 		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
150 | 		break;
151 | 	}
152 | 	*placement = abo->placement;
153 | }
154 |
155 | /**
156 |  * amdgpu_verify_access - Verify access for a mmap call
157 |  *
158 |  * @bo: The buffer object to map
159 |  * @filp: The file pointer from the process performing the mmap
160 |  *
161 |  * This is called by ttm_bo_mmap() to verify whether a process
162 |  * has the right to mmap a BO to their process space.
163 |  */
164 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
165 | {
166 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
167 | 	struct drm_file *file_priv = (void *)filp;
168 |
169 | 	/*
170 | 	 * Don't verify access for KFD BOs. They don't have a GEM
171 | 	 * object associated with them.
172 | 	 */
173 | 	if (abo->kfd_bo)
174 | 		return 0;
175 |
176 | 	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
177 | 		return -EPERM;
178 | 	return drm_vma_node_verify_access(&abo->tbo.base.vma_node, file_priv);
179 | }
180 |
181 | /**
182 |  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
183 |  *
184 |  * @bo: The bo to assign the memory to.
185 |  * @mm_node: Memory manager node for drm allocator.
186 |  * @mem: The region where the bo resides.
187 |  *
188 |  */
189 | static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
190 | 				    struct drm_mm_node *mm_node,
191 | 				    struct ttm_resource *mem)
192 | {
193 | 	uint64_t addr = 0;
194 |
195 | 	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
196 | 		addr = mm_node->start << PAGE_SHIFT;
197 | 		addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
198 | 						mem->mem_type);
199 | 	}
200 | 	return addr;
201 | }
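    | /* Line 195 above is the dereference the analyzer flags: mm_node is
    |  * used before any NULL check.  The only caller that can pass a NULL
    |  * node, amdgpu_ttm_map_buffer(), appears to reach this function only
    |  * when mem->start != AMDGPU_BO_INVALID_OFFSET, which implies a valid
    |  * node, but that invariant is invisible to the analyzer. */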
202 |
203 | /**
204 |  * amdgpu_find_mm_node - Helper function to find the drm_mm_node containing
205 |  * @offset. It also adjusts @offset to be relative to the returned node.
206 |  *
207 |  * @mem: The region where the bo resides.
208 |  * @offset: The offset that the drm_mm_node is searched for.
209 |  *
210 |  */
211 | static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
212 | 					       uint64_t *offset)
213 | {
214 | 	struct drm_mm_node *mm_node = mem->mm_node;
215 |
216 | 	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
217 | 		*offset -= (mm_node->size << PAGE_SHIFT);
218 | 		++mm_node;
219 | 	}
220 | 	return mm_node;
221 | }
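    | /* Worked example: with nodes of 4 and 8 pages and *offset == 0x6000
    |  * (6 pages), the loop subtracts the first node's 0x4000 bytes and
    |  * returns the second node with *offset == 0x2000.  Callers must
    |  * guarantee that mem->mm_node is non-NULL and that @offset lies
    |  * inside the resource. */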
222 |
223 | /**
224 |  * amdgpu_ttm_map_buffer - Map memory into the GART windows
225 |  * @bo: buffer object to map
226 |  * @mem: memory object to map
227 |  * @mm_node: drm_mm node object to map
228 |  * @num_pages: number of pages to map
229 |  * @offset: offset into @mm_node where to start
230 |  * @window: which GART window to use
231 |  * @ring: DMA ring to use for the copy
232 |  * @tmz: if we should setup a TMZ enabled mapping
233 |  * @addr: resulting address inside the MC address space
234 |  *
235 |  * Setup one of the GART windows to access a specific piece of memory or return
236 |  * the physical address for local memory.
237 |  */
238 | static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
239 | 				 struct ttm_resource *mem,
240 | 				 struct drm_mm_node *mm_node,
241 | 				 unsigned num_pages, uint64_t offset,
242 | 				 unsigned window, struct amdgpu_ring *ring,
243 | 				 bool tmz, uint64_t *addr)
244 | {
245 | 	struct amdgpu_device *adev = ring->adev;
246 | 	struct amdgpu_job *job;
247 | 	unsigned num_dw, num_bytes;
248 | 	struct dma_fence *fence;
249 | 	uint64_t src_addr, dst_addr;
250 | 	void *cpu_addr;
251 | 	uint64_t flags;
252 | 	unsigned int i;
253 | 	int r;
254 |
255 | 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
256 | 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
257 |
258 | 	/* Map only what can't be accessed directly */
259 | 	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
260 | 		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
261 | 		return 0;
262 | 	}
263 |
264 | 	*addr = adev->gmc.gart_start;
265 | 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
266 | 		AMDGPU_GPU_PAGE_SIZE;
267 | 	*addr += offset & ~LINUX_PAGE_MASK;
268 |
269 | 	num_dw = roundup2(adev->mman.buffer_funcs->copy_num_dw, 8);
270 | 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
271 |
272 | 	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
273 | 				     AMDGPU_IB_POOL_DELAYED, &job);
274 | 	if (r)
275 | 		return r;
276 |
277 | 	src_addr = num_dw * 4;
278 | 	src_addr += job->ibs[0].gpu_addr;
279 |
280 | 	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
281 | 	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
282 | 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
283 | 				dst_addr, num_bytes, false);
284 |
285 | 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
286 | 	WARN_ON(job->ibs[0].length_dw > num_dw);
287 |
288 | 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
289 | 	if (tmz)
290 | 		flags |= AMDGPU_PTE_TMZ;
291 |
292 | 	cpu_addr = &job->ibs[0].ptr[num_dw];
293 |
294 | 	if (mem->mem_type == TTM_PL_TT) {
295 | 		struct ttm_dma_tt *dma;
296 | 		dma_addr_t *dma_address;
297 |
298 | 		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
299 | 		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
300 | 		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
301 | 				    cpu_addr);
302 | 		if (r)
303 | 			goto error_free;
304 | 	} else {
305 | 		dma_addr_t dma_address;
306 |
307 | 		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
308 | 		dma_address += adev->vm_manager.vram_base_offset;
309 |
310 | 		for (i = 0; i < num_pages; ++i) {
311 | 			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
312 | 					    &dma_address, flags, cpu_addr);
313 | 			if (r)
314 | 				goto error_free;
315 |
316 | 			dma_address += PAGE_SIZE;
317 | 		}
318 | 	}
319 |
320 | 	r = amdgpu_job_submit(job, &adev->mman.entity,
321 | 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
322 | 	if (r)
323 | 		goto error_free;
324 |
325 | 	dma_fence_put(fence);
326 |
327 | 	return r;
328 |
329 | error_free:
330 | 	amdgpu_job_free(job);
331 | 	return r;
332 | }
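    | /* Window address math: each GART window spans
    |  * AMDGPU_GTT_MAX_TRANSFER_SIZE (512) GPU pages of AMDGPU_GPU_PAGE_SIZE
    |  * (4096) bytes, i.e. 2 MiB, so window 1 starts at gart_start +
    |  * 0x200000, plus the sub-page part of @offset. */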
333 |
334 | /**
335 |  * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
336 |  * @adev: amdgpu device
337 |  * @src: buffer/address where to read from
338 |  * @dst: buffer/address where to write to
339 |  * @size: number of bytes to copy
340 |  * @tmz: if a secure copy should be used
341 |  * @resv: resv object to sync to
342 |  * @f: Returns the last fence if multiple jobs are submitted.
343 |  *
344 |  * The function copies @size bytes from {src->mem + src->offset} to
345 |  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
346 |  * move and different for a BO to BO copy.
347 |  *
348 |  */
349 | int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
350 | 			       const struct amdgpu_copy_mem *src,
351 | 			       const struct amdgpu_copy_mem *dst,
352 | 			       uint64_t size, bool tmz,
353 | 			       struct dma_resv *resv,
354 | 			       struct dma_fence **f)
355 | {
356 | 	const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
357 | 					AMDGPU_GPU_PAGE_SIZE);
358 |
359 | 	uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
360 | 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
361 | 	struct drm_mm_node *src_mm, *dst_mm;
362 | 	struct dma_fence *fence = NULL;
363 | 	int r = 0;
364 |
365 | 	if (!adev->mman.buffer_funcs_enabled) {
366 | 		DRM_ERROR("Trying to move memory with ring turned off.\n");
367 | 		return -EINVAL;
368 | 	}
369 |
370 | 	src_offset = src->offset;
371 | 	if (src->mem->mm_node) {
372 | 		src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
373 | 		src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
374 | 	} else {
375 | 		src_mm = NULL;
376 | 		src_node_size = ULLONG_MAX;
377 | 	}
378 |
379 | 	dst_offset = dst->offset;
380 | 	if (dst->mem->mm_node) {
381 | 		dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
382 | 		dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
383 | 	} else {
384 | 		dst_mm = NULL;
385 | 		dst_node_size = ULLONG_MAX;
386 | 	}
387 |
388 | 	mutex_lock(&adev->mman.gtt_window_lock);
389 |
390 | 	while (size) {
391 | 		uint32_t src_page_offset = src_offset & ~LINUX_PAGE_MASK;
392 | 		uint32_t dst_page_offset = dst_offset & ~LINUX_PAGE_MASK;
393 | 		struct dma_fence *next;
394 | 		uint32_t cur_size;
395 | 		uint64_t from, to;
396 |
397 | 		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
398 | 		 * begins at an offset, then adjust the size accordingly
399 | 		 */
400 | 		cur_size = max(src_page_offset, dst_page_offset);
401 | 		cur_size = min(min3(src_node_size, dst_node_size, size),
402 | 			       (uint64_t)(GTT_MAX_BYTES - cur_size));
403 |
404 | 		/* Map src to window 0 and dst to window 1. */
405 | 		r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
406 | 					  PFN_UP(cur_size + src_page_offset),
407 | 					  src_offset, 0, ring, tmz, &from);
408 | 		if (r)
409 | 			goto error;
410 |
411 | 		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
412 | 					  PFN_UP(cur_size + dst_page_offset),
413 | 					  dst_offset, 1, ring, tmz, &to);
414 | 		if (r)
415 | 			goto error;
416 |
417 | 		r = amdgpu_copy_buffer(ring, from, to, cur_size,
418 | 				       resv, &next, false, true, tmz);
419 | 		if (r)
420 | 			goto error;
421 |
422 | 		dma_fence_put(fence);
423 | 		fence = next;
424 |
425 | 		size -= cur_size;
426 | 		if (!size)
427 | 			break;
428 |
429 | 		src_node_size -= cur_size;
430 | 		if (!src_node_size) {
431 | 			++src_mm;
432 | 			src_node_size = src_mm->size << PAGE_SHIFT;
433 | 			src_offset = 0;
434 | 		} else {
435 | 			src_offset += cur_size;
436 | 		}
437 |
438 | 		dst_node_size -= cur_size;
439 | 		if (!dst_node_size) {
440 | 			++dst_mm;
441 | 			dst_node_size = dst_mm->size << PAGE_SHIFT;
442 | 			dst_offset = 0;
443 | 		} else {
444 | 			dst_offset += cur_size;
445 | 		}
446 | 	}
447 | error:
448 | 	mutex_unlock(&adev->mman.gtt_window_lock);
449 | 	if (f)
450 | 		*f = dma_fence_get(fence);
451 | 	dma_fence_put(fence);
452 | 	return r;
453 | }
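    | /* Chunking example: GTT_MAX_BYTES is 512 * 4096 = 2 MiB, so a
    |  * page-aligned 5 MiB copy that crosses no mm_node boundary is split
    |  * into 2 MiB + 2 MiB + 1 MiB jobs, each staged through GART windows
    |  * 0 (src) and 1 (dst) while gtt_window_lock is held. */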
454 |
455 | /**
456 |  * amdgpu_move_blit - Copy an entire buffer to another buffer
457 |  *
458 |  * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
459 |  * help move buffers to and from VRAM.
460 |  */
461 | static int amdgpu_move_blit(struct ttm_buffer_object *bo,
462 | 			    bool evict,
463 | 			    struct ttm_resource *new_mem,
464 | 			    struct ttm_resource *old_mem)
465 | {
466 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
467 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
468 | 	struct amdgpu_copy_mem src, dst;
469 | 	struct dma_fence *fence = NULL;
470 | 	int r;
471 |
472 | 	src.bo = bo;
473 | 	dst.bo = bo;
474 | 	src.mem = old_mem;
475 | 	dst.mem = new_mem;
476 | 	src.offset = 0;
477 | 	dst.offset = 0;
478 |
479 | 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
480 | 				       new_mem->num_pages << PAGE_SHIFT,
481 | 				       amdgpu_bo_encrypted(abo),
482 | 				       bo->base.resv, &fence);
483 | 	if (r)
484 | 		goto error;
485 |
486 | 	/* clear the space being freed */
487 | 	if (old_mem->mem_type == TTM_PL_VRAM &&
488 | 	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
489 | 		struct dma_fence *wipe_fence = NULL;
490 |
491 | 		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
492 | 				       NULL, &wipe_fence);
493 | 		if (r) {
494 | 			goto error;
495 | 		} else if (wipe_fence) {
496 | 			dma_fence_put(fence);
497 | 			fence = wipe_fence;
498 | 		}
499 | 	}
500 |
501 | 	/* Always block for VM page tables before committing the new location */
502 | 	if (bo->type == ttm_bo_type_kernel)
503 | 		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
504 | 	else
505 | 		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
506 | 	dma_fence_put(fence);
507 | 	return r;
508 |
509 | error:
510 | 	if (fence)
511 | 		dma_fence_wait(fence, false);
512 | 	dma_fence_put(fence);
513 | 	return r;
514 | }
515 |
516 | /**
517 |  * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
518 |  *
519 |  * Called by amdgpu_bo_move().
520 |  */
521 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
522 | 				struct ttm_operation_ctx *ctx,
523 | 				struct ttm_resource *new_mem)
524 | {
525 | 	struct ttm_resource *old_mem = &bo->mem;
526 | 	struct ttm_resource tmp_mem;
527 | 	struct ttm_place placements;
528 | 	struct ttm_placement placement;
529 | 	int r;
530 |
531 | 	/* create space/pages for new_mem in GTT space */
532 | 	tmp_mem = *new_mem;
533 | 	tmp_mem.mm_node = NULL;
534 | 	placement.num_placement = 1;
535 | 	placement.placement = &placements;
536 | 	placement.num_busy_placement = 1;
537 | 	placement.busy_placement = &placements;
538 | 	placements.fpfn = 0;
539 | 	placements.lpfn = 0;
540 | 	placements.mem_type = TTM_PL_TT;
541 | 	placements.flags = TTM_PL_MASK_CACHING;
542 | 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
543 | 	if (unlikely(r)) {
544 | 		pr_err("Failed to find GTT space for blit from VRAM\n");
545 | 		return r;
546 | 	}
547 |
548 | 	/* set caching flags */
549 | 	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
550 | 	if (unlikely(r)) {
551 | 		goto out_cleanup;
552 | 	}
553 |
554 | 	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
555 | 	if (unlikely(r))
556 | 		goto out_cleanup;
557 |
558 | 	/* Bind the memory to the GTT space */
559 | 	r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
560 | 	if (unlikely(r)) {
561 | 		goto out_cleanup;
562 | 	}
563 |
564 | 	/* blit VRAM to GTT */
565 | 	r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
566 | 	if (unlikely(r)) {
567 | 		goto out_cleanup;
568 | 	}
569 |
570 | 	/* move BO (in tmp_mem) to new_mem */
571 | 	r = ttm_bo_move_ttm(bo, ctx, new_mem);
572 | out_cleanup:
573 | 	ttm_resource_free(bo, &tmp_mem);
574 | 	return r;
575 | }
576 |
577 | /**
578 |  * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
579 |  *
580 |  * Called by amdgpu_bo_move().
581 |  */
582 | static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
583 | 				struct ttm_operation_ctx *ctx,
584 | 				struct ttm_resource *new_mem)
585 | {
586 | 	struct ttm_resource *old_mem = &bo->mem;
587 | 	struct ttm_resource tmp_mem;
588 | 	struct ttm_placement placement;
589 | 	struct ttm_place placements;
590 | 	int r;
591 |
592 | 	/* make space in GTT for old_mem buffer */
593 | 	tmp_mem = *new_mem;
594 | 	tmp_mem.mm_node = NULL;
595 | 	placement.num_placement = 1;
596 | 	placement.placement = &placements;
597 | 	placement.num_busy_placement = 1;
598 | 	placement.busy_placement = &placements;
599 | 	placements.fpfn = 0;
600 | 	placements.lpfn = 0;
601 | 	placements.mem_type = TTM_PL_TT;
602 | 	placements.flags = TTM_PL_MASK_CACHING;
603 | 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
604 | 	if (unlikely(r)) {
605 | 		pr_err("Failed to find GTT space for blit to VRAM\n");
606 | 		return r;
607 | 	}
608 |
609 | 	/* move/bind old memory to GTT space */
610 | 	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
611 | 	if (unlikely(r)) {
612 | 		goto out_cleanup;
613 | 	}
614 |
615 | 	/* copy to VRAM */
616 | 	r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
617 | 	if (unlikely(r)) {
618 | 		goto out_cleanup;
619 | 	}
620 | out_cleanup:
621 | 	ttm_resource_free(bo, &tmp_mem);
622 | 	return r;
623 | }
624 |
625 | /**
626 |  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
627 |  *
628 |  * Called by amdgpu_bo_move()
629 |  */
630 | static bool amdgpu_mem_visible(struct amdgpu_device *adev,
631 | 			       struct ttm_resource *mem)
632 | {
633 | 	struct drm_mm_node *nodes = mem->mm_node;
634 |
635 | 	if (mem->mem_type == TTM_PL_SYSTEM ||
636 | 	    mem->mem_type == TTM_PL_TT)
637 | 		return true;
638 | 	if (mem->mem_type != TTM_PL_VRAM)
639 | 		return false;
640 |
641 | 	/* ttm_resource_ioremap only supports contiguous memory */
642 | 	if (nodes->size != mem->num_pages)
643 | 		return false;
644 |
645 | 	return ((nodes->start + nodes->size) << PAGE_SHIFT)
646 | 		<= adev->gmc.visible_vram_size;
647 | }
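    | /* Example: a VRAM BO allocated as two mm_nodes has nodes->size !=
    |  * mem->num_pages, so the memcpy fallback is refused even if both
    |  * pieces happen to lie inside visible VRAM. */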
648 |
649 | /**
650 |  * amdgpu_bo_move - Move a buffer object to a new memory location
651 |  *
652 |  * Called by ttm_bo_handle_move_mem()
653 |  */
654 | static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
655 | 			  struct ttm_operation_ctx *ctx,
656 | 			  struct ttm_resource *new_mem)
657 | {
658 | 	struct amdgpu_device *adev;
659 | 	struct amdgpu_bo *abo;
660 | 	struct ttm_resource *old_mem = &bo->mem;
661 | 	int r;
662 |
663 | 	/* Can't move a pinned BO */
664 | 	abo = ttm_to_amdgpu_bo(bo);
665 | 	if (WARN_ON_ONCE(abo->pin_count > 0))
666 | 		return -EINVAL;
667 |
668 | 	adev = amdgpu_ttm_adev(bo->bdev);
669 |
670 | 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
671 | 		ttm_bo_move_null(bo, new_mem);
672 | 		return 0;
673 | 	}
674 | 	if ((old_mem->mem_type == TTM_PL_TT &&
675 | 	     new_mem->mem_type == TTM_PL_SYSTEM) ||
676 | 	    (old_mem->mem_type == TTM_PL_SYSTEM &&
677 | 	     new_mem->mem_type == TTM_PL_TT)) {
678 | 		/* bind is enough */
679 | 		ttm_bo_move_null(bo, new_mem);
680 | 		return 0;
681 | 	}
682 | 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
683 | 	    old_mem->mem_type == AMDGPU_PL_GWS ||
684 | 	    old_mem->mem_type == AMDGPU_PL_OA ||
685 | 	    new_mem->mem_type == AMDGPU_PL_GDS ||
686 | 	    new_mem->mem_type == AMDGPU_PL_GWS ||
687 | 	    new_mem->mem_type == AMDGPU_PL_OA) {
688 | 		/* Nothing to save here */
689 | 		ttm_bo_move_null(bo, new_mem);
690 | 		return 0;
691 | 	}
692 |
693 | 	if (!adev->mman.buffer_funcs_enabled) {
694 | 		r = -ENODEV;
695 | 		goto memcpy;
696 | 	}
697 |
698 | 	if (old_mem->mem_type == TTM_PL_VRAM &&
699 | 	    new_mem->mem_type == TTM_PL_SYSTEM) {
700 | 		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
701 | 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
702 | 		   new_mem->mem_type == TTM_PL_VRAM) {
703 | 		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
704 | 	} else {
705 | 		r = amdgpu_move_blit(bo, evict,
706 | 				     new_mem, old_mem);
707 | 	}
708 |
709 | 	if (r) {
710 | memcpy:
711 | 		/* Check that all memory is CPU accessible */
712 | 		if (!amdgpu_mem_visible(adev, old_mem) ||
713 | 		    !amdgpu_mem_visible(adev, new_mem)) {
714 | 			pr_err("Move buffer fallback to memcpy unavailable\n");
715 | 			return r;
716 | 		}
717 |
718 | 		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
719 | 		if (r)
720 | 			return r;
721 | 	}
722 |
723 | 	if (bo->type == ttm_bo_type_device &&
724 | 	    new_mem->mem_type == TTM_PL_VRAM &&
725 | 	    old_mem->mem_type != TTM_PL_VRAM) {
726 | 		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
727 | 		 * accesses the BO after it's moved.
728 | 		 */
729 | 		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
730 | 	}
731 |
732 | 	/* update statistics */
733 | 	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
734 | 	return 0;
735 | }
736 |
737 | /**
738 |  * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
739 |  *
740 |  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
741 |  */
742 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
743 | {
744 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
745 | 	struct drm_mm_node *mm_node = mem->mm_node;
746 | 	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
747 |
748 | 	switch (mem->mem_type) {
749 | 	case TTM_PL_SYSTEM:
750 | 		/* system memory */
751 | 		return 0;
752 | 	case TTM_PL_TT:
753 | 		break;
754 | 	case TTM_PL_VRAM:
755 | 		mem->bus.offset = mem->start << PAGE_SHIFT;
756 | 		/* check if it's visible */
757 | 		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
758 | 			return -EINVAL;
759 | 		/* Only physically contiguous buffers apply. In a contiguous
760 | 		 * buffer, size of the first mm_node would match the number of
761 | 		 * pages in ttm_resource.
762 | 		 */
763 | 		if (adev->mman.aper_base_kaddr &&
764 | 		    (mm_node->size == mem->num_pages))
765 | 			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
766 | 					mem->bus.offset;
767 |
768 | 		mem->bus.offset += adev->gmc.aper_base;
769 | 		mem->bus.is_iomem = true;
770 | 		break;
771 | 	default:
772 | 		return -EINVAL;
773 | 	}
774 | 	return 0;
775 | }
776 |
777 | static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
778 | 					   unsigned long page_offset)
779 | {
780 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
781 | 	uint64_t offset = (page_offset << PAGE_SHIFT);
782 | 	struct drm_mm_node *mm;
783 |
784 | 	mm = amdgpu_find_mm_node(&bo->mem, &offset);
785 | 	offset += adev->gmc.aper_base;
786 | 	return mm->start + (offset >> PAGE_SHIFT);
787 | }
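    | /* The returned value is a PFN inside the VRAM BAR: the node's start
    |  * page plus (aper_base + offset-within-node) >> PAGE_SHIFT. */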
788 |
789 | /**
790 |  * amdgpu_ttm_domain_start - Returns GPU start address
791 |  * @adev: amdgpu device object
792 |  * @type: type of the memory
793 |  *
794 |  * Returns:
795 |  * GPU start address of a memory domain
796 |  */
797 |
798 | uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
799 | {
800 | 	switch (type) {
801 | 	case TTM_PL_TT:
802 | 		return adev->gmc.gart_start;
803 | 	case TTM_PL_VRAM:
804 | 		return adev->gmc.vram_start;
805 | 	}
806 |
807 | 	return 0;
808 | }
809 |
810 | /*
811 |  * TTM backend functions.
812 |  */
813 | struct amdgpu_ttm_tt {
814 | 	struct ttm_dma_tt	ttm;
815 | 	struct drm_gem_object	*gobj;
816 | 	u64			offset;
817 | 	uint64_t		userptr;
818 | 	struct task_struct	*usertask;
819 | 	uint32_t		userflags;
820 | 	bool			bound;
821 | #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
822 | 	struct hmm_range	*range;
823 | #endif
824 | };
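    | /* The (void *)ttm casts used throughout rely on layout: the ttm_tt is
    |  * the first member of ttm_dma_tt, which is in turn the first member of
    |  * amdgpu_ttm_tt, so all three share one address. */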
825 |
826 | #ifdef CONFIG_DRM_AMDGPU_USERPTR
827 | /**
828 |  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
829 |  * memory and start HMM tracking of CPU page table updates
830 |  *
831 |  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
832 |  * once afterwards to stop HMM tracking
833 |  */
834 | int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct vm_page **pages)
835 | {
836 | 	struct ttm_tt *ttm = bo->tbo.ttm;
837 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
838 | 	unsigned long start = gtt->userptr;
839 | 	struct vm_area_struct *vma;
840 | 	struct hmm_range *range;
841 | 	unsigned long timeout;
842 | 	struct mm_struct *mm;
843 | 	unsigned long i;
844 | 	int r = 0;
845 |
846 | 	mm = bo->notifier.mm;
847 | 	if (unlikely(!mm)) {
848 | 		DRM_DEBUG_DRIVER("BO is not registered?\n");
849 | 		return -EFAULT;
850 | 	}
851 |
852 | 	/* Another get_user_pages is running at the same time?? */
853 | 	if (WARN_ON(gtt->range))
854 | 		return -EFAULT;
855 |
856 | 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
857 | 		return -ESRCH;
858 |
859 | 	range = kzalloc(sizeof(*range), GFP_KERNEL);
860 | 	if (unlikely(!range)) {
861 | 		r = -ENOMEM;
862 | 		goto out;
863 | 	}
864 | 	range->notifier = &bo->notifier;
865 | 	range->start = bo->notifier.interval_tree.start;
866 | 	range->end = bo->notifier.interval_tree.last + 1;
867 | 	range->default_flags = HMM_PFN_REQ_FAULT;
868 | 	if (!amdgpu_ttm_tt_is_readonly(ttm))
869 | 		range->default_flags |= HMM_PFN_REQ_WRITE;
870 |
871 | 	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
872 | 					 sizeof(*range->hmm_pfns), GFP_KERNEL);
873 | 	if (unlikely(!range->hmm_pfns)) {
874 | 		r = -ENOMEM;
875 | 		goto out_free_ranges;
876 | 	}
877 |
878 | 	mmap_read_lock(mm);
879 | 	vma = find_vma(mm, start);
880 | 	if (unlikely(!vma || start < vma->vm_start)) {
881 | 		r = -EFAULT;
882 | 		goto out_unlock;
883 | 	}
884 | 	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
885 | 		     vma->vm_file)) {
886 | 		r = -EPERM;
887 | 		goto out_unlock;
888 | 	}
889 | 	mmap_read_unlock(mm);
890 | 	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
891 |
892 | retry:
893 | 	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
894 |
895 | 	mmap_read_lock(mm);
896 | 	r = hmm_range_fault(range);
897 | 	mmap_read_unlock(mm);
898 | 	if (unlikely(r)) {
899 | 		/*
900 | 		 * FIXME: This timeout should encompass the retry from
901 | 		 * mmu_interval_read_retry() as well.
902 | 		 */
903 | 		if (r == -EBUSY && !time_after(jiffies, timeout))
904 | 			goto retry;
905 | 		goto out_free_pfns;
906 | 	}
907 |
908 | 	/*
909 | 	 * Due to default_flags, all pages are HMM_PFN_VALID or
910 | 	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
911 | 	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
912 | 	 */
913 | 	for (i = 0; i < ttm->num_pages; i++)
914 | 		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
915 |
916 | 	gtt->range = range;
917 | 	mmput(mm);
918 |
919 | 	return 0;
920 |
921 | out_unlock:
922 | 	mmap_read_unlock(mm);
923 | out_free_pfns:
924 | 	kvfree(range->hmm_pfns);
925 | out_free_ranges:
926 | 	kfree(range);
927 | out:
928 | 	mmput(mm);
929 | 	return r;
930 | }
931 |
932 | /**
933 |  * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table
934 |  * changes. Check if the pages backing this ttm range have been invalidated.
935 |  *
936 |  * Returns: true if pages are still valid
937 |  */
938 | bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
939 | {
940 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
941 | 	bool r = false;
942 |
943 | 	if (!gtt || !gtt->userptr)
944 | 		return false;
945 |
946 | 	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
947 | 			 gtt->userptr, ttm->num_pages);
948 |
949 | 	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
950 | 		  "No user pages to check\n");
951 |
952 | 	if (gtt->range) {
953 | 		/*
954 | 		 * FIXME: Must always hold notifier_lock for this, and must
955 | 		 * not ignore the return code.
956 | 		 */
957 | 		r = mmu_interval_read_retry(gtt->range->notifier,
958 | 					    gtt->range->notifier_seq);
959 | 		kvfree(gtt->range->hmm_pfns);
960 | 		kfree(gtt->range);
961 | 		gtt->range = NULL;
962 | 	}
963 |
964 | 	return !r;
965 | }
966 | #endif
967 |
968 | /**
969 |  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
970 |  *
971 |  * Called by amdgpu_cs_list_validate(). This creates the page list
972 |  * that backs user memory and will ultimately be mapped into the device
973 |  * address space.
974 |  */
975 | void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct vm_page **pages)
976 | {
977 | 	unsigned long i;
978 |
979 | 	for (i = 0; i < ttm->num_pages; ++i)
980 | 		ttm->pages[i] = pages ? pages[i] : NULL;
981 | }
982 |
983 | /**
984 |  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
985 |  *
986 |  * Called by amdgpu_ttm_backend_bind()
987 |  **/
988 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
989 | 				     struct ttm_tt *ttm)
990 | {
991 | 	STUB();
992 | 	return -ENOSYS;
993 | #ifdef notyet
994 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
995 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
996 | 	int r;
997 |
998 | 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
999 | 	enum dma_data_direction direction = write ?
1000 | 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1001 |
1002 | 	/* Allocate an SG array and squash pages into it */
1003 | 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
1004 | 				      ttm->num_pages << PAGE_SHIFT,
1005 | 				      GFP_KERNEL);
1006 | 	if (r)
1007 | 		goto release_sg;
1008 |
1009 | 	/* Map SG to device */
1010 | 	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
1011 | 	if (r)
1012 | 		goto release_sg;
1013 |
1014 | 	/* convert SG to linear array of pages and dma addresses */
1015 | 	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1016 | 					 gtt->ttm.dma_address, ttm->num_pages);
1017 |
1018 | 	return 0;
1019 |
1020 | release_sg:
1021 | 	kfree(ttm->sg);
1022 | 	ttm->sg = NULL;
1023 | 	return r;
1024 | #endif
1025 | }
1026 |
1027 | /**
1028 |  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
1029 |  */
1030 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
1031 | 					struct ttm_tt *ttm)
1032 | {
1033 | 	STUB();
1034 | #ifdef notyet
1035 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1036 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1037 |
1038 | 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1039 | 	enum dma_data_direction direction = write ?
1040 | 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1041 |
1042 | 	/* double check that we don't free the table twice */
1043 | 	if (!ttm->sg || !ttm->sg->sgl)
1044 | 		return;
1045 |
1046 | 	/* unmap the pages mapped to the device */
1047 | 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
1048 | 	sg_free_table(ttm->sg);
1049 |
1050 | #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
1051 | 	if (gtt->range) {
1052 | 		unsigned long i;
1053 |
1054 | 		for (i = 0; i < ttm->num_pages; i++) {
1055 | 			if (ttm->pages[i] !=
1056 | 			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
1057 | 				break;
1058 | 		}
1059 |
1060 | 		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
1061 | 	}
1062 | #endif
1063 | #endif
1064 | }
1065 |
1066 | static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
1067 | 				struct ttm_buffer_object *tbo,
1068 | 				uint64_t flags)
1069 | {
1070 | 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
1071 | 	struct ttm_tt *ttm = tbo->ttm;
1072 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1073 | 	int r;
1074 |
1075 | 	if (amdgpu_bo_encrypted(abo))
1076 | 		flags |= AMDGPU_PTE_TMZ;
1077 |
1078 | 	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
1079 | 		uint64_t page_idx = 1;
1080 |
1081 | 		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
1082 | 				     ttm->pages, gtt->ttm.dma_address, flags);
1083 | 		if (r)
1084 | 			goto gart_bind_fail;
1085 |
1086 | 		/* The memory type of the first page defaults to UC. Now
1087 | 		 * modify the memory type to NC from the second page of
1088 | 		 * the BO onward.
1089 | 		 */
1090 | 		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1091 | 		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
1092 |
1093 | 		r = amdgpu_gart_bind(adev,
1094 | 				     gtt->offset + (page_idx << PAGE_SHIFT),
1095 | 				     ttm->num_pages - page_idx,
1096 | 				     &ttm->pages[page_idx],
1097 | 				     &(gtt->ttm.dma_address[page_idx]), flags);
1098 | 	} else {
1099 | 		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1100 | 				     ttm->pages, gtt->ttm.dma_address, flags);
1101 | 	}
1102 |
1103 | gart_bind_fail:
1104 | 	if (r)
1105 | 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1106 | 			  ttm->num_pages, gtt->offset);
1107 |
1108 | 	return r;
1109 | }
1110 |
1111 | /**
1112 |  * amdgpu_ttm_backend_bind - Bind GTT memory
1113 |  *
1114 |  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1115 |  * This handles binding GTT memory to the device address space.
1116 |  */
1117 | static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
1118 | 				   struct ttm_tt *ttm,
1119 | 				   struct ttm_resource *bo_mem)
1120 | {
1121 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1122 | 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
1123 | 	uint64_t flags;
1124 | 	int r = 0;
1125 |
1126 | 	if (!bo_mem)
1127 | 		return -EINVAL;
1128 |
1129 | 	if (gtt->bound)
1130 | 		return 0;
1131 |
1132 | 	if (gtt->userptr) {
1133 | 		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
1134 | 		if (r) {
1135 | 			DRM_ERROR("failed to pin userptr\n");
1136 | 			return r;
1137 | 		}
1138 | 	}
1139 | 	if (!ttm->num_pages) {
1140 | 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
1141 | 		     ttm->num_pages, bo_mem, ttm);
1142 | 	}
1143 |
1144 | 	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1145 | 	    bo_mem->mem_type == AMDGPU_PL_GWS ||
1146 | 	    bo_mem->mem_type == AMDGPU_PL_OA)
1147 | 		return -EINVAL;
1148 |
1149 | 	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1150 | 		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1151 | 		return 0;
1152 | 	}
1153 |
1154 | 	/* compute PTE flags relevant to this BO memory */
1155 | 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1156 |
1157 | 	/* bind pages into GART page tables */
1158 | 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1159 | 	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1160 | 			     ttm->pages, gtt->ttm.dma_address, flags);
1161 |
1162 | 	if (r)
1163 | 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
1164 | 			  ttm->num_pages, gtt->offset);
1165 | 	gtt->bound = true;
1166 | 	return r;
1167 | }
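    | /* gtt->offset is a byte offset into the GART aperture (bo_mem->start
    |  * is in pages, hence the PAGE_SHIFT conversion above);
    |  * amdgpu_gtt_mgr_has_gart_addr() screens out GTT resources that are
    |  * not yet bound to a real GART address. */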
1168 |
1169 | /**
1170 |  * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
1171 |  * through AGP or GART aperture.
1172 |  *
1173 |  * If bo is accessible through AGP aperture, then use AGP aperture
1174 |  * to access bo; otherwise allocate logical space in GART aperture
1175 |  * and map bo to GART aperture.
1176 |  */
1177 | int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1178 | {
1179 | 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1180 | 	struct ttm_operation_ctx ctx = { false, false };
1181 | 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1182 | 	struct ttm_resource tmp;
1183 | 	struct ttm_placement placement;
1184 | 	struct ttm_place placements;
1185 | 	uint64_t addr, flags;
1186 | 	int r;
1187 |
1188 | 	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1189 | 		return 0;
1190 |
1191 | 	addr = amdgpu_gmc_agp_addr(bo);
1192 | 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
1193 | 		bo->mem.start = addr >> PAGE_SHIFT;
1194 | 	} else {
1195 |
1196 | 		/* allocate GART space */
1197 | 		tmp = bo->mem;
1198 | 		tmp.mm_node = NULL;
1199 | 		placement.num_placement = 1;
1200 | 		placement.placement = &placements;
1201 | 		placement.num_busy_placement = 1;
1202 | 		placement.busy_placement = &placements;
1203 | 		placements.fpfn = 0;
1204 | 		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1205 | 		placements.mem_type = TTM_PL_TT;
1206 | 		placements.flags = bo->mem.placement;
1207 |
1208 | 		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1209 | 		if (unlikely(r))
1210 | 			return r;
1211 |
1212 | 		/* compute PTE flags for this buffer object */
1213 | 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1214 |
1215 | 		/* Bind pages */
1216 | 		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1217 | 		r = amdgpu_ttm_gart_bind(adev, bo, flags);
1218 | 		if (unlikely(r)) {
1219 | 			ttm_resource_free(bo, &tmp);
1220 | 			return r;
1221 | 		}
1222 |
1223 | 		ttm_resource_free(bo, &bo->mem);
1224 | 		bo->mem = tmp;
1225 | 	}
1226 |
1227 | 	return 0;
1228 | }
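    | /* Usage sketch: callers that need a GPU-visible address for a GTT BO
    |  * (e.g. before relying on amdgpu_bo_gpu_offset()) invoke
    |  * amdgpu_ttm_alloc_gart() so the BO either reuses the AGP aperture or
    |  * gets GART space allocated and bound here. */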
1229 | |||||
1230 | /** | ||||
1231 | * amdgpu_ttm_recover_gart - Rebind GTT pages | ||||
1232 | * | ||||
1233 | * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to | ||||
1234 | * rebind GTT pages during a GPU reset. | ||||
1235 | */ | ||||
1236 | int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) | ||||
1237 | { | ||||
1238 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); | ||||
1239 | uint64_t flags; | ||||
1240 | int r; | ||||
1241 | |||||
1242 | if (!tbo->ttm) | ||||
1243 | return 0; | ||||
1244 | |||||
1245 | flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); | ||||
1246 | r = amdgpu_ttm_gart_bind(adev, tbo, flags); | ||||
1247 | |||||
1248 | return r; | ||||
1249 | } | ||||
1250 | |||||
1251 | /** | ||||
1252 | * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages | ||||
1253 | * | ||||
1254 | * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and | ||||
1255 | * ttm_tt_destroy(). | ||||
1256 | */ | ||||
1257 | static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, | ||||
1258 | struct ttm_tt *ttm) | ||||
1259 | { | ||||
1260 | struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); | ||||
1261 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||||
1262 | int r; | ||||
1263 | |||||
1264 | /* if the pages have userptr pinning then clear that first */ | ||||
1265 | if (gtt->userptr) | ||||
1266 | amdgpu_ttm_tt_unpin_userptr(bdev, ttm); | ||||
1267 | |||||
1268 | if (!gtt->bound) | ||||
1269 | return; | ||||
1270 | |||||
1271 | if (gtt->offset == AMDGPU_BO_INVALID_OFFSET0x7fffffffffffffffL) | ||||
1272 | return; | ||||
1273 | |||||
1274 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ | ||||
1275 | r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); | ||||
1276 | if (r) | ||||
1277 | DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",__drm_err("failed to unbind %lu pages at 0x%08llX\n", gtt-> ttm.ttm.num_pages, gtt->offset) | ||||
1278 | gtt->ttm.ttm.num_pages, gtt->offset)__drm_err("failed to unbind %lu pages at 0x%08llX\n", gtt-> ttm.ttm.num_pages, gtt->offset); | ||||
1279 | gtt->bound = false0; | ||||
1280 | } | ||||
1281 | |||||
1282 | static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev, | ||||
1283 | struct ttm_tt *ttm) | ||||
1284 | { | ||||
1285 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||||
1286 | |||||
1287 | amdgpu_ttm_backend_unbind(bdev, ttm); | ||||
1288 | ttm_tt_destroy_common(bdev, ttm); | ||||
1289 | #ifdef notyet | ||||
1290 | if (gtt->usertask) | ||||
1291 | put_task_struct(gtt->usertask); | ||||
1292 | #endif | ||||
1293 | |||||
1294 | ttm_dma_tt_fini(>t->ttm); | ||||
1295 | kfree(gtt); | ||||
1296 | } | ||||
1297 | |||||
1298 | /** | ||||
1299 | * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO | ||||
1300 | * | ||||
1301 | * @bo: The buffer object to create a GTT ttm_tt object around | ||||
1302 | * | ||||
1303 | * Called by ttm_tt_create(). | ||||
1304 | */ | ||||
1305 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, | ||||
1306 | uint32_t page_flags) | ||||
1307 | { | ||||
1308 | struct amdgpu_ttm_tt *gtt; | ||||
1309 | |||||
1310 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL(0x0001 | 0x0004)); | ||||
1311 | if (gtt == NULL((void *)0)) { | ||||
1312 | return NULL((void *)0); | ||||
1313 | } | ||||
1314 | gtt->gobj = &bo->base; | ||||
1315 | |||||
1316 | /* allocate space for the uninitialized page entries */ | ||||
1317 | if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { | ||||
1318 | kfree(gtt); | ||||
1319 | return NULL((void *)0); | ||||
1320 | } | ||||
1321 | return >t->ttm.ttm; | ||||
1322 | } | ||||
1323 | |||||
/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm_tt_set_populated(ttm);
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
#ifdef notyet
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);
#else
			STUB();
			return -ENOSYS;
#endif

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm_tt_set_populated(ttm);
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
#ifdef notyet
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
#else
		STUB();
#endif
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current task's VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to the current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	gtt = (void *)bo->ttm;
	gtt->userptr = addr;
	gtt->userflags = flags;

#ifdef notyet
	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);
#endif

	return 0;
}
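
/*
 * Minimal caller sketch (hypothetical, for illustration only): roughly how
 * a path such as amdgpu_gem_userptr_ioctl() attaches a user address to a
 * BO.  The bo and uaddr names below are assumptions, not driver symbols.
 *
 *	struct amdgpu_bo *bo;	(a freshly created GTT BO)
 *	uint64_t uaddr;		(page-aligned address in the caller's VM)
 *	int r;
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, uaddr,
 *				      AMDGPU_GEM_USERPTR_READONLY);
 *	if (r)
 *		return r;
 *	(later validation pins the user pages and binds them via the GART)
 */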

/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

#ifdef notyet
	return gtt->usertask->mm;
#else
	STUB();
	return NULL;
#endif
}

/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}
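
/*
 * Worked example of the overlap test above (illustrative numbers only):
 * with userptr = 0x10000 and num_pages = 4 (size = 0x4000 at 4 KB pages),
 * the object spans [0x10000, 0x14000).  An invalidation of
 * [0x13000, 0x15000) overlaps (0x10000 <= 0x15000 and 0x14000 > 0x13000),
 * so the function returns true; a range starting at 0x14000 begins exactly
 * at the end of the object and returns false.
 */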

/**
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
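
/*
 * Illustrative flag composition (a sketch of the two helpers above, not
 * additional driver logic): for a cached, writable ttm_tt bound into GTT
 * (TTM_PL_TT), the PDE part is VALID | SYSTEM | SNOOPED and the PTE adds
 * the ASIC's gart_pte_flags plus access bits, i.e.
 *
 *	pde = amdgpu_ttm_tt_pde_flags(ttm, mem);
 *	pte = pde | adev->gart.gart_pte_flags |
 *	      AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 */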

/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
#ifdef notyet
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
#endif
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		if (amdgpu_bo_is_amdgpu_bo(bo) &&
		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
			return false;
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	pos = offset;
	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
	pos += (nodes->start << PAGE_SHIFT);

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint64_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		if (mask != 0xffffffff) {
			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
			if (!write || mask != 0xffffffff)
				value = RREG32_NO_KIQ(mmMM_DATA);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				WREG32_NO_KIQ(mmMM_DATA, value);
			}
			spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
			if (!write) {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);

			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
						  bytes, write);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}
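
/*
 * Worked example of the unaligned path above (illustrative numbers): a
 * 2-byte read at pos = 0x1002 gives aligned_pos = 0x1000, bytes = 2,
 * shift = 16 and mask = 0xffff0000, so the dword at 0x1000 is fetched via
 * MM_INDEX/MM_DATA, masked, shifted down by 16 and its low 2 bytes copied
 * out.  Fully aligned, dword-sized spans take the bulk
 * amdgpu_device_vram_access() path instead.
 */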

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_bind = &amdgpu_ttm_backend_bind,
	.ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
			      NULL, &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		roundup2((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
}
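
/*
 * Resulting top-of-VRAM layout (illustrative numbers, assuming the macro
 * values shown in this file, e.g. GDDR6_MEM_TRAINING_OFFSET = 0x8000): the
 * p2c block sits GDDR6_MEM_TRAINING_OFFSET below the end of VRAM, and the
 * c2p block lands at (mc_vram_size - discovery_tmr_size - 1 MB) rounded up
 * to a 1 MB boundary.  With a hypothetical 8 GB (0x200000000) VRAM and a
 * 64 KB TMR:
 *
 *	p2c_train_data_offset = 0x200000000 - 0x8000         = 0x1ffff8000
 *	c2p_train_data_offset = roundup2(0x1ffef0000, SZ_1M) = 0x1fff00000
 */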

/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	int ret;
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;

	if (!amdgpu_sriov_vf(adev)) {
		ret = amdgpu_mem_train_support(adev);
		if (ret == 1)
			mem_train_support = true;
		else if (ret == -1)
			return -EINVAL;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query the reserved tmr size through atom firmwareinfo for
	 * Sienna_Cichlid and onwards for all the use cases (IP discovery/G6
	 * memory training/profiling/diagnostic data, etc.)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve
	 * tmr blocks for ip discovery data and G6 memory training data
	 * respectively
	 */
	adev->mman.discovery_tmr_size =
		amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
	if (!adev->mman.discovery_tmr_size)
		adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 AMDGPU_GEM_DOMAIN_VRAM,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	ret = amdgpu_bo_create_kernel_at(adev,
				adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
				adev->mman.discovery_tmr_size,
				AMDGPU_GEM_DOMAIN_VRAM,
				&adev->mman.discovery_memory,
				NULL);
	if (ret) {
		DRM_ERROR("alloc tmr failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
		return ret;
	}

	return 0;
}

/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;

	rw_init(&adev->mman.gtt_window_lock, "gttwin");

	/* No other users of the address space, so set it to 0 */
#ifdef notyet
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       adev_to_drm(adev)->anon_inode->i_mapping,
			       adev_to_drm(adev)->vma_offset_manager,
			       dma_addressing_limited(adev->dev));
#else
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       /*adev_to_drm(adev)->anon_inode->i_mapping*/NULL,
			       adev_to_drm(adev)->vma_offset_manager,
			       dma_addressing_limited(adev->dev));
#endif
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.bdev.iot = adev->iot;
	adev->mman.bdev.memt = adev->memt;
	adev->mman.bdev.dmat = adev->dmat;
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system pages allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce the size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
#ifdef __linux__
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#else
	if (bus_space_map(adev->memt, adev->gmc.aper_base,
	    adev->gmc.visible_vram_size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
	    &adev->mman.aper_bsh)) {
		adev->mman.aper_base_kaddr = NULL;
	} else {
		adev->mman.aper_base_kaddr = bus_space_vaddr(adev->memt,
		    adev->mman.aper_bsh);
	}
#endif
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r) {
		return r;
	}

	/*
	 * Only NAVI10 and onwards ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for IP discovery.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver. */
	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_vga_memory,
				       NULL);
	if (r)
		return r;
	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
				       adev->mman.stolen_extended_size,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->mman.stolen_extended_memory,
				       NULL);
	if (r)
		return r;

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4 of the size of RAM
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
#ifdef __linux__
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
				   adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
#else
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
				   adev->gmc.mc_vram_size),
			       ((uint64_t)ptoa(physmem) * 3/4));
#endif
	}
	else
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;

	/* Initialize GTT memory pool */
	r = amdgpu_gtt_mgr_init(adev, gtt_size);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	/* Initialize various on-chip memory pools */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	return 0;
}
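
/*
 * Worked example of the default GTT sizing above (illustrative numbers,
 * with AMDGPU_DEFAULT_GTT_SIZE_MB = 3072): on a machine with 16 GB of RAM
 * and a 4 GB card, gtt_size = min(max(3 GB, 4 GB), 12 GB) = 4 GB; with a
 * 16 GB card on the same machine it is capped by RAM at 12 GB.  If the
 * user sets amdgpu_gtt_size = 8192 instead, the "<< 20" branch yields a
 * fixed 8 GB.
 */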

/**
 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	/* return the VGA stolen memory (if any) back to VRAM */
	if (!adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
}

/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the stolen vga memory back to VRAM */
	if (adev->mman.keep_stolen_vga_memory)
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
	/* return the IP Discovery TMR memory back to VRAM */
	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);

#ifdef __linux__
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
#else
	if (adev->mman.aper_base_kaddr)
		bus_space_unmap(adev->memt, adev->mman.aper_bsh,
		    adev->gmc.visible_vram_size);
#endif
	adev->mman.aper_base_kaddr = NULL;

	amdgpu_vram_mgr_fini(adev);
	amdgpu_gtt_mgr_fini(adev);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	uint64_t size;
	int r;

	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.entity,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

#ifdef __linux__

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);

	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

#else

struct uvm_object *
amdgpu_mmap(struct file *filp, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_file *file_priv = (void *)filp;
	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);

	if (adev == NULL)
		return NULL;

	return ttm_bo_mmap(filp, off, size, &adev->mman.bdev);
}

#endif

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz)
{
	enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
		AMDGPU_IB_POOL_DELAYED;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = roundup2(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, tmz);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
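
/*
 * Minimal caller sketch (hypothetical, for illustration only): a fenced
 * copy through the scheduler entity rather than direct submission.  The
 * src, dst and size names below are assumptions, not driver symbols.
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src, dst,
 *			       size, bo->tbo.base.resv, &fence,
 *			       false, false, false);
 *	if (r)
 *		return r;
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */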

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_SYNC_ALWAYS,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}

/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~LINUX_PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct vm_page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & LINUX_PAGE_MASK;
		loff_t off = *pos & ~LINUX_PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct vm_page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

2574 | /** | ||||
2575 | * amdgpu_iomem_write - Virtual write access to GPU mapped memory | ||||
2576 | * | ||||
2577 | * This function is used to write memory that has been mapped to the | ||||
2578 | * GPU and the known addresses are not physical addresses but instead | ||||
2579 | * bus addresses (e.g., what you'd put in an IB or ring buffer). | ||||
2580 | */ | ||||
2581 | static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, | ||||
2582 | size_t size, loff_t *pos) | ||||
2583 | { | ||||
2584 | struct amdgpu_device *adev = file_inode(f)->i_private; | ||||
2585 | struct iommu_domain *dom; | ||||
2586 | ssize_t result = 0; | ||||
2587 | int r; | ||||
2588 | |||||
2589 | dom = iommu_get_domain_for_dev(adev->dev); | ||||
2590 | |||||
2591 | while (size) { | ||||
2592 | phys_addr_t addr = *pos & LINUX_PAGE_MASK(~((1 << 12) - 1)); | ||||
2593 | loff_t off = *pos & ~LINUX_PAGE_MASK(~((1 << 12) - 1)); | ||||
2594 | size_t bytes = PAGE_SIZE(1 << 12) - off; | ||||
2595 | unsigned long pfn; | ||||
2596 | struct vm_page *p; | ||||
2597 | void *ptr; | ||||
2598 | |||||
2599 | bytes = bytes < size ? bytes : size; | ||||
2600 | |||||
2601 | addr = dom ? iommu_iova_to_phys(dom, addr) : addr; | ||||
2602 | |||||
2603 | pfn = addr >> PAGE_SHIFT12; | ||||
2604 | if (!pfn_valid(pfn)) | ||||
2605 | return -EPERM1; | ||||
2606 | |||||
2607 | p = pfn_to_page(pfn)(PHYS_TO_VM_PAGE(((paddr_t)(pfn) << 12))); | ||||
2608 | if (p->mapping != adev->mman.bdev.dev_mapping) | ||||
2609 | return -EPERM1; | ||||
2610 | |||||
2611 | ptr = kmap(p); | ||||
2612 | r = copy_from_user(ptr + off, buf, bytes); | ||||
2613 | kunmap(p); | ||||
2614 | if (r) | ||||
2615 | return -EFAULT14; | ||||
2616 | |||||
2617 | size -= bytes; | ||||
2618 | *pos += bytes; | ||||
2619 | result += bytes; | ||||
2620 | } | ||||
2621 | |||||
2622 | return result; | ||||
2623 | } | ||||

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};
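
/*
 * A minimal usage sketch for the two callbacks above, assuming debugfs is
 * mounted at /sys/kernel/debug and the device is DRM minor 0 (both are
 * illustrative assumptions, not guarantees): the file is seekable, with
 * the file offset interpreted as a GPU bus address.
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_iomem", O_RDWR);
 *	uint32_t dw;
 *	pread(fd, &dw, sizeof(dw), bus_addr);	// file offset == bus address
 *	pwrite(fd, &dw, sizeof(dw), bus_addr);
 *	close(fd);
 */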

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
File: dev/pci/drm/amd/amdgpu/amdgpu_object.h

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long size;
	int byte_align;
	u32 domain;
	u32 preferred_domain;
	u64 flags;
	enum ttm_bo_type type;
	bool no_wait_gpu;
	struct dma_resv *resv;
};

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va *bo_va;
	struct list_head list;
	struct rb_node rb;
	uint64_t start;
	uint64_t last;
	uint64_t __subtree_last;
	uint64_t offset;
	uint64_t flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base base;

	/* protected by bo being reserved */
	unsigned ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence *last_pt_update;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* If the mappings are cleared or filled */
	bool cleared;

	bool is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32 preferred_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	unsigned prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base *vm_bo;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct amdgpu_bo *parent;
	struct amdgpu_bo *shadow;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier notifier;
#endif

	struct list_head shadow_list;

	struct kgd_mem *kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns the GEM domain corresponding to the given ttm mem_type.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
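
/*
 * A minimal sketch of the intended use: mapping a BO's current TTM
 * placement back to the GEM domain flags user space understands (the
 * surrounding variables here are illustrative):
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		;	// BO currently resides in VRAM
 */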

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
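
/*
 * The usual pattern around the two helpers above, sketched with an
 * illustrative caller: reserve, touch reservation-protected state,
 * unreserve.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;	// may be -ERESTARTSYS when interruptible
 *	// ... access fields protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */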

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
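
/*
 * The offset returned above is what the kernel reports to user space
 * (e.g. through the GEM mmap ioctl); user space then passes it as the
 * offset when mapping the DRM device file.  An illustrative sketch,
 * with size, drm_fd and mmap_offset assumed to come from the caller:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, mmap_offset);
 */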

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}
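
/*
 * Note on the walk above: a VRAM BO may be backed by several drm_mm
 * nodes, so each node is visited; the BO counts as (partly) CPU-visible
 * as soon as any node starts below the visible-VRAM page boundary fpfn.
 */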

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
#ifdef notyet
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
#endif
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
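
/*
 * A minimal sub-allocator sketch assembled from the prototypes above;
 * sizes, alignment, domain and the fence are illustrative assumptions,
 * and error handling is elided:
 *
 *	struct amdgpu_sa_manager mgr;
 *	struct amdgpu_sa_bo *sa = NULL;
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &mgr, 64 * 1024, 256,
 *				      AMDGPU_GEM_DOMAIN_GTT);
 *	r = amdgpu_sa_bo_new(&mgr, &sa, 1024, 256);
 *	// amdgpu_sa_bo_gpu_addr(sa) / amdgpu_sa_bo_cpu_addr(sa) address it
 *	amdgpu_sa_bo_free(adev, &sa, fence);	// reclaimed once fence signals
 *	amdgpu_sa_bo_manager_fini(adev, &mgr);
 */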

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif