File: | dev/pci/drm/amd/amdgpu/amdgpu_vm.c |
Warning: | line 1047, column 26 Value stored to 'obj' during its initialization is never read |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | |
29 | #include <linux/dma-fence-array.h> |
30 | #include <linux/interval_tree_generic.h> |
31 | #include <linux/idr.h> |
32 | #include <linux/dma-buf.h> |
33 | |
34 | #include <drm/amdgpu_drm.h> |
35 | #include <drm/drm_drv.h> |
36 | #include "amdgpu.h" |
37 | #include "amdgpu_trace.h" |
38 | #include "amdgpu_amdkfd.h" |
39 | #include "amdgpu_gmc.h" |
40 | #include "amdgpu_xgmi.h" |
41 | #include "amdgpu_dma_buf.h" |
42 | #include "amdgpu_res_cursor.h" |
43 | #include "../amdkfd/kfd_svm.h" |
44 | |
45 | /** |
46 | * DOC: GPUVM |
47 | * |
48 | * GPUVM is similar to the legacy gart on older asics, however |
49 | * rather than there being a single global gart table |
50 | * for the entire GPU, there are multiple VM page tables active |
51 | * at any given time. The VM page tables can contain a mix of |
52 | * vram pages and system memory pages, and system memory pages |
53 | * can be mapped as snooped (cached system pages) or unsnooped |
54 | * (uncached system pages). |
55 | * Each VM has an ID associated with it and there is a page table |
56 | * associated with each VMID. When executing a command buffer, |
57 | * the kernel tells the ring what VMID to use for that command |
58 | * buffer. VMIDs are allocated dynamically as commands are submitted. |
59 | * The userspace drivers maintain their own address space and the kernel |
60 | * sets up their page tables accordingly when they submit their |
61 | * command buffers and a VMID is assigned. |
62 | * Cayman/Trinity support up to 8 active VMs at any given time; |
63 | * SI supports 16. |
64 | */ |
65 | |
66 | #define START(node) ((node)->start) |
67 | #define LAST(node) ((node)->last) |
68 | |
69 | #ifdef __linux__ |
70 | INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last, |
71 | START, LAST, static, amdgpu_vm_it) |
72 | #else |
73 | static struct amdgpu_bo_va_mapping * |
74 | amdgpu_vm_it_iter_first(struct rb_root_cached *root, uint64_t start, |
75 | uint64_t last) |
76 | { |
77 | struct amdgpu_bo_va_mapping *node; |
78 | struct rb_node *rb; |
79 | |
80 | for (rb = rb_first_cached(root); rb; rb = rb_next(rb)) { |
81 | node = rb_entry(rb, typeof(*node), rb); |
82 | if (LAST(node) >= start && START(node) <= last) |
83 | return node; |
84 | } |
85 | return NULL; |
86 | } |
87 | |
88 | static struct amdgpu_bo_va_mapping * |
89 | amdgpu_vm_it_iter_next(struct amdgpu_bo_va_mapping *node, uint64_t start, |
90 | uint64_t last) |
91 | { |
92 | struct rb_node *rb = &node->rb; |
93 | |
94 | for (rb = rb_next(rb); rb; rb = rb_next(rb)) { |
95 | node = rb_entry(rb, typeof(*node), rb); |
96 | if (LAST(node) >= start && START(node) <= last) |
97 | return node; |
98 | } |
99 | return NULL; |
100 | } |
101 | |
102 | static void |
103 | amdgpu_vm_it_remove(struct amdgpu_bo_va_mapping *node, |
104 | struct rb_root_cached *root) |
105 | { |
106 | rb_erase_cached(&node->rb, root); |
107 | } |
108 | |
109 | static void |
110 | amdgpu_vm_it_insert(struct amdgpu_bo_va_mapping *node, |
111 | struct rb_root_cached *root) |
112 | { |
113 | struct rb_node **iter = &root->rb_root.rb_node; |
114 | struct rb_node *parent = NULL((void *)0); |
115 | struct amdgpu_bo_va_mapping *iter_node; |
116 | |
117 | while (*iter) { |
118 | parent = *iter; |
119 | iter_node = rb_entry(*iter, struct amdgpu_bo_va_mapping, rb); |
120 | |
121 | if (node->start < iter_node->start) |
122 | iter = &(*iter)->rb_left; |
123 | else |
124 | iter = &(*iter)->rb_right; |
125 | } |
126 | |
127 | rb_link_node(&node->rb, parent, iter); |
128 | rb_insert_color_cached(&node->rb, root, false); |
129 | } |
130 | #endif |
131 | |
132 | #undef START |
133 | #undef LAST |
134 | |
135 | /** |
136 | * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback |
137 | */ |
138 | struct amdgpu_prt_cb { |
139 | |
140 | /** |
141 | * @adev: amdgpu device |
142 | */ |
143 | struct amdgpu_device *adev; |
144 | |
145 | /** |
146 | * @cb: callback |
147 | */ |
148 | struct dma_fence_cb cb; |
149 | }; |
150 | |
151 | /** |
152 | * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence |
153 | */ |
154 | struct amdgpu_vm_tlb_seq_cb { |
155 | /** |
156 | * @vm: pointer to the amdgpu_vm structure to set the fence sequence on |
157 | */ |
158 | struct amdgpu_vm *vm; |
159 | |
160 | /** |
161 | * @cb: callback |
162 | */ |
163 | struct dma_fence_cb cb; |
164 | }; |
165 | |
166 | /** |
167 | * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping |
168 | * |
169 | * @adev: amdgpu_device pointer |
170 | * @vm: amdgpu_vm pointer |
171 | * @pasid: the pasid the VM is using on this GPU |
172 | * |
173 | * Set the pasid this VM is using on this GPU, can also be used to remove the |
174 | * pasid by passing in zero. |
175 | * |
176 | */ |
177 | int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
178 | u32 pasid) |
179 | { |
180 | int r; |
181 | |
182 | if (vm->pasid == pasid) |
183 | return 0; |
184 | |
185 | if (vm->pasid) { |
186 | r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); |
187 | if (r < 0) |
188 | return r; |
189 | |
190 | vm->pasid = 0; |
191 | } |
192 | |
193 | if (pasid) { |
194 | r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, |
195 | GFP_KERNEL)); |
196 | if (r < 0) |
197 | return r; |
198 | |
199 | vm->pasid = pasid; |
200 | } |
201 | |
202 | |
203 | return 0; |
204 | } |
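A minimal usage sketch may help here (hypothetical caller, not part of this file; it only assumes the signature above and the documented behaviour that passing zero removes the mapping):

	/* bind the pasid obtained for this process to the VM */
	r = amdgpu_vm_set_pasid(adev, vm, pasid);
	if (r)
		return r;
	/* ... */
	/* on teardown, passing 0 drops the pasid -> VM lookup entry again */
	amdgpu_vm_set_pasid(adev, vm, 0);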
205 | |
206 | /** |
207 | * amdgpu_vm_bo_evicted - vm_bo is evicted |
208 | * |
209 | * @vm_bo: vm_bo which is evicted |
210 | * |
211 | * State for PDs/PTs and per VM BOs which are not at the location they should |
212 | * be. |
213 | */ |
214 | static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) |
215 | { |
216 | struct amdgpu_vm *vm = vm_bo->vm; |
217 | struct amdgpu_bo *bo = vm_bo->bo; |
218 | |
219 | vm_bo->moved = true; |
220 | spin_lock(&vm_bo->vm->status_lock); |
221 | if (bo->tbo.type == ttm_bo_type_kernel) |
222 | list_move(&vm_bo->vm_status, &vm->evicted); |
223 | else |
224 | list_move_tail(&vm_bo->vm_status, &vm->evicted); |
225 | spin_unlock(&vm_bo->vm->status_lock); |
226 | } |
227 | /** |
228 | * amdgpu_vm_bo_moved - vm_bo is moved |
229 | * |
230 | * @vm_bo: vm_bo which is moved |
231 | * |
232 | * State for per VM BOs which are moved, but that change is not yet reflected |
233 | * in the page tables. |
234 | */ |
235 | static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) |
236 | { |
237 | spin_lock(&vm_bo->vm->status_lock); |
238 | list_move(&vm_bo->vm_status, &vm_bo->vm->moved); |
239 | spin_unlock(&vm_bo->vm->status_lock); |
240 | } |
241 | |
242 | /** |
243 | * amdgpu_vm_bo_idle - vm_bo is idle |
244 | * |
245 | * @vm_bo: vm_bo which is now idle |
246 | * |
247 | * State for PDs/PTs and per VM BOs which have gone through the state machine |
248 | * and are now idle. |
249 | */ |
250 | static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) |
251 | { |
252 | spin_lock(&vm_bo->vm->status_lock); |
253 | list_move(&vm_bo->vm_status, &vm_bo->vm->idle); |
254 | spin_unlock(&vm_bo->vm->status_lock); |
255 | vm_bo->moved = false; |
256 | } |
257 | |
258 | /** |
259 | * amdgpu_vm_bo_invalidated - vm_bo is invalidated |
260 | * |
261 | * @vm_bo: vm_bo which is now invalidated |
262 | * |
263 | * State for normal BOs which are invalidated and that change not yet reflected |
264 | * in the PTs. |
265 | */ |
266 | static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) |
267 | { |
268 | spin_lock(&vm_bo->vm->status_lock); |
269 | list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); |
270 | spin_unlock(&vm_bo->vm->status_lock); |
271 | } |
272 | |
273 | /** |
274 | * amdgpu_vm_bo_relocated - vm_bo is relocated |
275 | * |
276 | * @vm_bo: vm_bo which is relocated |
277 | * |
278 | * State for PDs/PTs which needs to update their parent PD. |
279 | * For the root PD, just move to idle state. |
280 | */ |
281 | static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) |
282 | { |
283 | if (vm_bo->bo->parent) { |
284 | spin_lock(&vm_bo->vm->status_lock); |
285 | list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); |
286 | spin_unlock(&vm_bo->vm->status_lock); |
287 | } else { |
288 | amdgpu_vm_bo_idle(vm_bo); |
289 | } |
290 | } |
291 | |
292 | /** |
293 | * amdgpu_vm_bo_done - vm_bo is done |
294 | * |
295 | * @vm_bo: vm_bo which is now done |
296 | * |
297 | * State for normal BOs which are invalidated and that change has been updated |
298 | * in the PTs. |
299 | */ |
300 | static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) |
301 | { |
302 | spin_lock(&vm_bo->vm->status_lock); |
303 | list_move(&vm_bo->vm_status, &vm_bo->vm->done); |
304 | spin_unlock(&vm_bo->vm->status_lock); |
305 | } |
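The six helpers above form a small per-VM BO state machine; a rough summary, derived only from their own comments (a reading aid, not authoritative documentation):

/*
 * evicted     - PDs/PTs and per-VM BOs that are not where they should be
 * moved       - per-VM BOs moved, change not yet reflected in the PTs
 * relocated   - PDs/PTs that still need their parent PD updated
 * idle        - PDs/PTs and per-VM BOs that went through the state machine
 * invalidated - normal BOs invalidated, change not yet reflected in the PTs
 * done        - normal BOs whose change has been written to the PTs
 */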
306 | |
307 | /** |
308 | * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm |
309 | * |
310 | * @base: base structure for tracking BO usage in a VM |
311 | * @vm: vm to which bo is to be added |
312 | * @bo: amdgpu buffer object |
313 | * |
314 | * Initialize a bo_va_base structure and add it to the appropriate lists |
315 | * |
316 | */ |
317 | void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, |
318 | struct amdgpu_vm *vm, struct amdgpu_bo *bo) |
319 | { |
320 | base->vm = vm; |
321 | base->bo = bo; |
322 | base->next = NULL((void *)0); |
323 | INIT_LIST_HEAD(&base->vm_status); |
324 | |
325 | if (!bo) |
326 | return; |
327 | base->next = bo->vm_bo; |
328 | bo->vm_bo = base; |
329 | |
330 | if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) |
331 | return; |
332 | |
333 | dma_resv_assert_held(vm->root.bo->tbo.base.resv); |
334 | |
335 | ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); |
336 | if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) |
337 | amdgpu_vm_bo_relocated(base); |
338 | else |
339 | amdgpu_vm_bo_idle(base); |
340 | |
341 | if (bo->preferred_domains & |
342 | amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) |
343 | return; |
344 | |
345 | /* |
346 | * we checked all the prerequisites, but it looks like this per vm bo |
347 | * is currently evicted. add the bo to the evicted list to make sure it |
348 | * is validated on next vm use to avoid fault. |
349 | * */ |
350 | amdgpu_vm_bo_evicted(base); |
351 | } |
352 | |
353 | /** |
354 | * amdgpu_vm_get_pd_bo - add the VM PD to a validation list |
355 | * |
356 | * @vm: vm providing the BOs |
357 | * @validated: head of validation list |
358 | * @entry: entry to add |
359 | * |
360 | * Add the page directory to the list of BOs to |
361 | * validate for command submission. |
362 | */ |
363 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, |
364 | struct list_head *validated, |
365 | struct amdgpu_bo_list_entry *entry) |
366 | { |
367 | entry->priority = 0; |
368 | entry->tv.bo = &vm->root.bo->tbo; |
369 | /* Two for VM updates, one for TTM and one for the CS job */ |
370 | entry->tv.num_shared = 4; |
371 | entry->user_pages = NULL((void *)0); |
372 | list_add(&entry->tv.head, validated); |
373 | } |
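A minimal sketch of how a command-submission path might use this helper (hypothetical caller; only the signature above is assumed):

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;

	INIT_LIST_HEAD(&validated);
	amdgpu_vm_get_pd_bo(vm, &validated, &vm_pd);
	/* ...then reserve and validate everything queued on 'validated'... */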
374 | |
375 | /** |
376 | * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU |
377 | * |
378 | * @adev: amdgpu device pointer |
379 | * @vm: vm providing the BOs |
380 | * |
381 | * Move all BOs to the end of LRU and remember their positions to put them |
382 | * together. |
383 | */ |
384 | void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, |
385 | struct amdgpu_vm *vm) |
386 | { |
387 | spin_lock(&adev->mman.bdev.lru_lock); |
388 | ttm_lru_bulk_move_tail(&vm->lru_bulk_move); |
389 | spin_unlock(&adev->mman.bdev.lru_lock); |
390 | } |
391 | |
392 | /** |
393 | * amdgpu_vm_validate_pt_bos - validate the page table BOs |
394 | * |
395 | * @adev: amdgpu device pointer |
396 | * @vm: vm providing the BOs |
397 | * @validate: callback to do the validation |
398 | * @param: parameter for the validation callback |
399 | * |
400 | * Validate the page table BOs on command submission if necessary. |
401 | * |
402 | * Returns: |
403 | * Validation result. |
404 | */ |
405 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
406 | int (*validate)(void *p, struct amdgpu_bo *bo), |
407 | void *param) |
408 | { |
409 | struct amdgpu_vm_bo_base *bo_base; |
410 | struct amdgpu_bo *shadow; |
411 | struct amdgpu_bo *bo; |
412 | int r; |
413 | |
414 | spin_lock(&vm->status_lock); |
415 | while (!list_empty(&vm->evicted)) { |
416 | bo_base = list_first_entry(&vm->evicted, |
417 | struct amdgpu_vm_bo_base, |
418 | vm_status); |
419 | spin_unlock(&vm->status_lock); |
420 | |
421 | bo = bo_base->bo; |
422 | shadow = amdgpu_bo_shadowed(bo); |
423 | |
424 | r = validate(param, bo); |
425 | if (r) |
426 | return r; |
427 | if (shadow) { |
428 | r = validate(param, shadow); |
429 | if (r) |
430 | return r; |
431 | } |
432 | |
433 | if (bo->tbo.type != ttm_bo_type_kernel) { |
434 | amdgpu_vm_bo_moved(bo_base); |
435 | } else { |
436 | vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); |
437 | amdgpu_vm_bo_relocated(bo_base); |
438 | } |
439 | spin_lock(&vm->status_lock); |
440 | } |
441 | spin_unlock(&vm->status_lock); |
442 | |
443 | amdgpu_vm_eviction_lock(vm); |
444 | vm->evicting = false; |
445 | amdgpu_vm_eviction_unlock(vm); |
446 | |
447 | return 0; |
448 | } |
449 | |
450 | /** |
451 | * amdgpu_vm_ready - check VM is ready for updates |
452 | * |
453 | * @vm: VM to check |
454 | * |
455 | * Check if all VM PDs/PTs are ready for updates |
456 | * |
457 | * Returns: |
458 | * True if VM is not evicting. |
459 | */ |
460 | bool_Bool amdgpu_vm_ready(struct amdgpu_vm *vm) |
461 | { |
462 | bool_Bool empty; |
463 | bool_Bool ret; |
464 | |
465 | amdgpu_vm_eviction_lock(vm); |
466 | ret = !vm->evicting; |
467 | amdgpu_vm_eviction_unlock(vm); |
468 | |
469 | spin_lock(&vm->status_lock); |
470 | empty = list_empty(&vm->evicted); |
471 | spin_unlock(&vm->status_lock); |
472 | |
473 | return ret && empty; |
474 | } |
475 | |
476 | /** |
477 | * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug |
478 | * |
479 | * @adev: amdgpu_device pointer |
480 | */ |
481 | void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) |
482 | { |
483 | const struct amdgpu_ip_block *ip_block; |
484 | bool_Bool has_compute_vm_bug; |
485 | struct amdgpu_ring *ring; |
486 | int i; |
487 | |
488 | has_compute_vm_bug = false0; |
489 | |
490 | ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); |
491 | if (ip_block) { |
492 | /* Compute has a VM bug for GFX version < 7. |
493 | Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ |
494 | if (ip_block->version->major <= 7) |
495 | has_compute_vm_bug = true1; |
496 | else if (ip_block->version->major == 8) |
497 | if (adev->gfx.mec_fw_version < 673) |
498 | has_compute_vm_bug = true1; |
499 | } |
500 | |
501 | for (i = 0; i < adev->num_rings; i++) { |
502 | ring = adev->rings[i]; |
503 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) |
504 | /* only compute rings */ |
505 | ring->has_compute_vm_bug = has_compute_vm_bug; |
506 | else |
507 | ring->has_compute_vm_bug = false0; |
508 | } |
509 | } |
510 | |
511 | /** |
512 | * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job. |
513 | * |
514 | * @ring: ring on which the job will be submitted |
515 | * @job: job to submit |
516 | * |
517 | * Returns: |
518 | * True if sync is needed. |
519 | */ |
520 | bool_Bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, |
521 | struct amdgpu_job *job) |
522 | { |
523 | struct amdgpu_device *adev = ring->adev; |
524 | unsigned vmhub = ring->funcs->vmhub; |
525 | struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; |
526 | struct amdgpu_vmid *id; |
527 | bool_Bool gds_switch_needed; |
528 | bool_Bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; |
529 | |
530 | if (job->vmid == 0) |
531 | return false0; |
532 | id = &id_mgr->ids[job->vmid]; |
533 | gds_switch_needed = ring->funcs->emit_gds_switch && ( |
534 | id->gds_base != job->gds_base || |
535 | id->gds_size != job->gds_size || |
536 | id->gws_base != job->gws_base || |
537 | id->gws_size != job->gws_size || |
538 | id->oa_base != job->oa_base || |
539 | id->oa_size != job->oa_size); |
540 | |
541 | if (amdgpu_vmid_had_gpu_reset(adev, id)) |
542 | return true1; |
543 | |
544 | return vm_flush_needed || gds_switch_needed; |
545 | } |
546 | |
547 | /** |
548 | * amdgpu_vm_flush - hardware flush the vm |
549 | * |
550 | * @ring: ring to use for flush |
551 | * @job: related job |
552 | * @need_pipe_sync: is pipe sync needed |
553 | * |
554 | * Emit a VM flush when it is necessary. |
555 | * |
556 | * Returns: |
557 | * 0 on success, errno otherwise. |
558 | */ |
559 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, |
560 | bool_Bool need_pipe_sync) |
561 | { |
562 | struct amdgpu_device *adev = ring->adev; |
563 | unsigned vmhub = ring->funcs->vmhub; |
564 | struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; |
565 | struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; |
566 | bool_Bool gds_switch_needed = ring->funcs->emit_gds_switch && ( |
567 | id->gds_base != job->gds_base || |
568 | id->gds_size != job->gds_size || |
569 | id->gws_base != job->gws_base || |
570 | id->gws_size != job->gws_size || |
571 | id->oa_base != job->oa_base || |
572 | id->oa_size != job->oa_size); |
573 | bool_Bool vm_flush_needed = job->vm_needs_flush; |
574 | struct dma_fence *fence = NULL((void *)0); |
575 | bool_Bool pasid_mapping_needed = false0; |
576 | unsigned patch_offset = 0; |
577 | bool_Bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL((void *)0))); |
578 | int r; |
579 | |
580 | if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) |
581 | adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); |
582 | |
583 | if (amdgpu_vmid_had_gpu_reset(adev, id)) { |
584 | gds_switch_needed = true1; |
585 | vm_flush_needed = true1; |
586 | pasid_mapping_needed = true1; |
587 | } |
588 | |
589 | mutex_lock(&id_mgr->lock); |
590 | if (id->pasid != job->pasid || !id->pasid_mapping || |
591 | !dma_fence_is_signaled(id->pasid_mapping)) |
592 | pasid_mapping_needed = true; |
593 | mutex_unlock(&id_mgr->lock); |
594 | |
595 | gds_switch_needed &= !!ring->funcs->emit_gds_switch; |
596 | vm_flush_needed &= !!ring->funcs->emit_vm_flush && |
597 | job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET0x7fffffffffffffffL; |
598 | pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && |
599 | ring->funcs->emit_wreg; |
600 | |
601 | if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) |
602 | return 0; |
603 | |
604 | if (ring->funcs->init_cond_exec) |
605 | patch_offset = amdgpu_ring_init_cond_exec(ring); |
606 | |
607 | if (need_pipe_sync) |
608 | amdgpu_ring_emit_pipeline_sync(ring); |
609 | |
610 | if (vm_flush_needed) { |
611 | trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); |
612 | amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); |
613 | } |
614 | |
615 | if (pasid_mapping_needed) |
616 | amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); |
617 | |
618 | if (vm_flush_needed || pasid_mapping_needed) { |
619 | r = amdgpu_fence_emit(ring, &fence, NULL, 0); |
620 | if (r) |
621 | return r; |
622 | } |
623 | |
624 | if (vm_flush_needed) { |
625 | mutex_lock(&id_mgr->lock); |
626 | dma_fence_put(id->last_flush); |
627 | id->last_flush = dma_fence_get(fence); |
628 | id->current_gpu_reset_count = |
629 | atomic_read(&adev->gpu_reset_counter); |
630 | mutex_unlock(&id_mgr->lock); |
631 | } |
632 | |
633 | if (pasid_mapping_needed) { |
634 | mutex_lock(&id_mgr->lock); |
635 | id->pasid = job->pasid; |
636 | dma_fence_put(id->pasid_mapping); |
637 | id->pasid_mapping = dma_fence_get(fence); |
638 | mutex_unlock(&id_mgr->lock); |
639 | } |
640 | dma_fence_put(fence); |
641 | |
642 | if (!ring->is_mes_queue && ring->funcs->emit_gds_switch && |
643 | gds_switch_needed) { |
644 | id->gds_base = job->gds_base; |
645 | id->gds_size = job->gds_size; |
646 | id->gws_base = job->gws_base; |
647 | id->gws_size = job->gws_size; |
648 | id->oa_base = job->oa_base; |
649 | id->oa_size = job->oa_size; |
650 | amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, |
651 | job->gds_size, job->gws_base, |
652 | job->gws_size, job->oa_base, |
653 | job->oa_size); |
654 | } |
655 | |
656 | if (ring->funcs->patch_cond_exec) |
657 | amdgpu_ring_patch_cond_exec(ring, patch_offset); |
658 | |
659 | /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ |
660 | if (ring->funcs->emit_switch_buffer) { |
661 | amdgpu_ring_emit_switch_buffer(ring); |
662 | amdgpu_ring_emit_switch_buffer(ring); |
663 | } |
664 | return 0; |
665 | } |
666 | |
667 | /** |
668 | * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo |
669 | * |
670 | * @vm: requested vm |
671 | * @bo: requested buffer object |
672 | * |
673 | * Find @bo inside the requested vm. |
674 | * Search inside the @bos vm list for the requested vm |
675 | * Returns the found bo_va or NULL if none is found |
676 | * |
677 | * Object has to be reserved! |
678 | * |
679 | * Returns: |
680 | * Found bo_va or NULL. |
681 | */ |
682 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, |
683 | struct amdgpu_bo *bo) |
684 | { |
685 | struct amdgpu_vm_bo_base *base; |
686 | |
687 | for (base = bo->vm_bo; base; base = base->next) { |
688 | if (base->vm != vm) |
689 | continue; |
690 | |
691 | return container_of(base, struct amdgpu_bo_va, base); |
692 | } |
693 | return NULL; |
694 | } |
695 | |
696 | /** |
697 | * amdgpu_vm_map_gart - Resolve gart mapping of addr |
698 | * |
699 | * @pages_addr: optional DMA address to use for lookup |
700 | * @addr: the unmapped addr |
701 | * |
702 | * Look up the physical address of the page that the pte resolves |
703 | * to. |
704 | * |
705 | * Returns: |
706 | * The pointer for the page table entry. |
707 | */ |
708 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) |
709 | { |
710 | uint64_t result; |
711 | |
712 | /* page table offset */ |
713 | result = pages_addr[addr >> PAGE_SHIFT]; |
714 | |
715 | /* in case cpu page size != gpu page size */ |
716 | result |= addr & (~LINUX_PAGE_MASK); |
717 | |
718 | result &= 0xFFFFFFFFFFFFF000ULL; |
719 | |
720 | return result; |
721 | } |
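A quick worked example (illustrative numbers only, assuming the 4 KiB CPU page size used in this build, i.e. PAGE_SHIFT == 12):

	/* addr = 0x123456: look up the backing CPU page, re-apply the offset */
	result = pages_addr[0x123456 >> 12];	/* pages_addr[0x123], say 0x80001000 */
	result |= 0x123456 & 0xfff;		/* 0x80001456 */
	result &= 0xFFFFFFFFFFFFF000ULL;	/* 0x80001000: a no-op for 4K pages */

The OR/AND pair only changes the result when the CPU page is larger than the 4 KiB GPU page; in that case the low bits of @addr select the right 4 KiB chunk inside the CPU page before the sub-page bits are masked off again.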
722 | |
723 | /** |
724 | * amdgpu_vm_update_pdes - make sure that all directories are valid |
725 | * |
726 | * @adev: amdgpu_device pointer |
727 | * @vm: requested vm |
728 | * @immediate: submit immediately to the paging queue |
729 | * |
730 | * Makes sure all directories are up to date. |
731 | * |
732 | * Returns: |
733 | * 0 for success, error for failure. |
734 | */ |
735 | int amdgpu_vm_update_pdes(struct amdgpu_device *adev, |
736 | struct amdgpu_vm *vm, bool_Bool immediate) |
737 | { |
738 | struct amdgpu_vm_update_params params; |
739 | struct amdgpu_vm_bo_base *entry; |
740 | bool flush_tlb_needed = false; |
741 | DRM_LIST_HEAD(relocated); |
742 | int r, idx; |
743 | |
744 | spin_lock(&vm->status_lock); |
745 | list_splice_init(&vm->relocated, &relocated); |
746 | spin_unlock(&vm->status_lock); |
747 | |
748 | if (list_empty(&relocated)) |
749 | return 0; |
750 | |
751 | if (!drm_dev_enter(adev_to_drm(adev), &idx)) |
752 | return -ENODEV19; |
753 | |
754 | memset(&params, 0, sizeof(params)); |
755 | params.adev = adev; |
756 | params.vm = vm; |
757 | params.immediate = immediate; |
758 | |
759 | r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); |
760 | if (r) |
761 | goto error; |
762 | |
763 | list_for_each_entry(entry, &relocated, vm_status) { |
764 | /* vm_flush_needed after updating moved PDEs */ |
765 | flush_tlb_needed |= entry->moved; |
766 | |
767 | r = amdgpu_vm_pde_update(¶ms, entry); |
768 | if (r) |
769 | goto error; |
770 | } |
771 | |
772 | r = vm->update_funcs->commit(¶ms, &vm->last_update); |
773 | if (r) |
774 | goto error; |
775 | |
776 | if (flush_tlb_needed) |
777 | atomic64_inc(&vm->tlb_seq); |
778 | |
779 | while (!list_empty(&relocated)) { |
780 | entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, |
781 | vm_status); |
782 | amdgpu_vm_bo_idle(entry); |
783 | } |
784 | |
785 | error: |
786 | drm_dev_exit(idx); |
787 | return r; |
788 | } |
789 | |
790 | /** |
791 | * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence |
792 | * @fence: unused |
793 | * @cb: the callback structure |
794 | * |
795 | * Increments the tlb sequence to make sure that future CS execute a VM flush. |
796 | */ |
797 | static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, |
798 | struct dma_fence_cb *cb) |
799 | { |
800 | struct amdgpu_vm_tlb_seq_cb *tlb_cb; |
801 | |
802 | tlb_cb = container_of(cb, typeof(*tlb_cb), cb); |
803 | atomic64_inc(&tlb_cb->vm->tlb_seq); |
804 | kfree(tlb_cb); |
805 | } |
806 | |
807 | /** |
808 | * amdgpu_vm_update_range - update a range in the vm page table |
809 | * |
810 | * @adev: amdgpu_device pointer to use for commands |
811 | * @vm: the VM to update the range |
812 | * @immediate: immediate submission in a page fault |
813 | * @unlocked: unlocked invalidation during MM callback |
814 | * @flush_tlb: trigger tlb invalidation after update completed |
815 | * @resv: fences we need to sync to |
816 | * @start: start of mapped range |
817 | * @last: last mapped entry |
818 | * @flags: flags for the entries |
819 | * @offset: offset into nodes and pages_addr |
820 | * @vram_base: base for vram mappings |
821 | * @res: ttm_resource to map |
822 | * @pages_addr: DMA addresses to use for mapping |
823 | * @fence: optional resulting fence |
824 | * |
825 | * Fill in the page table entries between @start and @last. |
826 | * |
827 | * Returns: |
828 | * 0 for success, negative error code for failure. |
829 | */ |
830 | int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
831 | bool_Bool immediate, bool_Bool unlocked, bool_Bool flush_tlb, |
832 | struct dma_resv *resv, uint64_t start, uint64_t last, |
833 | uint64_t flags, uint64_t offset, uint64_t vram_base, |
834 | struct ttm_resource *res, dma_addr_t *pages_addr, |
835 | struct dma_fence **fence) |
836 | { |
837 | struct amdgpu_vm_update_params params; |
838 | struct amdgpu_vm_tlb_seq_cb *tlb_cb; |
839 | struct amdgpu_res_cursor cursor; |
840 | enum amdgpu_sync_mode sync_mode; |
841 | int r, idx; |
842 | |
843 | if (!drm_dev_enter(adev_to_drm(adev), &idx)) |
844 | return -ENODEV19; |
845 | |
846 | tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL(0x0001 | 0x0004)); |
847 | if (!tlb_cb) { |
848 | r = -ENOMEM12; |
849 | goto error_unlock; |
850 | } |
851 | |
852 | /* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache, |
853 | * heavy-weight flush TLB unconditionally. |
854 | */ |
855 | flush_tlb |= adev->gmc.xgmi.num_physical_nodes && |
856 | adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); |
857 | |
858 | /* |
859 | * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB |
860 | */ |
861 | flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); |
862 | |
863 | memset(&params, 0, sizeof(params)); |
864 | params.adev = adev; |
865 | params.vm = vm; |
866 | params.immediate = immediate; |
867 | params.pages_addr = pages_addr; |
868 | params.unlocked = unlocked; |
869 | |
870 | /* Implicitly sync to command submissions in the same VM before |
871 | * unmapping. Sync to moving fences before mapping. |
872 | */ |
873 | if (!(flags & AMDGPU_PTE_VALID(1ULL << 0))) |
874 | sync_mode = AMDGPU_SYNC_EQ_OWNER; |
875 | else |
876 | sync_mode = AMDGPU_SYNC_EXPLICIT; |
877 | |
878 | amdgpu_vm_eviction_lock(vm); |
879 | if (vm->evicting) { |
880 | r = -EBUSY16; |
881 | goto error_free; |
882 | } |
883 | |
884 | if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { |
885 | struct dma_fence *tmp = dma_fence_get_stub(); |
886 | |
887 | amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true1); |
888 | swap(vm->last_unlocked, tmp); |
889 | dma_fence_put(tmp); |
890 | } |
891 | |
892 | r = vm->update_funcs->prepare(¶ms, resv, sync_mode); |
893 | if (r) |
894 | goto error_free; |
895 | |
896 | amdgpu_res_first(pages_addr ? NULL : res, offset, |
897 | (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor); |
898 | while (cursor.remaining) { |
899 | uint64_t tmp, num_entries, addr; |
900 | |
901 | num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT12; |
902 | if (pages_addr) { |
903 | bool_Bool contiguous = true1; |
904 | |
905 | if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE((1 << 12) / 4096)) { |
906 | uint64_t pfn = cursor.start >> PAGE_SHIFT12; |
907 | uint64_t count; |
908 | |
909 | contiguous = pages_addr[pfn + 1] == |
910 | pages_addr[pfn] + PAGE_SIZE(1 << 12); |
911 | |
912 | tmp = num_entries / |
913 | AMDGPU_GPU_PAGES_IN_CPU_PAGE((1 << 12) / 4096); |
914 | for (count = 2; count < tmp; ++count) { |
915 | uint64_t idx = pfn + count; |
916 | |
917 | if (contiguous != (pages_addr[idx] == |
918 | pages_addr[idx - 1] + PAGE_SIZE(1 << 12))) |
919 | break; |
920 | } |
921 | num_entries = count * |
922 | AMDGPU_GPU_PAGES_IN_CPU_PAGE((1 << 12) / 4096); |
923 | } |
924 | |
925 | if (!contiguous) { |
926 | addr = cursor.start; |
927 | params.pages_addr = pages_addr; |
928 | } else { |
929 | addr = pages_addr[cursor.start >> PAGE_SHIFT12]; |
930 | params.pages_addr = NULL((void *)0); |
931 | } |
932 | |
933 | } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { |
934 | addr = vram_base + cursor.start; |
935 | } else { |
936 | addr = 0; |
937 | } |
938 | |
939 | tmp = start + num_entries; |
940 | r = amdgpu_vm_ptes_update(¶ms, start, tmp, addr, flags); |
941 | if (r) |
942 | goto error_free; |
943 | |
944 | amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE4096); |
945 | start = tmp; |
946 | } |
947 | |
948 | r = vm->update_funcs->commit(¶ms, fence); |
949 | |
950 | if (flush_tlb || params.table_freed) { |
951 | tlb_cb->vm = vm; |
952 | if (fence && *fence && |
953 | !dma_fence_add_callback(*fence, &tlb_cb->cb, |
954 | amdgpu_vm_tlb_seq_cb)) { |
955 | dma_fence_put(vm->last_tlb_flush); |
956 | vm->last_tlb_flush = dma_fence_get(*fence); |
957 | } else { |
958 | amdgpu_vm_tlb_seq_cb(NULL((void *)0), &tlb_cb->cb); |
959 | } |
960 | tlb_cb = NULL((void *)0); |
961 | } |
962 | |
963 | error_free: |
964 | kfree(tlb_cb); |
965 | |
966 | error_unlock: |
967 | amdgpu_vm_eviction_unlock(vm); |
968 | drm_dev_exit(idx); |
969 | return r; |
970 | } |
971 | |
972 | void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, |
973 | uint64_t *gtt_mem, uint64_t *cpu_mem) |
974 | { |
975 | struct amdgpu_bo_va *bo_va, *tmp; |
976 | |
977 | spin_lock(&vm->status_lock); |
978 | list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { |
979 | if (!bo_va->base.bo) |
980 | continue; |
981 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
982 | gtt_mem, cpu_mem); |
983 | } |
984 | list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { |
985 | if (!bo_va->base.bo) |
986 | continue; |
987 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
988 | gtt_mem, cpu_mem); |
989 | } |
990 | list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { |
991 | if (!bo_va->base.bo) |
992 | continue; |
993 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
994 | gtt_mem, cpu_mem); |
995 | } |
996 | list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { |
997 | if (!bo_va->base.bo) |
998 | continue; |
999 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
1000 | gtt_mem, cpu_mem); |
1001 | } |
1002 | list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { |
1003 | if (!bo_va->base.bo) |
1004 | continue; |
1005 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
1006 | gtt_mem, cpu_mem); |
1007 | } |
1008 | list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { |
1009 | if (!bo_va->base.bo) |
1010 | continue; |
1011 | amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, |
1012 | gtt_mem, cpu_mem); |
1013 | } |
1014 | spin_unlock(&vm->status_lock); |
1015 | } |
1016 | /** |
1017 | * amdgpu_vm_bo_update - update all BO mappings in the vm page table |
1018 | * |
1019 | * @adev: amdgpu_device pointer |
1020 | * @bo_va: requested BO and VM object |
1021 | * @clear: if true clear the entries |
1022 | * |
1023 | * Fill in the page table entries for @bo_va. |
1024 | * |
1025 | * Returns: |
1026 | * 0 for success, -EINVAL for failure. |
1027 | */ |
1028 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, |
1029 | bool_Bool clear) |
1030 | { |
1031 | struct amdgpu_bo *bo = bo_va->base.bo; |
1032 | struct amdgpu_vm *vm = bo_va->base.vm; |
1033 | struct amdgpu_bo_va_mapping *mapping; |
1034 | dma_addr_t *pages_addr = NULL((void *)0); |
1035 | struct ttm_resource *mem; |
1036 | struct dma_fence **last_update; |
1037 | bool_Bool flush_tlb = clear; |
1038 | struct dma_resv *resv; |
1039 | uint64_t vram_base; |
1040 | uint64_t flags; |
1041 | int r; |
1042 | |
1043 | if (clear || !bo) { |
1044 | mem = NULL((void *)0); |
1045 | resv = vm->root.bo->tbo.base.resv; |
1046 | } else { |
1047 | struct drm_gem_object *obj = &bo->tbo.base; |
Value stored to 'obj' during its initialization is never read | |
1048 | |
1049 | resv = bo->tbo.base.resv; |
1050 | #ifdef notyet |
1051 | if (obj->import_attach && bo_va->is_xgmi) { |
1052 | struct dma_buf *dma_buf = obj->import_attach->dmabuf; |
1053 | struct drm_gem_object *gobj = dma_buf->priv; |
1054 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); |
1055 | |
1056 | if (abo->tbo.resource->mem_type == TTM_PL_VRAM) |
1057 | bo = gem_to_amdgpu_bo(gobj); |
1058 | } |
1059 | #endif |
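/*
 * Note on the warning reported above: with "notyet" undefined, the block
 * between #ifdef notyet and #endif is compiled out, so nothing ever reads
 * 'obj' and its initialization at line 1047 is dead on this build.  A
 * possible (untested) way to quiet the analyzer would be to scope the
 * declaration with the guarded code, e.g.:
 *
 *	#ifdef notyet
 *	struct drm_gem_object *obj = &bo->tbo.base;
 *	...
 *	#endif
 */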
1060 | mem = bo->tbo.resource; |
1061 | if (mem->mem_type == TTM_PL_TT1 || |
1062 | mem->mem_type == AMDGPU_PL_PREEMPT(3 + 3)) |
1063 | pages_addr = bo->tbo.ttm->dma_address; |
1064 | } |
1065 | |
1066 | if (bo) { |
1067 | struct amdgpu_device *bo_adev; |
1068 | |
1069 | flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); |
1070 | |
1071 | if (amdgpu_bo_encrypted(bo)) |
1072 | flags |= AMDGPU_PTE_TMZ(1ULL << 3); |
1073 | |
1074 | bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1075 | vram_base = bo_adev->vm_manager.vram_base_offset; |
1076 | } else { |
1077 | flags = 0x0; |
1078 | vram_base = 0; |
1079 | } |
1080 | |
1081 | if (clear || (bo && bo->tbo.base.resv == |
1082 | vm->root.bo->tbo.base.resv)) |
1083 | last_update = &vm->last_update; |
1084 | else |
1085 | last_update = &bo_va->last_pt_update; |
1086 | |
1087 | if (!clear && bo_va->base.moved) { |
1088 | flush_tlb = true1; |
1089 | list_splice_init(&bo_va->valids, &bo_va->invalids); |
1090 | |
1091 | } else if (bo_va->cleared != clear) { |
1092 | list_splice_init(&bo_va->valids, &bo_va->invalids); |
1093 | } |
1094 | |
1095 | list_for_each_entry(mapping, &bo_va->invalids, list) { |
1096 | uint64_t update_flags = flags; |
1097 | |
1098 | /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits here, |
1099 | * but just in case, filter the flags before applying them. |
1100 | */ |
1101 | if (!(mapping->flags & AMDGPU_PTE_READABLE(1ULL << 5))) |
1102 | update_flags &= ~AMDGPU_PTE_READABLE(1ULL << 5); |
1103 | if (!(mapping->flags & AMDGPU_PTE_WRITEABLE(1ULL << 6))) |
1104 | update_flags &= ~AMDGPU_PTE_WRITEABLE(1ULL << 6); |
1105 | |
1106 | /* Apply ASIC specific mapping flags */ |
1107 | amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags); |
1108 | |
1109 | trace_amdgpu_vm_bo_update(mapping); |
1110 | |
1111 | r = amdgpu_vm_update_range(adev, vm, false0, false0, flush_tlb, |
1112 | resv, mapping->start, mapping->last, |
1113 | update_flags, mapping->offset, |
1114 | vram_base, mem, pages_addr, |
1115 | last_update); |
1116 | if (r) |
1117 | return r; |
1118 | } |
1119 | |
1120 | /* If the BO is not in its preferred location add it back to |
1121 | * the evicted list so that it gets validated again on the |
1122 | * next command submission. |
1123 | */ |
1124 | if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { |
1125 | uint32_t mem_type = bo->tbo.resource->mem_type; |
1126 | |
1127 | if (!(bo->preferred_domains & |
1128 | amdgpu_mem_type_to_domain(mem_type))) |
1129 | amdgpu_vm_bo_evicted(&bo_va->base); |
1130 | else |
1131 | amdgpu_vm_bo_idle(&bo_va->base); |
1132 | } else { |
1133 | amdgpu_vm_bo_done(&bo_va->base); |
1134 | } |
1135 | |
1136 | list_splice_init(&bo_va->invalids, &bo_va->valids); |
1137 | bo_va->cleared = clear; |
1138 | bo_va->base.moved = false0; |
1139 | |
1140 | if (trace_amdgpu_vm_bo_mapping_enabled()) { |
1141 | list_for_each_entry(mapping, &bo_va->valids, list) |
1142 | trace_amdgpu_vm_bo_mapping(mapping); |
1143 | } |
1144 | |
1145 | return 0; |
1146 | } |
1147 | |
1148 | /** |
1149 | * amdgpu_vm_update_prt_state - update the global PRT state |
1150 | * |
1151 | * @adev: amdgpu_device pointer |
1152 | */ |
1153 | static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) |
1154 | { |
1155 | unsigned long flags; |
1156 | bool_Bool enable; |
1157 | |
1158 | spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); |
1159 | enable = !!atomic_read(&adev->vm_manager.num_prt_users); |
1160 | adev->gmc.gmc_funcs->set_prt(adev, enable); |
1161 | spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); |
1162 | } |
1163 | |
1164 | /** |
1165 | * amdgpu_vm_prt_get - add a PRT user |
1166 | * |
1167 | * @adev: amdgpu_device pointer |
1168 | */ |
1169 | static void amdgpu_vm_prt_get(struct amdgpu_device *adev) |
1170 | { |
1171 | if (!adev->gmc.gmc_funcs->set_prt) |
1172 | return; |
1173 | |
1174 | if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) |
1175 | amdgpu_vm_update_prt_state(adev); |
1176 | } |
1177 | |
1178 | /** |
1179 | * amdgpu_vm_prt_put - drop a PRT user |
1180 | * |
1181 | * @adev: amdgpu_device pointer |
1182 | */ |
1183 | static void amdgpu_vm_prt_put(struct amdgpu_device *adev) |
1184 | { |
1185 | if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) |
1186 | amdgpu_vm_update_prt_state(adev); |
1187 | } |
1188 | |
1189 | /** |
1190 | * amdgpu_vm_prt_cb - callback for updating the PRT status |
1191 | * |
1192 | * @fence: fence for the callback |
1193 | * @_cb: the callback function |
1194 | */ |
1195 | static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) |
1196 | { |
1197 | struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); |
1198 | |
1199 | amdgpu_vm_prt_put(cb->adev); |
1200 | kfree(cb); |
1201 | } |
1202 | |
1203 | /** |
1204 | * amdgpu_vm_add_prt_cb - add callback for updating the PRT status |
1205 | * |
1206 | * @adev: amdgpu_device pointer |
1207 | * @fence: fence for the callback |
1208 | */ |
1209 | static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, |
1210 | struct dma_fence *fence) |
1211 | { |
1212 | struct amdgpu_prt_cb *cb; |
1213 | |
1214 | if (!adev->gmc.gmc_funcs->set_prt) |
1215 | return; |
1216 | |
1217 | cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL(0x0001 | 0x0004)); |
1218 | if (!cb) { |
1219 | /* Last resort when we are OOM */ |
1220 | if (fence) |
1221 | dma_fence_wait(fence, false0); |
1222 | |
1223 | amdgpu_vm_prt_put(adev); |
1224 | } else { |
1225 | cb->adev = adev; |
1226 | if (!fence || dma_fence_add_callback(fence, &cb->cb, |
1227 | amdgpu_vm_prt_cb)) |
1228 | amdgpu_vm_prt_cb(fence, &cb->cb); |
1229 | } |
1230 | } |
1231 | |
1232 | /** |
1233 | * amdgpu_vm_free_mapping - free a mapping |
1234 | * |
1235 | * @adev: amdgpu_device pointer |
1236 | * @vm: requested vm |
1237 | * @mapping: mapping to be freed |
1238 | * @fence: fence of the unmap operation |
1239 | * |
1240 | * Free a mapping and make sure we decrease the PRT usage count if applicable. |
1241 | */ |
1242 | static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, |
1243 | struct amdgpu_vm *vm, |
1244 | struct amdgpu_bo_va_mapping *mapping, |
1245 | struct dma_fence *fence) |
1246 | { |
1247 | if (mapping->flags & AMDGPU_PTE_PRT(1ULL << 51)) |
1248 | amdgpu_vm_add_prt_cb(adev, fence); |
1249 | kfree(mapping); |
1250 | } |
1251 | |
1252 | /** |
1253 | * amdgpu_vm_prt_fini - finish all prt mappings |
1254 | * |
1255 | * @adev: amdgpu_device pointer |
1256 | * @vm: requested vm |
1257 | * |
1258 | * Register a cleanup callback to disable PRT support after VM dies. |
1259 | */ |
1260 | static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
1261 | { |
1262 | struct dma_resv *resv = vm->root.bo->tbo.base.resv; |
1263 | struct dma_resv_iter cursor; |
1264 | struct dma_fence *fence; |
1265 | |
1266 | dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { |
1267 | /* Add a callback for each fence in the reservation object */ |
1268 | amdgpu_vm_prt_get(adev); |
1269 | amdgpu_vm_add_prt_cb(adev, fence); |
1270 | } |
1271 | } |
1272 | |
1273 | /** |
1274 | * amdgpu_vm_clear_freed - clear freed BOs in the PT |
1275 | * |
1276 | * @adev: amdgpu_device pointer |
1277 | * @vm: requested vm |
1278 | * @fence: optional resulting fence (unchanged if no work needed to be done |
1279 | * or if an error occurred) |
1280 | * |
1281 | * Make sure all freed BOs are cleared in the PT. |
1282 | * PTs have to be reserved and mutex must be locked! |
1283 | * |
1284 | * Returns: |
1285 | * 0 for success. |
1286 | * |
1287 | */ |
1288 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, |
1289 | struct amdgpu_vm *vm, |
1290 | struct dma_fence **fence) |
1291 | { |
1292 | struct dma_resv *resv = vm->root.bo->tbo.base.resv; |
1293 | struct amdgpu_bo_va_mapping *mapping; |
1294 | uint64_t init_pte_value = 0; |
1295 | struct dma_fence *f = NULL((void *)0); |
1296 | int r; |
1297 | |
1298 | while (!list_empty(&vm->freed)) { |
1299 | mapping = list_first_entry(&vm->freed, |
1300 | struct amdgpu_bo_va_mapping, list); |
1301 | list_del(&mapping->list); |
1302 | |
1303 | if (vm->pte_support_ats && |
1304 | mapping->start < AMDGPU_GMC_HOLE_START0x0000800000000000ULL) |
1305 | init_pte_value = AMDGPU_PTE_DEFAULT_ATC; |
1306 | |
1307 | r = amdgpu_vm_update_range(adev, vm, false0, false0, true1, resv, |
1308 | mapping->start, mapping->last, |
1309 | init_pte_value, 0, 0, NULL((void *)0), NULL((void *)0), |
1310 | &f); |
1311 | amdgpu_vm_free_mapping(adev, vm, mapping, f); |
1312 | if (r) { |
1313 | dma_fence_put(f); |
1314 | return r; |
1315 | } |
1316 | } |
1317 | |
1318 | if (fence && f) { |
1319 | dma_fence_put(*fence); |
1320 | *fence = f; |
1321 | } else { |
1322 | dma_fence_put(f); |
1323 | } |
1324 | |
1325 | return 0; |
1326 | |
1327 | } |
1328 | |
1329 | /** |
1330 | * amdgpu_vm_handle_moved - handle moved BOs in the PT |
1331 | * |
1332 | * @adev: amdgpu_device pointer |
1333 | * @vm: requested vm |
1334 | * |
1335 | * Make sure all BOs which are moved are updated in the PTs. |
1336 | * |
1337 | * Returns: |
1338 | * 0 for success. |
1339 | * |
1340 | * PTs have to be reserved! |
1341 | */ |
1342 | int amdgpu_vm_handle_moved(struct amdgpu_device *adev, |
1343 | struct amdgpu_vm *vm) |
1344 | { |
1345 | struct amdgpu_bo_va *bo_va; |
1346 | struct dma_resv *resv; |
1347 | bool_Bool clear; |
1348 | int r; |
1349 | |
1350 | spin_lock(&vm->status_lock); |
1351 | while (!list_empty(&vm->moved)) { |
1352 | bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, |
1353 | base.vm_status); |
1354 | spin_unlock(&vm->status_lock); |
1355 | |
1356 | /* Per VM BOs never need to be cleared in the page tables */ |
1357 | r = amdgpu_vm_bo_update(adev, bo_va, false0); |
1358 | if (r) |
1359 | return r; |
1360 | spin_lock(&vm->status_lock)mtx_enter(&vm->status_lock); |
1361 | } |
1362 | |
1363 | while (!list_empty(&vm->invalidated)) { |
1364 | bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,({ const __typeof( ((struct amdgpu_bo_va *)0)->base.vm_status ) *__mptr = ((&vm->invalidated)->next); (struct amdgpu_bo_va *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo_va, base.vm_status) );}) |
1365 | base.vm_status)({ const __typeof( ((struct amdgpu_bo_va *)0)->base.vm_status ) *__mptr = ((&vm->invalidated)->next); (struct amdgpu_bo_va *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo_va, base.vm_status) );}); |
1366 | resv = bo_va->base.bo->tbo.base.resv; |
1367 | spin_unlock(&vm->status_lock)mtx_leave(&vm->status_lock); |
1368 | |
1369 | /* Try to reserve the BO to avoid clearing its ptes */ |
1370 | if (!amdgpu_vm_debug && dma_resv_trylock(resv)) |
1371 | clear = false0; |
1372 | /* Somebody else is using the BO right now */ |
1373 | else |
1374 | clear = true1; |
1375 | |
1376 | r = amdgpu_vm_bo_update(adev, bo_va, clear); |
1377 | if (r) |
1378 | return r; |
1379 | |
1380 | if (!clear) |
1381 | dma_resv_unlock(resv); |
1382 | spin_lock(&vm->status_lock)mtx_enter(&vm->status_lock); |
1383 | } |
1384 | spin_unlock(&vm->status_lock)mtx_leave(&vm->status_lock); |
1385 | |
1386 | return 0; |
1387 | } |
1388 | |
1389 | /** |
1390 | * amdgpu_vm_bo_add - add a bo to a specific vm |
1391 | * |
1392 | * @adev: amdgpu_device pointer |
1393 | * @vm: requested vm |
1394 | * @bo: amdgpu buffer object |
1395 | * |
1396 | * Add @bo into the requested vm. |
1397 | * Add @bo to the list of bos associated with the vm |
1398 | * |
1399 | * Returns: |
1400 | * Newly added bo_va or NULL for failure |
1401 | * |
1402 | * Object has to be reserved! |
1403 | */ |
1404 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, |
1405 | struct amdgpu_vm *vm, |
1406 | struct amdgpu_bo *bo) |
1407 | { |
1408 | struct amdgpu_bo_va *bo_va; |
1409 | |
1410 | bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL(0x0001 | 0x0004)); |
1411 | if (bo_va == NULL((void *)0)) { |
1412 | return NULL((void *)0); |
1413 | } |
1414 | amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); |
1415 | |
1416 | bo_va->ref_count = 1; |
1417 | bo_va->last_pt_update = dma_fence_get_stub(); |
1418 | INIT_LIST_HEAD(&bo_va->valids); |
1419 | INIT_LIST_HEAD(&bo_va->invalids); |
1420 | |
1421 | if (!bo) |
1422 | return bo_va; |
1423 | |
1424 | dma_resv_assert_held(bo->tbo.base.resv)do { (void)(&(bo->tbo.base.resv)->lock.base); } while (0); |
1425 | if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) { |
1426 | bo_va->is_xgmi = true1; |
1427 | /* Power up XGMI if it can be potentially used */ |
1428 | amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20); |
1429 | } |
1430 | |
1431 | return bo_va; |
1432 | } |
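/*
 * Illustrative usage sketch (not part of this file): creating a per-VM view
 * of a BO. The BO (and the VM root for per-VM BOs) must be reserved by the
 * caller; adev, vm and bo are assumed from the caller's context.
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (r)
 *		return r;
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va) {
 *		amdgpu_bo_unreserve(bo);
 *		return -ENOMEM;
 *	}
 *	...add mappings with amdgpu_vm_bo_map(), tear the bo_va down later
 *	   with amdgpu_vm_bo_del()...
 *	amdgpu_bo_unreserve(bo);
 */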
1433 | |
1434 | |
1435 | /** |
1436 | * amdgpu_vm_bo_insert_map - insert a new mapping |
1437 | * |
1438 | * @adev: amdgpu_device pointer |
1439 | * @bo_va: bo_va to store the address |
1440 | * @mapping: the mapping to insert |
1441 | * |
1442 | * Insert a new mapping into all structures. |
1443 | */ |
1444 | static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, |
1445 | struct amdgpu_bo_va *bo_va, |
1446 | struct amdgpu_bo_va_mapping *mapping) |
1447 | { |
1448 | struct amdgpu_vm *vm = bo_va->base.vm; |
1449 | struct amdgpu_bo *bo = bo_va->base.bo; |
1450 | |
1451 | mapping->bo_va = bo_va; |
1452 | list_add(&mapping->list, &bo_va->invalids); |
1453 | amdgpu_vm_it_insert(mapping, &vm->va); |
1454 | |
1455 | if (mapping->flags & AMDGPU_PTE_PRT(1ULL << 51)) |
1456 | amdgpu_vm_prt_get(adev); |
1457 | |
1458 | if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && |
1459 | !bo_va->base.moved) { |
1460 | amdgpu_vm_bo_moved(&bo_va->base); |
1461 | } |
1462 | trace_amdgpu_vm_bo_map(bo_va, mapping); |
1463 | } |
1464 | |
1465 | /** |
1466 | * amdgpu_vm_bo_map - map bo inside a vm |
1467 | * |
1468 | * @adev: amdgpu_device pointer |
1469 | * @bo_va: bo_va to store the address |
1470 | * @saddr: where to map the BO |
1471 | * @offset: requested offset in the BO |
1472 | * @size: BO size in bytes |
1473 | * @flags: attributes of pages (read/write/valid/etc.) |
1474 | * |
1475 | * Add a mapping of the BO at the specified addr into the VM. |
1476 | * |
1477 | * Returns: |
1478 | * 0 for success, error for failure. |
1479 | * |
1480 | * Object has to be reserved and unreserved outside! |
1481 | */ |
1482 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, |
1483 | struct amdgpu_bo_va *bo_va, |
1484 | uint64_t saddr, uint64_t offset, |
1485 | uint64_t size, uint64_t flags) |
1486 | { |
1487 | struct amdgpu_bo_va_mapping *mapping, *tmp; |
1488 | struct amdgpu_bo *bo = bo_va->base.bo; |
1489 | struct amdgpu_vm *vm = bo_va->base.vm; |
1490 | uint64_t eaddr; |
1491 | |
1492 | /* validate the parameters */ |
1493 | if (saddr & ~LINUX_PAGE_MASK(~((1 << 12) - 1)) || offset & ~LINUX_PAGE_MASK(~((1 << 12) - 1)) || size & ~LINUX_PAGE_MASK(~((1 << 12) - 1))) |
1494 | return -EINVAL22; |
1495 | if (saddr + size <= saddr || offset + size <= offset) |
1496 | return -EINVAL22; |
1497 | |
1498 | /* make sure object fit at this offset */ |
1499 | eaddr = saddr + size - 1; |
1500 | if ((bo && offset + size > amdgpu_bo_size(bo)) || |
1501 | (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT12)) |
1502 | return -EINVAL22; |
1503 | |
1504 | saddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1505 | eaddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1506 | |
1507 | tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); |
1508 | if (tmp) { |
1509 | /* bo and tmp overlap, invalid addr */ |
1510 | dev_err(adev->dev, "bo %p va 0x%010llx-0x%010llx conflict with "printf("drm:pid%d:%s *ERROR* " "bo %p va 0x%010llx-0x%010llx conflict with " "0x%010llx-0x%010llx\n", ({struct cpu_info *__ci; asm volatile ("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid , __func__ , bo, saddr, eaddr, tmp->start, tmp->last + 1 ) |
1511 | "0x%010llx-0x%010llx\n", bo, saddr, eaddr,printf("drm:pid%d:%s *ERROR* " "bo %p va 0x%010llx-0x%010llx conflict with " "0x%010llx-0x%010llx\n", ({struct cpu_info *__ci; asm volatile ("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid , __func__ , bo, saddr, eaddr, tmp->start, tmp->last + 1 ) |
1512 | tmp->start, tmp->last + 1)printf("drm:pid%d:%s *ERROR* " "bo %p va 0x%010llx-0x%010llx conflict with " "0x%010llx-0x%010llx\n", ({struct cpu_info *__ci; asm volatile ("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid , __func__ , bo, saddr, eaddr, tmp->start, tmp->last + 1 ); |
1513 | return -EINVAL22; |
1514 | } |
1515 | |
1516 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL(0x0001 | 0x0004)); |
1517 | if (!mapping) |
1518 | return -ENOMEM12; |
1519 | |
1520 | mapping->start = saddr; |
1521 | mapping->last = eaddr; |
1522 | mapping->offset = offset; |
1523 | mapping->flags = flags; |
1524 | |
1525 | amdgpu_vm_bo_insert_map(adev, bo_va, mapping); |
1526 | |
1527 | return 0; |
1528 | } |
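/*
 * Illustrative usage sketch (not part of this file): mapping the first
 * 1 MiB of a BO at GPU VA 0x100000. saddr, offset and size must all be
 * page aligned and offset + size must fit inside the BO, otherwise
 * -EINVAL is returned. The PTE flag names below exist elsewhere in this
 * file; which flags a real caller sets depends on the request.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (r)
 *		return r;
 */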
1529 | |
1530 | /** |
1531 | * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings |
1532 | * |
1533 | * @adev: amdgpu_device pointer |
1534 | * @bo_va: bo_va to store the address |
1535 | * @saddr: where to map the BO |
1536 | * @offset: requested offset in the BO |
1537 | * @size: BO size in bytes |
1538 | * @flags: attributes of pages (read/write/valid/etc.) |
1539 | * |
1540 | * Add a mapping of the BO at the specified addr into the VM. Replace existing |
1541 | * mappings as we do so. |
1542 | * |
1543 | * Returns: |
1544 | * 0 for success, error for failure. |
1545 | * |
1546 | * Object has to be reserved and unreserved outside! |
1547 | */ |
1548 | int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, |
1549 | struct amdgpu_bo_va *bo_va, |
1550 | uint64_t saddr, uint64_t offset, |
1551 | uint64_t size, uint64_t flags) |
1552 | { |
1553 | struct amdgpu_bo_va_mapping *mapping; |
1554 | struct amdgpu_bo *bo = bo_va->base.bo; |
1555 | uint64_t eaddr; |
1556 | int r; |
1557 | |
1558 | /* validate the parameters */ |
1559 | if (saddr & ~LINUX_PAGE_MASK(~((1 << 12) - 1)) || offset & ~LINUX_PAGE_MASK(~((1 << 12) - 1)) || size & ~LINUX_PAGE_MASK(~((1 << 12) - 1))) |
1560 | return -EINVAL22; |
1561 | if (saddr + size <= saddr || offset + size <= offset) |
1562 | return -EINVAL22; |
1563 | |
1564 | /* make sure object fit at this offset */ |
1565 | eaddr = saddr + size - 1; |
1566 | if ((bo && offset + size > amdgpu_bo_size(bo)) || |
1567 | (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT12)) |
1568 | return -EINVAL22; |
1569 | |
1570 | /* Allocate all the needed memory */ |
1571 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL(0x0001 | 0x0004)); |
1572 | if (!mapping) |
1573 | return -ENOMEM12; |
1574 | |
1575 | r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); |
1576 | if (r) { |
1577 | kfree(mapping); |
1578 | return r; |
1579 | } |
1580 | |
1581 | saddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1582 | eaddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1583 | |
1584 | mapping->start = saddr; |
1585 | mapping->last = eaddr; |
1586 | mapping->offset = offset; |
1587 | mapping->flags = flags; |
1588 | |
1589 | amdgpu_vm_bo_insert_map(adev, bo_va, mapping); |
1590 | |
1591 | return 0; |
1592 | } |
1593 | |
1594 | /** |
1595 | * amdgpu_vm_bo_unmap - remove bo mapping from vm |
1596 | * |
1597 | * @adev: amdgpu_device pointer |
1598 | * @bo_va: bo_va to remove the address from |
1599 | * @saddr: where the BO is mapped |
1600 | * |
1601 | * Remove a mapping of the BO at the specified addr from the VM. |
1602 | * |
1603 | * Returns: |
1604 | * 0 for success, error for failure. |
1605 | * |
1606 | * Object has to be reserved and unreserved outside! |
1607 | */ |
1608 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, |
1609 | struct amdgpu_bo_va *bo_va, |
1610 | uint64_t saddr) |
1611 | { |
1612 | struct amdgpu_bo_va_mapping *mapping; |
1613 | struct amdgpu_vm *vm = bo_va->base.vm; |
1614 | bool_Bool valid = true1; |
1615 | |
1616 | saddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1617 | |
1618 | list_for_each_entry(mapping, &bo_va->valids, list)for (mapping = ({ const __typeof( ((__typeof(*mapping) *)0)-> list ) *__mptr = ((&bo_va->valids)->next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}); &mapping->list != (&bo_va-> valids); mapping = ({ const __typeof( ((__typeof(*mapping) *) 0)->list ) *__mptr = (mapping->list.next); (__typeof(*mapping ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*mapping), list) );})) { |
1619 | if (mapping->start == saddr) |
1620 | break; |
1621 | } |
1622 | |
1623 | if (&mapping->list == &bo_va->valids) { |
1624 | valid = false0; |
1625 | |
1626 | list_for_each_entry(mapping, &bo_va->invalids, list)for (mapping = ({ const __typeof( ((__typeof(*mapping) *)0)-> list ) *__mptr = ((&bo_va->invalids)->next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}); &mapping->list != (&bo_va-> invalids); mapping = ({ const __typeof( ((__typeof(*mapping) * )0)->list ) *__mptr = (mapping->list.next); (__typeof(* mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(*mapping ), list) );})) { |
1627 | if (mapping->start == saddr) |
1628 | break; |
1629 | } |
1630 | |
1631 | if (&mapping->list == &bo_va->invalids) |
1632 | return -ENOENT2; |
1633 | } |
1634 | |
1635 | list_del(&mapping->list); |
1636 | amdgpu_vm_it_remove(mapping, &vm->va); |
1637 | mapping->bo_va = NULL((void *)0); |
1638 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1639 | |
1640 | if (valid) |
1641 | list_add(&mapping->list, &vm->freed); |
1642 | else |
1643 | amdgpu_vm_free_mapping(adev, vm, mapping, |
1644 | bo_va->last_pt_update); |
1645 | |
1646 | return 0; |
1647 | } |
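/*
 * Illustrative usage sketch (not part of this file): removing the mapping
 * created in the example above. The same byte address that was passed to
 * amdgpu_vm_bo_map() is used; the function converts it to GPU pages
 * internally and returns -ENOENT when no mapping starts there.
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x100000);
 *	if (r)
 *		return r;
 */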
1648 | |
1649 | /** |
1650 | * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range |
1651 | * |
1652 | * @adev: amdgpu_device pointer |
1653 | * @vm: VM structure to use |
1654 | * @saddr: start of the range |
1655 | * @size: size of the range |
1656 | * |
1657 | * Remove all mappings in a range, split them as appropriate. |
1658 | * |
1659 | * Returns: |
1660 | * 0 for success, error for failure. |
1661 | */ |
1662 | int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, |
1663 | struct amdgpu_vm *vm, |
1664 | uint64_t saddr, uint64_t size) |
1665 | { |
1666 | struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; |
1667 | DRM_LIST_HEAD(removed)struct list_head removed = { &(removed), &(removed) }; |
1668 | uint64_t eaddr; |
1669 | |
1670 | eaddr = saddr + size - 1; |
1671 | saddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1672 | eaddr /= AMDGPU_GPU_PAGE_SIZE4096; |
1673 | |
1674 | /* Allocate all the needed memory */ |
1675 | before = kzalloc(sizeof(*before), GFP_KERNEL(0x0001 | 0x0004)); |
1676 | if (!before) |
1677 | return -ENOMEM12; |
1678 | INIT_LIST_HEAD(&before->list); |
1679 | |
1680 | after = kzalloc(sizeof(*after), GFP_KERNEL(0x0001 | 0x0004)); |
1681 | if (!after) { |
1682 | kfree(before); |
1683 | return -ENOMEM12; |
1684 | } |
1685 | INIT_LIST_HEAD(&after->list); |
1686 | |
1687 | /* Now gather all removed mappings */ |
1688 | tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); |
1689 | while (tmp) { |
1690 | /* Remember mapping split at the start */ |
1691 | if (tmp->start < saddr) { |
1692 | before->start = tmp->start; |
1693 | before->last = saddr - 1; |
1694 | before->offset = tmp->offset; |
1695 | before->flags = tmp->flags; |
1696 | before->bo_va = tmp->bo_va; |
1697 | list_add(&before->list, &tmp->bo_va->invalids); |
1698 | } |
1699 | |
1700 | /* Remember mapping split at the end */ |
1701 | if (tmp->last > eaddr) { |
1702 | after->start = eaddr + 1; |
1703 | after->last = tmp->last; |
1704 | after->offset = tmp->offset; |
1705 | after->offset += (after->start - tmp->start) << PAGE_SHIFT12; |
1706 | after->flags = tmp->flags; |
1707 | after->bo_va = tmp->bo_va; |
1708 | list_add(&after->list, &tmp->bo_va->invalids); |
1709 | } |
1710 | |
1711 | list_del(&tmp->list); |
1712 | list_add(&tmp->list, &removed); |
1713 | |
1714 | tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); |
1715 | } |
1716 | |
1717 | /* And free them up */ |
1718 | list_for_each_entry_safe(tmp, next, &removed, list)for (tmp = ({ const __typeof( ((__typeof(*tmp) *)0)->list ) *__mptr = ((&removed)->next); (__typeof(*tmp) *)( (char *)__mptr - __builtin_offsetof(__typeof(*tmp), list) );}), next = ({ const __typeof( ((__typeof(*tmp) *)0)->list ) *__mptr = (tmp->list.next); (__typeof(*tmp) *)( (char *)__mptr - __builtin_offsetof (__typeof(*tmp), list) );}); &tmp->list != (&removed ); tmp = next, next = ({ const __typeof( ((__typeof(*next) *) 0)->list ) *__mptr = (next->list.next); (__typeof(*next ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*next), list ) );})) { |
1719 | amdgpu_vm_it_remove(tmp, &vm->va); |
1720 | list_del(&tmp->list); |
1721 | |
1722 | if (tmp->start < saddr) |
1723 | tmp->start = saddr; |
1724 | if (tmp->last > eaddr) |
1725 | tmp->last = eaddr; |
1726 | |
1727 | tmp->bo_va = NULL((void *)0); |
1728 | list_add(&tmp->list, &vm->freed); |
1729 | trace_amdgpu_vm_bo_unmap(NULL((void *)0), tmp); |
1730 | } |
1731 | |
1732 | /* Insert partial mapping before the range */ |
1733 | if (!list_empty(&before->list)) { |
1734 | struct amdgpu_bo *bo = before->bo_va->base.bo; |
1735 | |
1736 | amdgpu_vm_it_insert(before, &vm->va); |
1737 | if (before->flags & AMDGPU_PTE_PRT(1ULL << 51)) |
1738 | amdgpu_vm_prt_get(adev); |
1739 | |
1740 | if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && |
1741 | !before->bo_va->base.moved) |
1742 | amdgpu_vm_bo_moved(&before->bo_va->base); |
1743 | } else { |
1744 | kfree(before); |
1745 | } |
1746 | |
1747 | /* Insert partial mapping after the range */ |
1748 | if (!list_empty(&after->list)) { |
1749 | struct amdgpu_bo *bo = after->bo_va->base.bo; |
1750 | |
1751 | amdgpu_vm_it_insert(after, &vm->va); |
1752 | if (after->flags & AMDGPU_PTE_PRT(1ULL << 51)) |
1753 | amdgpu_vm_prt_get(adev); |
1754 | |
1755 | if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && |
1756 | !after->bo_va->base.moved) |
1757 | amdgpu_vm_bo_moved(&after->bo_va->base); |
1758 | } else { |
1759 | kfree(after); |
1760 | } |
1761 | |
1762 | return 0; |
1763 | } |
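/*
 * Illustrative usage sketch (not part of this file): punching a one-page
 * hole into the middle of the 1 MiB mapping from the earlier example. The
 * covered mapping is moved to vm->freed while "before" and "after"
 * remainders are re-inserted around the hole.
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, 0x180000, 0x1000);
 *	if (r)
 *		return r;
 */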
1764 | |
1765 | /** |
1766 | * amdgpu_vm_bo_lookup_mapping - find mapping by address |
1767 | * |
1768 | * @vm: the requested VM |
1769 | * @addr: the address |
1770 | * |
1771 | * Find a mapping by its address. |
1772 | * |
1773 | * Returns: |
1774 | * The amdgpu_bo_va_mapping matching for addr or NULL |
1775 | * |
1776 | */ |
1777 | struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, |
1778 | uint64_t addr) |
1779 | { |
1780 | return amdgpu_vm_it_iter_first(&vm->va, addr, addr); |
1781 | } |
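/*
 * Illustrative usage sketch (not part of this file): the interval tree is
 * keyed in GPU page numbers (amdgpu_vm_bo_map() divides by
 * AMDGPU_GPU_PAGE_SIZE before inserting), so a byte address is converted
 * first. The addr variable is assumed from the caller's context.
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
 *	if (!mapping)
 *		return -ENOENT;
 */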
1782 | |
1783 | /** |
1784 | * amdgpu_vm_bo_trace_cs - trace all reserved mappings |
1785 | * |
1786 | * @vm: the requested vm |
1787 | * @ticket: CS ticket |
1788 | * |
1789 | * Trace all mappings of BOs reserved during a command submission. |
1790 | */ |
1791 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) |
1792 | { |
1793 | struct amdgpu_bo_va_mapping *mapping; |
1794 | |
1795 | if (!trace_amdgpu_vm_bo_cs_enabled()) |
1796 | return; |
1797 | |
1798 | for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX0xffffffffffffffffULL); mapping; |
1799 | mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX0xffffffffffffffffULL)) { |
1800 | if (mapping->bo_va && mapping->bo_va->base.bo) { |
1801 | struct amdgpu_bo *bo; |
1802 | |
1803 | bo = mapping->bo_va->base.bo; |
1804 | if (dma_resv_locking_ctx(bo->tbo.base.resv) != |
1805 | ticket) |
1806 | continue; |
1807 | } |
1808 | |
1809 | trace_amdgpu_vm_bo_cs(mapping); |
1810 | } |
1811 | } |
1812 | |
1813 | /** |
1814 | * amdgpu_vm_bo_del - remove a bo from a specific vm |
1815 | * |
1816 | * @adev: amdgpu_device pointer |
1817 | * @bo_va: requested bo_va |
1818 | * |
1819 | * Remove @bo_va->bo from the requested vm. |
1820 | * |
1821 | * Object has to be reserved! |
1822 | */ |
1823 | void amdgpu_vm_bo_del(struct amdgpu_device *adev, |
1824 | struct amdgpu_bo_va *bo_va) |
1825 | { |
1826 | struct amdgpu_bo_va_mapping *mapping, *next; |
1827 | struct amdgpu_bo *bo = bo_va->base.bo; |
1828 | struct amdgpu_vm *vm = bo_va->base.vm; |
1829 | struct amdgpu_vm_bo_base **base; |
1830 | |
1831 | dma_resv_assert_held(vm->root.bo->tbo.base.resv)do { (void)(&(vm->root.bo->tbo.base.resv)->lock. base); } while(0); |
1832 | |
1833 | if (bo) { |
1834 | dma_resv_assert_held(bo->tbo.base.resv)do { (void)(&(bo->tbo.base.resv)->lock.base); } while (0); |
1835 | if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) |
1836 | ttm_bo_set_bulk_move(&bo->tbo, NULL((void *)0)); |
1837 | |
1838 | for (base = &bo_va->base.bo->vm_bo; *base; |
1839 | base = &(*base)->next) { |
1840 | if (*base != &bo_va->base) |
1841 | continue; |
1842 | |
1843 | *base = bo_va->base.next; |
1844 | break; |
1845 | } |
1846 | } |
1847 | |
1848 | spin_lock(&vm->status_lock)mtx_enter(&vm->status_lock); |
1849 | list_del(&bo_va->base.vm_status); |
1850 | spin_unlock(&vm->status_lock)mtx_leave(&vm->status_lock); |
1851 | |
1852 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list)for (mapping = ({ const __typeof( ((__typeof(*mapping) *)0)-> list ) *__mptr = ((&bo_va->valids)->next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}), next = ({ const __typeof( ((__typeof(*mapping ) *)0)->list ) *__mptr = (mapping->list.next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}); &mapping->list != (&bo_va-> valids); mapping = next, next = ({ const __typeof( ((__typeof (*next) *)0)->list ) *__mptr = (next->list.next); (__typeof (*next) *)( (char *)__mptr - __builtin_offsetof(__typeof(*next ), list) );})) { |
1853 | list_del(&mapping->list); |
1854 | amdgpu_vm_it_remove(mapping, &vm->va); |
1855 | mapping->bo_va = NULL((void *)0); |
1856 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1857 | list_add(&mapping->list, &vm->freed); |
1858 | } |
1859 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list)for (mapping = ({ const __typeof( ((__typeof(*mapping) *)0)-> list ) *__mptr = ((&bo_va->invalids)->next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}), next = ({ const __typeof( ((__typeof(*mapping ) *)0)->list ) *__mptr = (mapping->list.next); (__typeof (*mapping) *)( (char *)__mptr - __builtin_offsetof(__typeof(* mapping), list) );}); &mapping->list != (&bo_va-> invalids); mapping = next, next = ({ const __typeof( ((__typeof (*next) *)0)->list ) *__mptr = (next->list.next); (__typeof (*next) *)( (char *)__mptr - __builtin_offsetof(__typeof(*next ), list) );})) { |
1860 | list_del(&mapping->list); |
1861 | amdgpu_vm_it_remove(mapping, &vm->va); |
1862 | amdgpu_vm_free_mapping(adev, vm, mapping, |
1863 | bo_va->last_pt_update); |
1864 | } |
1865 | |
1866 | dma_fence_put(bo_va->last_pt_update); |
1867 | |
1868 | if (bo && bo_va->is_xgmi) |
1869 | amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN); |
1870 | |
1871 | kfree(bo_va); |
1872 | } |
1873 | |
1874 | /** |
1875 | * amdgpu_vm_evictable - check if we can evict a VM |
1876 | * |
1877 | * @bo: A page table of the VM. |
1878 | * |
1879 | * Check if it is possible to evict a VM. |
1880 | */ |
1881 | bool_Bool amdgpu_vm_evictable(struct amdgpu_bo *bo) |
1882 | { |
1883 | struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; |
1884 | |
1885 | /* Page tables of a destroyed VM can go away immediately */ |
1886 | if (!bo_base || !bo_base->vm) |
1887 | return true1; |
1888 | |
1889 | /* Don't evict VM page tables while they are busy */ |
1890 | if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) |
1891 | return false0; |
1892 | |
1893 | /* Try to block ongoing updates */ |
1894 | if (!amdgpu_vm_eviction_trylock(bo_base->vm)) |
1895 | return false0; |
1896 | |
1897 | /* Don't evict VM page tables while they are updated */ |
1898 | if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { |
1899 | amdgpu_vm_eviction_unlock(bo_base->vm); |
1900 | return false0; |
1901 | } |
1902 | |
1903 | bo_base->vm->evicting = true1; |
1904 | amdgpu_vm_eviction_unlock(bo_base->vm); |
1905 | return true1; |
1906 | } |
1907 | |
1908 | /** |
1909 | * amdgpu_vm_bo_invalidate - mark the bo as invalid |
1910 | * |
1911 | * @adev: amdgpu_device pointer |
1912 | * @bo: amdgpu buffer object |
1913 | * @evicted: is the BO evicted |
1914 | * |
1915 | * Mark @bo as invalid. |
1916 | */ |
1917 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, |
1918 | struct amdgpu_bo *bo, bool_Bool evicted) |
1919 | { |
1920 | struct amdgpu_vm_bo_base *bo_base; |
1921 | |
1922 | /* shadow bo doesn't have bo base, its validation needs its parent */ |
1923 | if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) |
1924 | bo = bo->parent; |
1925 | |
1926 | for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { |
1927 | struct amdgpu_vm *vm = bo_base->vm; |
1928 | |
1929 | if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { |
1930 | amdgpu_vm_bo_evicted(bo_base); |
1931 | continue; |
1932 | } |
1933 | |
1934 | if (bo_base->moved) |
1935 | continue; |
1936 | bo_base->moved = true1; |
1937 | |
1938 | if (bo->tbo.type == ttm_bo_type_kernel) |
1939 | amdgpu_vm_bo_relocated(bo_base); |
1940 | else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) |
1941 | amdgpu_vm_bo_moved(bo_base); |
1942 | else |
1943 | amdgpu_vm_bo_invalidated(bo_base); |
1944 | } |
1945 | } |
1946 | |
1947 | /** |
1948 | * amdgpu_vm_get_block_size - calculate VM page table size as power of two |
1949 | * |
1950 | * @vm_size: VM size |
1951 | * |
1952 | * Returns: |
1953 | * VM page table size as a power of two |
1954 | */ |
1955 | static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) |
1956 | { |
1957 | /* Total bits covered by PD + PTs */ |
1958 | unsigned bits = ilog2(vm_size)((sizeof(vm_size) <= 4) ? (fls(vm_size) - 1) : (flsl(vm_size ) - 1)) + 18; |
1959 | |
1960 | /* Make sure the PD is 4K in size up to 8GB address space. |
1961 | Above that, split the bits equally between PD and PTs */ |
1962 | if (vm_size <= 8) |
1963 | return (bits - 9); |
1964 | else |
1965 | return ((bits + 3) / 2); |
1966 | } |
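/*
 * Worked example (values only, following the formula above): for an 8 GB
 * VM, bits = ilog2(8) + 18 = 21 and the PD stays 4K, so the result is
 * 21 - 9 = 12. For a 256 GB VM, bits = ilog2(256) + 18 = 26 and the bits
 * are split roughly evenly, giving (26 + 3) / 2 = 14.
 */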
1967 | |
1968 | /** |
1969 | * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size |
1970 | * |
1971 | * @adev: amdgpu_device pointer |
1972 | * @min_vm_size: the minimum vm size in GB if it's set auto |
1973 | * @fragment_size_default: Default PTE fragment size |
1974 | * @max_level: max VMPT level |
1975 | * @max_bits: max address space size in bits |
1976 | * |
1977 | */ |
1978 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
1979 | uint32_t fragment_size_default, unsigned max_level, |
1980 | unsigned max_bits) |
1981 | { |
1982 | unsigned int max_size = 1 << (max_bits - 30); |
1983 | unsigned int vm_size; |
1984 | uint64_t tmp; |
1985 | |
1986 | /* adjust vm size first */ |
1987 | if (amdgpu_vm_size != -1) { |
1988 | vm_size = amdgpu_vm_size; |
1989 | if (vm_size > max_size) { |
1990 | dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",printf("drm:pid%d:%s *WARNING* " "VM size (%d) too large, max is %u GB\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , amdgpu_vm_size , max_size) |
1991 | amdgpu_vm_size, max_size)printf("drm:pid%d:%s *WARNING* " "VM size (%d) too large, max is %u GB\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , amdgpu_vm_size , max_size); |
1992 | vm_size = max_size; |
1993 | } |
1994 | } else { |
1995 | #ifdef __linux__ |
1996 | struct sysinfo si; |
1997 | #endif |
1998 | unsigned int phys_ram_gb; |
1999 | |
2000 | /* Optimal VM size depends on the amount of physical |
2001 | * RAM available. Underlying requirements and |
2002 | * assumptions: |
2003 | * |
2004 | * - Need to map system memory and VRAM from all GPUs |
2005 | * - VRAM from other GPUs not known here |
2006 | * - Assume VRAM <= system memory |
2007 | * - On GFX8 and older, VM space can be segmented for |
2008 | * different MTYPEs |
2009 | * - Need to allow room for fragmentation, guard pages etc. |
2010 | * |
2011 | * This adds up to a rough guess of system memory x3. |
2012 | * Round up to power of two to maximize the available |
2013 | * VM size with the given page table size. |
2014 | */ |
2015 | #ifdef __linux__ |
2016 | si_meminfo(&si); |
2017 | phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + |
2018 | (1 << 30) - 1) >> 30; |
2019 | #else |
2020 | phys_ram_gb = ((uint64_t)ptoa(physmem)((paddr_t)(physmem) << 12) + |
2021 | (1 << 30) - 1) >> 30; |
2022 | #endif |
2023 | vm_size = roundup_pow_of_two( |
2024 | min(max(phys_ram_gb * 3, min_vm_size), max_size)((((((phys_ram_gb * 3)>(min_vm_size))?(phys_ram_gb * 3):(min_vm_size )))<(max_size))?((((phys_ram_gb * 3)>(min_vm_size))?(phys_ram_gb * 3):(min_vm_size))):(max_size))); |
2025 | } |
2026 | |
2027 | adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; |
2028 | |
2029 | tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); |
2030 | if (amdgpu_vm_block_size != -1) |
2031 | tmp >>= amdgpu_vm_block_size - 9; |
2032 | tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9)(((fls64(tmp) - 1) + ((9) - 1)) / (9)) - 1; |
2033 | adev->vm_manager.num_level = min(max_level, (unsigned)tmp)(((max_level)<((unsigned)tmp))?(max_level):((unsigned)tmp) ); |
2034 | switch (adev->vm_manager.num_level) { |
2035 | case 3: |
2036 | adev->vm_manager.root_level = AMDGPU_VM_PDB2; |
2037 | break; |
2038 | case 2: |
2039 | adev->vm_manager.root_level = AMDGPU_VM_PDB1; |
2040 | break; |
2041 | case 1: |
2042 | adev->vm_manager.root_level = AMDGPU_VM_PDB0; |
2043 | break; |
2044 | default: |
2045 | dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n")printf("drm:pid%d:%s *ERROR* " "VMPT only supports 2~4+1 levels\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2046 | } |
2047 | /* block size depends on vm size and hw setup*/ |
2048 | if (amdgpu_vm_block_size != -1) |
2049 | adev->vm_manager.block_size = |
2050 | min((unsigned)amdgpu_vm_block_size, max_bits((((unsigned)amdgpu_vm_block_size)<(max_bits - 12 - 9 * adev ->vm_manager.num_level))?((unsigned)amdgpu_vm_block_size): (max_bits - 12 - 9 * adev->vm_manager.num_level)) |
2051 | - AMDGPU_GPU_PAGE_SHIFT((((unsigned)amdgpu_vm_block_size)<(max_bits - 12 - 9 * adev ->vm_manager.num_level))?((unsigned)amdgpu_vm_block_size): (max_bits - 12 - 9 * adev->vm_manager.num_level)) |
2052 | - 9 * adev->vm_manager.num_level)((((unsigned)amdgpu_vm_block_size)<(max_bits - 12 - 9 * adev ->vm_manager.num_level))?((unsigned)amdgpu_vm_block_size): (max_bits - 12 - 9 * adev->vm_manager.num_level)); |
2053 | else if (adev->vm_manager.num_level > 1) |
2054 | adev->vm_manager.block_size = 9; |
2055 | else |
2056 | adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); |
2057 | |
2058 | if (amdgpu_vm_fragment_size == -1) |
2059 | adev->vm_manager.fragment_size = fragment_size_default; |
2060 | else |
2061 | adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; |
2062 | |
2063 | DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",printk("\0016" "[" "drm" "] " "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n" , vm_size, adev->vm_manager.num_level + 1, adev->vm_manager .block_size, adev->vm_manager.fragment_size) |
2064 | vm_size, adev->vm_manager.num_level + 1,printk("\0016" "[" "drm" "] " "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n" , vm_size, adev->vm_manager.num_level + 1, adev->vm_manager .block_size, adev->vm_manager.fragment_size) |
2065 | adev->vm_manager.block_size,printk("\0016" "[" "drm" "] " "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n" , vm_size, adev->vm_manager.num_level + 1, adev->vm_manager .block_size, adev->vm_manager.fragment_size) |
2066 | adev->vm_manager.fragment_size)printk("\0016" "[" "drm" "] " "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n" , vm_size, adev->vm_manager.num_level + 1, adev->vm_manager .block_size, adev->vm_manager.fragment_size); |
2067 | } |
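/*
 * Worked example (values only, following the code above): suppose vm_size
 * works out to 262144 GB (256 TB, the 48-bit maximum) and
 * amdgpu_vm_block_size is left at -1. Then max_pfn = 262144 << 18 = 2^36
 * pages, fls64(2^36) - 1 = 36 and DIV_ROUND_UP(36, 9) - 1 = 3, so
 * num_level = min(max_level, 3); with three levels the root is PDB2 and,
 * since num_level > 1, block_size defaults to 9.
 */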
2068 | |
2069 | /** |
2070 | * amdgpu_vm_wait_idle - wait for the VM to become idle |
2071 | * |
2072 | * @vm: VM object to wait for |
2073 | * @timeout: timeout to wait for VM to become idle |
2074 | */ |
2075 | long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) |
2076 | { |
2077 | timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, |
2078 | DMA_RESV_USAGE_BOOKKEEP, |
2079 | true1, timeout); |
2080 | if (timeout <= 0) |
2081 | return timeout; |
2082 | |
2083 | return dma_fence_wait_timeout(vm->last_unlocked, true1, timeout); |
2084 | } |
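/*
 * Illustrative usage sketch (not part of this file): waiting up to two
 * seconds for the VM to go idle, similar to the unreserve-VMID ioctl
 * below. A return value of 0 means timeout and a negative value an error;
 * converting 0 to -ETIMEDOUT is this sketch's choice, not the ioctl's
 * behaviour.
 *
 *	long t = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(2000));
 *	if (t <= 0)
 *		return t < 0 ? t : -ETIMEDOUT;
 */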
2085 | |
2086 | /** |
2087 | * amdgpu_vm_init - initialize a vm instance |
2088 | * |
2089 | * @adev: amdgpu_device pointer |
2090 | * @vm: requested vm |
2091 | * |
2092 | * Init @vm fields. |
2093 | * |
2094 | * Returns: |
2095 | * 0 for success, error for failure. |
2096 | */ |
2097 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
2098 | { |
2099 | struct amdgpu_bo *root_bo; |
2100 | struct amdgpu_bo_vm *root; |
2101 | int r, i; |
2102 | |
2103 | vm->va = RB_ROOT_CACHED(struct rb_root_cached) { ((void *)0) }; |
2104 | for (i = 0; i < AMDGPU_MAX_VMHUBS3; i++) |
2105 | vm->reserved_vmid[i] = NULL((void *)0); |
2106 | INIT_LIST_HEAD(&vm->evicted); |
2107 | INIT_LIST_HEAD(&vm->relocated); |
2108 | INIT_LIST_HEAD(&vm->moved); |
2109 | INIT_LIST_HEAD(&vm->idle); |
2110 | INIT_LIST_HEAD(&vm->invalidated); |
2111 | mtx_init(&vm->status_lock, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&vm-> status_lock), ((((0x0)) > 0x0 && ((0x0)) < 0x9) ? 0x9 : ((0x0)))); } while (0); |
2112 | INIT_LIST_HEAD(&vm->freed); |
2113 | INIT_LIST_HEAD(&vm->done); |
2114 | INIT_LIST_HEAD(&vm->pt_freed); |
2115 | INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); |
2116 | |
2117 | /* create scheduler entities for page table updates */ |
2118 | r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, |
2119 | adev->vm_manager.vm_pte_scheds, |
2120 | adev->vm_manager.vm_pte_num_scheds, NULL((void *)0)); |
2121 | if (r) |
2122 | return r; |
2123 | |
2124 | r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, |
2125 | adev->vm_manager.vm_pte_scheds, |
2126 | adev->vm_manager.vm_pte_num_scheds, NULL((void *)0)); |
2127 | if (r) |
2128 | goto error_free_immediate; |
2129 | |
2130 | vm->pte_support_ats = false0; |
2131 | vm->is_compute_context = false0; |
2132 | |
2133 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & |
2134 | AMDGPU_VM_USE_CPU_FOR_GFX(1 << 0)); |
2135 | |
2136 | DRM_DEBUG_DRIVER("VM update mode is %s\n",___drm_dbg(((void *)0), DRM_UT_DRIVER, "VM update mode is %s\n" , vm->use_cpu_for_update ? "CPU" : "SDMA") |
2137 | vm->use_cpu_for_update ? "CPU" : "SDMA")___drm_dbg(((void *)0), DRM_UT_DRIVER, "VM update mode is %s\n" , vm->use_cpu_for_update ? "CPU" : "SDMA"); |
2138 | WARN_ONCE((vm->use_cpu_for_update &&({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
2139 | !amdgpu_gmc_vram_full_visible(&adev->gmc)),({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
2140 | "CPU update of VM recommended only for large BAR system\n")({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }); |
2141 | |
2142 | if (vm->use_cpu_for_update) |
2143 | vm->update_funcs = &amdgpu_vm_cpu_funcs; |
2144 | else |
2145 | vm->update_funcs = &amdgpu_vm_sdma_funcs; |
2146 | |
2147 | vm->last_update = dma_fence_get_stub(); |
2148 | vm->last_unlocked = dma_fence_get_stub(); |
2149 | vm->last_tlb_flush = dma_fence_get_stub(); |
2150 | |
2151 | rw_init(&vm->eviction_lock, "avmev")_rw_init_flags(&vm->eviction_lock, "avmev", 0, ((void * )0)); |
2152 | vm->evicting = false0; |
2153 | |
2154 | r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, |
2155 | false0, &root); |
2156 | if (r) |
2157 | goto error_free_delayed; |
2158 | root_bo = &root->bo; |
2159 | r = amdgpu_bo_reserve(root_bo, true1); |
2160 | if (r) |
2161 | goto error_free_root; |
2162 | |
2163 | r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); |
2164 | if (r) |
2165 | goto error_unreserve; |
2166 | |
2167 | amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); |
2168 | |
2169 | r = amdgpu_vm_pt_clear(adev, vm, root, false0); |
2170 | if (r) |
2171 | goto error_unreserve; |
2172 | |
2173 | amdgpu_bo_unreserve(vm->root.bo); |
2174 | |
2175 | #ifdef __linux__ |
2176 | INIT_KFIFO(vm->faults); |
2177 | #else |
2178 | SIMPLEQ_INIT(&vm->faults)do { (&vm->faults)->sqh_first = ((void *)0); (& vm->faults)->sqh_last = &(&vm->faults)->sqh_first ; } while (0); |
2179 | #endif |
2180 | |
2181 | return 0; |
2182 | |
2183 | error_unreserve: |
2184 | amdgpu_bo_unreserve(vm->root.bo); |
2185 | |
2186 | error_free_root: |
2187 | amdgpu_bo_unref(&root->shadow); |
2188 | amdgpu_bo_unref(&root_bo); |
2189 | vm->root.bo = NULL((void *)0); |
2190 | |
2191 | error_free_delayed: |
2192 | dma_fence_put(vm->last_tlb_flush); |
2193 | dma_fence_put(vm->last_unlocked); |
2194 | drm_sched_entity_destroy(&vm->delayed); |
2195 | |
2196 | error_free_immediate: |
2197 | drm_sched_entity_destroy(&vm->immediate); |
2198 | |
2199 | return r; |
2200 | } |
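/*
 * Illustrative usage sketch (not part of this file): a per-open driver
 * context typically creates its VM once and tears it down on release.
 * fpriv is assumed to be the caller's private struct holding the VM.
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm);
 *	if (r)
 *		return r;
 *	...use the VM, then on release...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */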
2201 | |
2202 | /** |
2203 | * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM |
2204 | * |
2205 | * @adev: amdgpu_device pointer |
2206 | * @vm: requested vm |
2207 | * |
2208 | * This only works on GFX VMs that don't have any BOs added and no |
2209 | * page tables allocated yet. |
2210 | * |
2211 | * Changes the following VM parameters: |
2212 | * - use_cpu_for_update |
2213 | * - pte_support_ats |
2214 | * |
2215 | * Reinitializes the page directory to reflect the changed ATS |
2216 | * setting. |
2217 | * |
2218 | * Returns: |
2219 | * 0 for success, -errno for errors. |
2220 | */ |
2221 | int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
2222 | { |
2223 | bool_Bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); |
2224 | int r; |
2225 | |
2226 | r = amdgpu_bo_reserve(vm->root.bo, true1); |
2227 | if (r) |
2228 | return r; |
2229 | |
2230 | /* Sanity checks */ |
2231 | if (!amdgpu_vm_pt_is_root_clean(adev, vm)) { |
2232 | r = -EINVAL22; |
2233 | goto unreserve_bo; |
2234 | } |
2235 | |
2236 | /* Check if PD needs to be reinitialized and do it before |
2237 | * changing any other state, in case it fails. |
2238 | */ |
2239 | if (pte_support_ats != vm->pte_support_ats) { |
2240 | vm->pte_support_ats = pte_support_ats; |
2241 | r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo)({ const __typeof( ((struct amdgpu_bo_vm *)0)->bo ) *__mptr = ((vm->root.bo)); (struct amdgpu_bo_vm *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo_vm, bo) );}), |
2242 | false0); |
2243 | if (r) |
2244 | goto unreserve_bo; |
2245 | } |
2246 | |
2247 | /* Update VM state */ |
2248 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & |
2249 | AMDGPU_VM_USE_CPU_FOR_COMPUTE(1 << 1)); |
2250 | DRM_DEBUG_DRIVER("VM update mode is %s\n",___drm_dbg(((void *)0), DRM_UT_DRIVER, "VM update mode is %s\n" , vm->use_cpu_for_update ? "CPU" : "SDMA") |
2251 | vm->use_cpu_for_update ? "CPU" : "SDMA")___drm_dbg(((void *)0), DRM_UT_DRIVER, "VM update mode is %s\n" , vm->use_cpu_for_update ? "CPU" : "SDMA"); |
2252 | WARN_ONCE((vm->use_cpu_for_update &&({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
2253 | !amdgpu_gmc_vram_full_visible(&adev->gmc)),({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }) |
2254 | "CPU update of VM recommended only for large BAR system\n")({ static int __warned; int __ret = !!((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)) ); if (__ret && !__warned) { printf("CPU update of VM recommended only for large BAR system\n" ); __warned = 1; } __builtin_expect(!!(__ret), 0); }); |
2255 | |
2256 | if (vm->use_cpu_for_update) { |
2257 | /* Sync with last SDMA update/clear before switching to CPU */ |
2258 | r = amdgpu_bo_sync_wait(vm->root.bo, |
2259 | AMDGPU_FENCE_OWNER_UNDEFINED((void *)0ul), true1); |
2260 | if (r) |
2261 | goto unreserve_bo; |
2262 | |
2263 | vm->update_funcs = &amdgpu_vm_cpu_funcs; |
2264 | } else { |
2265 | vm->update_funcs = &amdgpu_vm_sdma_funcs; |
2266 | } |
2267 | /* |
2268 | * Make sure root PD gets mapped. As vm_update_mode could be changed |
2269 | * when turning a GFX VM into a compute VM. |
2270 | */ |
2271 | r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo)({ const __typeof( ((struct amdgpu_bo_vm *)0)->bo ) *__mptr = ((vm->root.bo)); (struct amdgpu_bo_vm *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo_vm, bo) );})); |
2272 | if (r) |
2273 | goto unreserve_bo; |
2274 | |
2275 | dma_fence_put(vm->last_update); |
2276 | vm->last_update = dma_fence_get_stub(); |
2277 | vm->is_compute_context = true1; |
2278 | |
2279 | /* Free the shadow bo for compute VM */ |
2280 | amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)({ const __typeof( ((struct amdgpu_bo_vm *)0)->bo ) *__mptr = ((vm->root.bo)); (struct amdgpu_bo_vm *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo_vm, bo) );})->shadow); |
2281 | |
2282 | goto unreserve_bo; |
2283 | |
2284 | unreserve_bo: |
2285 | amdgpu_bo_unreserve(vm->root.bo); |
2286 | return r; |
2287 | } |
2288 | |
2289 | /** |
2290 | * amdgpu_vm_release_compute - release a compute vm |
2291 | * @adev: amdgpu_device pointer |
2292 | * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute |
2293 | * |
2294 | * This is the counterpart of amdgpu_vm_make_compute. It decouples the compute |
2295 | * pasid from the vm. Compute should stop using the vm after this call. |
2296 | */ |
2297 | void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
2298 | { |
2299 | amdgpu_vm_set_pasid(adev, vm, 0); |
2300 | vm->is_compute_context = false0; |
2301 | } |
2302 | |
2303 | /** |
2304 | * amdgpu_vm_fini - tear down a vm instance |
2305 | * |
2306 | * @adev: amdgpu_device pointer |
2307 | * @vm: requested vm |
2308 | * |
2309 | * Tear down @vm. |
2310 | * Unbind the VM and remove all bos from the vm bo list |
2311 | */ |
2312 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) |
2313 | { |
2314 | struct amdgpu_bo_va_mapping *mapping, *tmp; |
2315 | bool_Bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; |
2316 | struct amdgpu_bo *root; |
2317 | unsigned long flags; |
2318 | int i; |
2319 | |
2320 | amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); |
2321 | |
2322 | flush_work(&vm->pt_free_work); |
2323 | |
2324 | root = amdgpu_bo_ref(vm->root.bo); |
2325 | amdgpu_bo_reserve(root, true1); |
2326 | amdgpu_vm_set_pasid(adev, vm, 0); |
2327 | dma_fence_wait(vm->last_unlocked, false0); |
2328 | dma_fence_put(vm->last_unlocked); |
2329 | dma_fence_wait(vm->last_tlb_flush, false0); |
2330 | /* Make sure that all fence callbacks have completed */ |
2331 | spin_lock_irqsave(vm->last_tlb_flush->lock, flags)do { flags = 0; mtx_enter(vm->last_tlb_flush->lock); } while (0); |
2332 | spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags)do { (void)(flags); mtx_leave(vm->last_tlb_flush->lock) ; } while (0); |
2333 | dma_fence_put(vm->last_tlb_flush); |
2334 | |
2335 | list_for_each_entry_safe(mapping, tmp, &vm->freed, list)for (mapping = ({ const __typeof( ((__typeof(*mapping) *)0)-> list ) *__mptr = ((&vm->freed)->next); (__typeof(*mapping ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*mapping), list) );}), tmp = ({ const __typeof( ((__typeof(*mapping) *) 0)->list ) *__mptr = (mapping->list.next); (__typeof(*mapping ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*mapping), list) );}); &mapping->list != (&vm->freed); mapping = tmp, tmp = ({ const __typeof( ((__typeof(*tmp) *)0)->list ) *__mptr = (tmp->list.next); (__typeof(*tmp) *)( (char * )__mptr - __builtin_offsetof(__typeof(*tmp), list) );})) { |
2336 | if (mapping->flags & AMDGPU_PTE_PRT(1ULL << 51) && prt_fini_needed) { |
2337 | amdgpu_vm_prt_fini(adev, vm); |
2338 | prt_fini_needed = false0; |
2339 | } |
2340 | |
2341 | list_del(&mapping->list); |
2342 | amdgpu_vm_free_mapping(adev, vm, mapping, NULL((void *)0)); |
2343 | } |
2344 | |
2345 | amdgpu_vm_pt_free_root(adev, vm); |
2346 | amdgpu_bo_unreserve(root); |
2347 | amdgpu_bo_unref(&root); |
2348 | WARN_ON(vm->root.bo)({ int __ret = !!(vm->root.bo); if (__ret) printf("WARNING %s failed at %s:%d\n" , "vm->root.bo", "/usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c" , 2348); __builtin_expect(!!(__ret), 0); }); |
2349 | |
2350 | drm_sched_entity_destroy(&vm->immediate); |
2351 | drm_sched_entity_destroy(&vm->delayed); |
2352 | |
2353 | if (!RB_EMPTY_ROOT(&vm->va.rb_root)((&vm->va.rb_root)->rb_node == ((void *)0))) { |
2354 | dev_err(adev->dev, "still active bo inside vm\n")printf("drm:pid%d:%s *ERROR* " "still active bo inside vm\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2355 | } |
2356 | rbtree_postorder_for_each_entry_safe(mapping, tmp,for ((mapping) = (__rb_deepest_left((&vm->va.rb_root)-> rb_node) ? ({ const __typeof( ((__typeof(*mapping) *)0)->rb ) *__mptr = (__rb_deepest_left((&vm->va.rb_root)-> rb_node)); (__typeof(*mapping) *)( (char *)__mptr - __builtin_offsetof (__typeof(*mapping), rb) );}) : ((void *)0)); ((mapping) != ( (void *)0)) && ((tmp) = (rb_next_postorder(&mapping ->rb) ? ({ const __typeof( ((typeof(*mapping) *)0)->rb ) *__mptr = (rb_next_postorder(&mapping->rb)); (typeof( *mapping) *)( (char *)__mptr - __builtin_offsetof(typeof(*mapping ), rb) );}) : ((void *)0)), 1); (mapping) = (tmp)) |
2357 | &vm->va.rb_root, rb)for ((mapping) = (__rb_deepest_left((&vm->va.rb_root)-> rb_node) ? ({ const __typeof( ((__typeof(*mapping) *)0)->rb ) *__mptr = (__rb_deepest_left((&vm->va.rb_root)-> rb_node)); (__typeof(*mapping) *)( (char *)__mptr - __builtin_offsetof (__typeof(*mapping), rb) );}) : ((void *)0)); ((mapping) != ( (void *)0)) && ((tmp) = (rb_next_postorder(&mapping ->rb) ? ({ const __typeof( ((typeof(*mapping) *)0)->rb ) *__mptr = (rb_next_postorder(&mapping->rb)); (typeof( *mapping) *)( (char *)__mptr - __builtin_offsetof(typeof(*mapping ), rb) );}) : ((void *)0)), 1); (mapping) = (tmp)) { |
2358 | /* Don't remove the mapping here, we don't want to trigger a |
2359 | * rebalance and the tree is about to be destroyed anyway. |
2360 | */ |
2361 | list_del(&mapping->list); |
2362 | kfree(mapping); |
2363 | } |
2364 | |
2365 | dma_fence_put(vm->last_update); |
2366 | for (i = 0; i < AMDGPU_MAX_VMHUBS3; i++) |
2367 | amdgpu_vmid_free_reserved(adev, vm, i); |
2368 | } |
2369 | |
2370 | /** |
2371 | * amdgpu_vm_manager_init - init the VM manager |
2372 | * |
2373 | * @adev: amdgpu_device pointer |
2374 | * |
2375 | * Initialize the VM manager structures |
2376 | */ |
2377 | void amdgpu_vm_manager_init(struct amdgpu_device *adev) |
2378 | { |
2379 | unsigned i; |
2380 | |
2381 | /* Concurrent flushes are only possible starting with Vega10 and |
2382 | * are broken on Navi10 and Navi14. |
2383 | */ |
2384 | adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || |
2385 | adev->asic_type == CHIP_NAVI10 || |
2386 | adev->asic_type == CHIP_NAVI14); |
2387 | amdgpu_vmid_mgr_init(adev); |
2388 | |
2389 | adev->vm_manager.fence_context = |
2390 | dma_fence_context_alloc(AMDGPU_MAX_RINGS28); |
2391 | for (i = 0; i < AMDGPU_MAX_RINGS28; ++i) |
2392 | adev->vm_manager.seqno[i] = 0; |
2393 | |
2394 | mtx_init(&adev->vm_manager.prt_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&adev-> vm_manager.prt_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9)))); } while (0); |
2395 | atomic_set(&adev->vm_manager.num_prt_users, 0)({ typeof(*(&adev->vm_manager.num_prt_users)) __tmp = ( (0)); *(volatile typeof(*(&adev->vm_manager.num_prt_users )) *)&(*(&adev->vm_manager.num_prt_users)) = __tmp ; __tmp; }); |
2396 | |
2397 | /* Unless overridden by the user, compute VM page tables are updated by |
2398 | * the CPU only on large BAR systems |
2399 | */ |
2400 | #ifdef CONFIG_X86_641 |
2401 | if (amdgpu_vm_update_mode == -1) { |
2402 | /* For asic with VF MMIO access protection |
2403 | * avoid using CPU for VM table updates |
2404 | */ |
2405 | if (amdgpu_gmc_vram_full_visible(&adev->gmc) && |
2406 | !amdgpu_sriov_vf_mmio_access_protection(adev)((adev)->virt.caps & (1 << 5))) |
2407 | adev->vm_manager.vm_update_mode = |
2408 | AMDGPU_VM_USE_CPU_FOR_COMPUTE(1 << 1); |
2409 | else |
2410 | adev->vm_manager.vm_update_mode = 0; |
2411 | } else |
2412 | adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; |
2413 | #else |
2414 | adev->vm_manager.vm_update_mode = 0; |
2415 | #endif |
2416 | |
2417 | xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ4); |
2418 | } |
2419 | |
2420 | /** |
2421 | * amdgpu_vm_manager_fini - cleanup VM manager |
2422 | * |
2423 | * @adev: amdgpu_device pointer |
2424 | * |
2425 | * Cleanup the VM manager and free resources. |
2426 | */ |
2427 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) |
2428 | { |
2429 | WARN_ON(!xa_empty(&adev->vm_manager.pasids))({ int __ret = !!(!xa_empty(&adev->vm_manager.pasids)) ; if (__ret) printf("WARNING %s failed at %s:%d\n", "!xa_empty(&adev->vm_manager.pasids)" , "/usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c", 2429); __builtin_expect (!!(__ret), 0); }); |
2430 | xa_destroy(&adev->vm_manager.pasids); |
2431 | |
2432 | amdgpu_vmid_mgr_fini(adev); |
2433 | } |
2434 | |
2435 | /** |
2436 | * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. |
2437 | * |
2438 | * @dev: drm device pointer |
2439 | * @data: drm_amdgpu_vm |
2440 | * @filp: drm file pointer |
2441 | * |
2442 | * Returns: |
2443 | * 0 for success, -errno for errors. |
2444 | */ |
2445 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
2446 | { |
2447 | union drm_amdgpu_vm *args = data; |
2448 | struct amdgpu_device *adev = drm_to_adev(dev); |
2449 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
2450 | long timeout = msecs_to_jiffies(2000)(((uint64_t)(2000)) * hz / 1000); |
2451 | int r; |
2452 | |
2453 | /* No valid flags defined yet */ |
2454 | if (args->in.flags) |
2455 | return -EINVAL22; |
2456 | |
2457 | switch (args->in.op) { |
2458 | case AMDGPU_VM_OP_RESERVE_VMID1: |
2459 | /* We only need to reserve a vmid from the gfxhub */ |
2460 | r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, |
2461 | AMDGPU_GFXHUB_00); |
2462 | if (r) |
2463 | return r; |
2464 | break; |
2465 | case AMDGPU_VM_OP_UNRESERVE_VMID2: |
2466 | if (amdgpu_sriov_runtime(adev)((adev)->virt.caps & (1 << 4))) |
2467 | timeout = 8 * timeout; |
2468 | |
2469 | /* Wait for the VM to become idle to make sure the vmid set in SPM_VMID is |
2470 | * not referenced anymore. |
2471 | */ |
2472 | r = amdgpu_bo_reserve(fpriv->vm.root.bo, true1); |
2473 | if (r) |
2474 | return r; |
2475 | |
2476 | r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); |
2477 | if (r < 0) |
2478 | return r; |
2479 | |
2480 | amdgpu_bo_unreserve(fpriv->vm.root.bo); |
2481 | amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_00); |
2482 | break; |
2483 | default: |
2484 | return -EINVAL22; |
2485 | } |
2486 | |
2487 | return 0; |
2488 | } |
2489 | |
2490 | /** |
2491 | * amdgpu_vm_get_task_info - Extracts task info for a PASID. |
2492 | * |
2493 | * @adev: drm device pointer |
2494 | * @pasid: PASID identifier for VM |
2495 | * @task_info: task_info to fill. |
2496 | */ |
2497 | void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, |
2498 | struct amdgpu_task_info *task_info) |
2499 | { |
2500 | struct amdgpu_vm *vm; |
2501 | unsigned long flags; |
2502 | |
2503 | xa_lock_irqsave(&adev->vm_manager.pasids, flags)do { flags = 0; mtx_enter(&(&adev->vm_manager.pasids )->xa_lock); } while (0); |
2504 | |
2505 | vm = xa_load(&adev->vm_manager.pasids, pasid); |
2506 | if (vm) |
2507 | *task_info = vm->task_info; |
2508 | |
2509 | xa_unlock_irqrestore(&adev->vm_manager.pasids, flags)do { (void)(flags); mtx_leave(&(&adev->vm_manager. pasids)->xa_lock); } while (0); |
2510 | } |
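/*
 * Illustrative usage sketch (not part of this file): fault handlers use
 * this to attribute a GPU fault to a process. pasid is assumed to come
 * from the interrupt payload.
 *
 *	struct amdgpu_task_info task_info;
 *
 *	memset(&task_info, 0, sizeof(task_info));
 *	amdgpu_vm_get_task_info(adev, pasid, &task_info);
 *	DRM_ERROR("fault from process %s (pid %d)\n",
 *		  task_info.process_name, task_info.pid);
 */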
2511 | |
2512 | /** |
2513 | * amdgpu_vm_set_task_info - Sets VMs task info. |
2514 | * |
2515 | * @vm: vm for which to set the info |
2516 | */ |
2517 | void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) |
2518 | { |
2519 | if (vm->task_info.pid) |
2520 | return; |
2521 | |
2522 | #ifdef __linux__ |
2523 | vm->task_info.pid = current->pid; |
2524 | get_task_comm(vm->task_info.task_name, current); |
2525 | |
2526 | if (current->group_leader->mm != current->mm) |
2527 | return; |
2528 | |
2529 | vm->task_info.tgid = current->group_leader->pid; |
2530 | get_task_comm(vm->task_info.process_name, current->group_leader); |
2531 | #else |
2532 | /* thread */ |
2533 | vm->task_info.pid = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_tid; |
2534 | strlcpy(vm->task_info.task_name, curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_comm, |
2535 | sizeof(vm->task_info.task_name)); |
2536 | |
2537 | /* process */ |
2538 | vm->task_info.tgid = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid; |
2539 | strlcpy(vm->task_info.process_name, curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_comm, |
2540 | sizeof(vm->task_info.process_name)); |
2541 | #endif |
2542 | } |
2543 | |
2544 | /** |
2545 | * amdgpu_vm_handle_fault - graceful handling of VM faults. |
2546 | * @adev: amdgpu device pointer |
2547 | * @pasid: PASID of the VM |
2548 | * @addr: Address of the fault |
2549 | * @write_fault: true if write fault, false if read fault |
2550 | * |
2551 | * Try to gracefully handle a VM fault. Return true if the fault was handled and |
2552 | * shouldn't be reported any more. |
2553 | */ |
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr, bool write_fault)
{
	bool is_compute_context = false;
	struct amdgpu_bo *root;
	unsigned long irqflags;
	uint64_t value, flags;
	struct amdgpu_vm *vm;
	int r;

	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm) {
		root = amdgpu_bo_ref(vm->root.bo);
		is_compute_context = vm->is_compute_context;
	} else {
		root = NULL;
	}
	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);

	if (!root)
		return false;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	if (is_compute_context &&
	    !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
		amdgpu_bo_unref(&root);
		return true;
	}

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_unref;

	/* Double check that the VM still exists */
	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
	vm = xa_load(&adev->vm_manager.pasids, pasid);
	if (vm && vm->root.bo != root)
		vm = NULL;
	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
	if (!vm)
		goto error_unlock;

	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
		AMDGPU_PTE_SYSTEM;

	if (is_compute_context) {
		/* Intentionally setting invalid PTE flag
		 * combination to force a no-retry-fault
		 */
		flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
		value = 0;
	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
		/* Redirect the access to the dummy page */
		value = adev->dummy_page_addr;
		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
			AMDGPU_PTE_WRITEABLE;

	} else {
		/* Let the hw retry silently on the PTE */
		value = 0;
	}

	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve fence slot\n", r);
		goto error_unlock;
	}

	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
				   addr, flags, value, 0, NULL, NULL, NULL);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_pdes(adev, vm, true);

error_unlock:
	amdgpu_bo_unreserve(root);
	if (r < 0)
		DRM_ERROR("Can't handle page fault (%d)\n", r);

error_unref:
	amdgpu_bo_unref(&root);

	return false;
}
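
/*
 * Illustrative sketch (not part of this file): how an interrupt handler
 * might consult amdgpu_vm_handle_fault() before deciding to report a
 * retry fault.  The function name, parameters and the error path below
 * are assumptions for illustration only; the real callers are the
 * per-ASIC gmc_v*_process_interrupt() implementations.
 */
#if 0	/* example only, never built */
static bool example_try_handle_retry_fault(struct amdgpu_device *adev,
					   u32 pasid, u64 fault_addr,
					   bool write_fault)
{
	/* If the fault could be resolved (SVM migration, dummy page, ...),
	 * it must not be reported to userspace again.
	 */
	if (amdgpu_vm_handle_fault(adev, pasid, fault_addr, write_fault))
		return true;

	/* Otherwise leave the fault to the normal reporting path. */
	DRM_ERROR("unhandled page fault at 0x%llx (pasid %u)\n",
		  fault_addr, pasid);
	return false;
}
#endif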
#if defined(CONFIG_DEBUG_FS)
/**
 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
 *
 * @vm: Requested VM for printing BO info
 * @m: debugfs file
 *
 * Print BO information in debugfs file for the VM
 */
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	u64 total_idle = 0;
	u64 total_evicted = 0;
	u64 total_relocated = 0;
	u64 total_moved = 0;
	u64 total_invalidated = 0;
	u64 total_done = 0;
	unsigned int total_idle_objs = 0;
	unsigned int total_evicted_objs = 0;
	unsigned int total_relocated_objs = 0;
	unsigned int total_moved_objs = 0;
	unsigned int total_invalidated_objs = 0;
	unsigned int total_done_objs = 0;
	unsigned int id = 0;

	spin_lock(&vm->status_lock);
	seq_puts(m, "\tIdle BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_idle_objs = id;
	id = 0;

	seq_puts(m, "\tEvicted BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_evicted_objs = id;
	id = 0;

	seq_puts(m, "\tRelocated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_relocated_objs = id;
	id = 0;

	seq_puts(m, "\tMoved BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_moved_objs = id;
	id = 0;

	seq_puts(m, "\tInvalidated BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	total_invalidated_objs = id;
	id = 0;

	seq_puts(m, "\tDone BOs:\n");
	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
		if (!bo_va->base.bo)
			continue;
		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
	}
	spin_unlock(&vm->status_lock);
	total_done_objs = id;

	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
		   total_idle_objs);
	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
		   total_evicted_objs);
	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
		   total_relocated_objs);
	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
		   total_moved_objs);
	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
		   total_invalidated_objs);
	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
		   total_done_objs);
}
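
/*
 * Illustrative sketch (assumes CONFIG_DEBUG_FS): a seq_file "show"
 * callback could dump the per-VM BO lists through
 * amdgpu_debugfs_vm_bo_info().  The callback name and the way the VM is
 * taken from the seq_file private data are assumptions for illustration
 * only; the real debugfs wiring lives in amdgpu_debugfs.c.
 */
#if 0	/* example only, never built */
static int example_vm_bo_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_fpriv *fpriv = m->private;	/* hypothetical wiring */

	/* Walk the idle/evicted/relocated/moved/invalidated/done lists and
	 * print per-BO info plus the summary totals.
	 */
	amdgpu_debugfs_vm_bo_info(&fpriv->vm, m);
	return 0;
}
#endif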
#endif