File: dev/pci/drm/ttm/ttm_bo_vm.c
Warning: line 960, column 3: Value stored to 'ret' is never read
1 | /* SPDX-License-Identifier: GPL-2.0 OR MIT */ |
2 | /************************************************************************** |
3 | * |
4 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
5 | * All Rights Reserved. |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a |
8 | * copy of this software and associated documentation files (the |
9 | * "Software"), to deal in the Software without restriction, including |
10 | * without limitation the rights to use, copy, modify, merge, publish, |
11 | * distribute, sub license, and/or sell copies of the Software, and to |
12 | * permit persons to whom the Software is furnished to do so, subject to |
13 | * the following conditions: |
14 | * |
15 | * The above copyright notice and this permission notice (including the |
16 | * next paragraph) shall be included in all copies or substantial portions |
17 | * of the Software. |
18 | * |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
26 | * |
27 | **************************************************************************/ |
28 | /* |
29 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
30 | */ |
31 | |
32 | #define pr_fmt(fmt) "[TTM] " fmt |
33 | |
34 | #include <drm/ttm/ttm_module.h> |
35 | #include <drm/ttm/ttm_bo_driver.h> |
36 | #include <drm/ttm/ttm_placement.h> |
37 | #include <drm/drm_vma_manager.h> |
38 | #include <linux/mm.h> |
39 | #include <linux/pfn_t.h> |
40 | #include <linux/rbtree.h> |
41 | #include <linux/module.h> |
42 | #include <linux/uaccess.h> |
43 | #include <linux/mem_encrypt.h> |
44 | |
45 | #ifdef __linux__ |
46 | |
47 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, |
48 | struct vm_fault *vmf) |
49 | { |
50 | vm_fault_t ret = 0; |
51 | int err = 0; |
52 | |
53 | if (likely(!bo->moving)) |
54 | goto out_unlock; |
55 | |
56 | /* |
57 | * Quick non-stalling check for idle. |
58 | */ |
59 | if (dma_fence_is_signaled(bo->moving)) |
60 | goto out_clear; |
61 | |
62 | /* |
63 | * If possible, avoid waiting for GPU with mmap_lock |
64 | * held. We only do this if the fault allows retry and this |
65 | * is the first attempt. |
66 | */ |
67 | if (fault_flag_allow_retry_first(vmf->flags)) { |
68 | ret = VM_FAULT_RETRY; |
69 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) |
70 | goto out_unlock; |
71 | |
72 | ttm_bo_get(bo); |
73 | mmap_read_unlock(vmf->vma->vm_mm); |
74 | (void) dma_fence_wait(bo->moving, true); |
75 | dma_resv_unlock(bo->base.resv); |
76 | ttm_bo_put(bo); |
77 | goto out_unlock; |
78 | } |
79 | |
80 | /* |
81 | * Ordinary wait. |
82 | */ |
83 | err = dma_fence_wait(bo->moving, true); |
84 | if (unlikely(err != 0)) { |
85 | ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS : |
86 | VM_FAULT_NOPAGE; |
87 | goto out_unlock; |
88 | } |
89 | |
90 | out_clear: |
91 | dma_fence_put(bo->moving); |
92 | bo->moving = NULL; |
93 | |
94 | out_unlock: |
95 | return ret; |
96 | } |
97 | |
98 | static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, |
99 | unsigned long page_offset) |
100 | { |
101 | struct ttm_bo_device *bdev = bo->bdev; |
102 | |
103 | if (bdev->driver->io_mem_pfn) |
104 | return bdev->driver->io_mem_pfn(bo, page_offset); |
105 | |
106 | return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; |
107 | } |
108 | |
109 | /** |
110 | * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback |
111 | * @bo: The buffer object |
112 | * @vmf: The fault structure handed to the callback |
113 | * |
114 | * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped |
115 | * during long waits, and after the wait the callback will be restarted. This |
116 | * is to allow other threads using the same virtual memory space concurrent |
117 | * access to map(), unmap() completely unrelated buffer objects. TTM buffer |
118 | * object reservations sometimes wait for GPU and should therefore be |
119 | * considered long waits. This function reserves the buffer object interruptibly |
120 | * taking this into account. Starvation is avoided by the vm system not |
121 | * allowing too many repeated restarts. |
122 | * This function is intended to be used in customized fault() and _mkwrite() |
123 | * handlers. |
124 | * |
125 | * Return: |
126 | * 0 on success and the bo was reserved. |
127 | * VM_FAULT_RETRY if blocking wait. |
128 | * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. |
129 | */ |
130 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, |
131 | struct vm_fault *vmf) |
132 | { |
133 | /* |
134 | * Work around locking order reversal in fault / nopfn |
135 | * between mmap_lock and bo_reserve: Perform a trylock operation |
136 | * for reserve, and if it fails, retry the fault after waiting |
137 | * for the buffer to become unreserved. |
138 | */ |
139 | if (unlikely(!dma_resv_trylock(bo->base.resv))) { |
140 | /* |
141 | * If the fault allows retry and this is the first |
142 | * fault attempt, we try to release the mmap_lock |
143 | * before waiting |
144 | */ |
145 | if (fault_flag_allow_retry_first(vmf->flags)) { |
146 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
147 | ttm_bo_get(bo); |
148 | mmap_read_unlock(vmf->vma->vm_mm); |
149 | if (!dma_resv_lock_interruptible(bo->base.resv, |
150 | NULL)) |
151 | dma_resv_unlock(bo->base.resv); |
152 | ttm_bo_put(bo); |
153 | } |
154 | |
155 | return VM_FAULT_RETRY; |
156 | } |
157 | |
158 | if (dma_resv_lock_interruptible(bo->base.resv, NULL)) |
159 | return VM_FAULT_NOPAGE; |
160 | } |
161 | |
162 | return 0; |
163 | } |
164 | EXPORT_SYMBOL(ttm_bo_vm_reserve); |
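/* Typical usage: a driver fault handler calls ttm_bo_vm_reserve(), then |
 * ttm_bo_vm_fault_reserved(), then dma_resv_unlock(); the stock |
 * ttm_bo_vm_fault() below follows exactly this sequence. */ |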
165 | |
166 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
167 | /** |
168 | * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults |
169 | * @vmf: Fault data |
170 | * @bo: The buffer object |
171 | * @page_offset: Page offset from bo start |
172 | * @fault_page_size: The size of the fault in pages. |
173 | * @pgprot: The page protections. |
174 | * Does additional checking whether it's possible to insert a PUD or PMD |
175 | * pfn and performs the insertion. |
176 | * |
177 | * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if |
178 | * a huge fault was not possible, or on insertion error. |
179 | */ |
180 | static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, |
181 | struct ttm_buffer_object *bo, |
182 | pgoff_t page_offset, |
183 | pgoff_t fault_page_size, |
184 | pgprot_t pgprot) |
185 | { |
186 | pgoff_t i; |
187 | vm_fault_t ret; |
188 | unsigned long pfn; |
189 | pfn_t pfnt; |
190 | struct ttm_tt *ttm = bo->ttm; |
191 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
192 | |
193 | /* Fault should not cross bo boundary. */ |
194 | page_offset &= ~(fault_page_size - 1); |
195 | if (page_offset + fault_page_size > bo->num_pages) |
196 | goto out_fallback; |
197 | |
198 | if (bo->mem.bus.is_iomem) |
199 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); |
200 | else |
201 | pfn = page_to_pfn(ttm->pages[page_offset]); |
202 | |
203 | /* pfn must be fault_page_size aligned. */ |
204 | if ((pfn & (fault_page_size - 1)) != 0) |
205 | goto out_fallback; |
206 | |
207 | /* Check that memory is contiguous. */ |
208 | if (!bo->mem.bus.is_iomem) { |
209 | for (i = 1; i < fault_page_size; ++i) { |
210 | if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) |
211 | goto out_fallback; |
212 | } |
213 | } else if (bo->bdev->driver->io_mem_pfn) { |
214 | for (i = 1; i < fault_page_size; ++i) { |
215 | if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) |
216 | goto out_fallback; |
217 | } |
218 | } |
219 | |
220 | pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); |
221 | if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) |
222 | ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); |
223 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
224 | else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) |
225 | ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); |
226 | #endif |
227 | else |
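/* The assignment inside WARN_ON_ONCE() below is deliberate: it warns |
 * about an unexpected fault_page_size while also setting the fallback |
 * return value. */ |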
228 | WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); |
229 | |
230 | if (ret != VM_FAULT_NOPAGE) |
231 | goto out_fallback; |
232 | |
233 | return VM_FAULT_NOPAGE; |
234 | out_fallback: |
235 | count_vm_event(THP_FAULT_FALLBACK); |
236 | return VM_FAULT_FALLBACK; |
237 | } |
238 | #else |
239 | static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, |
240 | struct ttm_buffer_object *bo, |
241 | pgoff_t page_offset, |
242 | pgoff_t fault_page_size, |
243 | pgprot_t pgprot) |
244 | { |
245 | return VM_FAULT_FALLBACK; |
246 | } |
247 | #endif |
248 | |
249 | /** |
250 | * ttm_bo_vm_fault_reserved - TTM fault helper |
251 | * @vmf: The struct vm_fault given as argument to the fault callback |
252 | * @prot: The page protection to be used for this memory area. |
253 | * @num_prefault: Maximum number of prefault pages. The caller may want to |
254 | * specify this based on madvise settings and the size of the GPU object |
255 | * backed by the memory. |
256 | * @fault_page_size: The size of the fault in pages. |
257 | * |
258 | * This function inserts one or more page table entries pointing to the |
259 | * memory backing the buffer object, and then returns a return code |
260 | * instructing the caller to retry the page access. |
261 | * |
262 | * Return: |
263 | * VM_FAULT_NOPAGE on success or pending signal |
264 | * VM_FAULT_SIGBUS on unspecified error |
265 | * VM_FAULT_OOM on out-of-memory |
266 | * VM_FAULT_RETRY if retryable wait |
267 | */ |
268 | vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, |
269 | pgprot_t prot, |
270 | pgoff_t num_prefault, |
271 | pgoff_t fault_page_size) |
272 | { |
273 | struct vm_area_struct *vma = vmf->vma; |
274 | struct ttm_buffer_object *bo = vma->vm_private_data; |
275 | struct ttm_bo_device *bdev = bo->bdev; |
276 | unsigned long page_offset; |
277 | unsigned long page_last; |
278 | unsigned long pfn; |
279 | struct ttm_tt *ttm = NULL; |
280 | struct vm_page *page; |
281 | int err; |
282 | pgoff_t i; |
283 | vm_fault_t ret = VM_FAULT_NOPAGE; |
284 | unsigned long address = vmf->address; |
285 | |
286 | /* |
287 | * Refuse to fault imported pages. This should be handled |
288 | * (if at all) by redirecting mmap to the exporter. |
289 | */ |
290 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) |
291 | return VM_FAULT_SIGBUS; |
292 | |
293 | if (bdev->driver->fault_reserve_notify) { |
294 | struct dma_fence *moving = dma_fence_get(bo->moving); |
295 | |
296 | err = bdev->driver->fault_reserve_notify(bo); |
297 | switch (err) { |
298 | case 0: |
299 | break; |
300 | case -EBUSY: |
301 | case -ERESTARTSYS: |
302 | dma_fence_put(moving); |
303 | return VM_FAULT_NOPAGE; |
304 | default: |
305 | dma_fence_put(moving); |
306 | return VM_FAULT_SIGBUS; |
307 | } |
308 | |
309 | if (bo->moving != moving) { |
310 | ttm_bo_move_to_lru_tail_unlocked(bo); |
311 | } |
312 | dma_fence_put(moving); |
313 | } |
314 | |
315 | /* |
316 | * Wait for buffer data in transit, due to a pipelined |
317 | * move. |
318 | */ |
319 | ret = ttm_bo_vm_fault_idle(bo, vmf); |
320 | if (unlikely(ret != 0)) |
321 | return ret; |
322 | |
323 | err = ttm_mem_io_reserve(bdev, &bo->mem); |
324 | if (unlikely(err != 0)) |
325 | return VM_FAULT_SIGBUS; |
326 | |
327 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + |
328 | vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); |
329 | page_last = vma_pages(vma) + vma->vm_pgoff - |
330 | drm_vma_node_start(&bo->base.vma_node); |
331 | |
332 | if (unlikely(page_offset >= bo->num_pages)) |
333 | return VM_FAULT_SIGBUS; |
334 | |
335 | prot = ttm_io_prot(bo->mem.placement, prot); |
336 | if (!bo->mem.bus.is_iomem) { |
337 | struct ttm_operation_ctx ctx = { |
338 | .interruptible = false, |
339 | .no_wait_gpu = false, |
340 | .flags = TTM_OPT_FLAG_FORCE_ALLOC |
341 | |
342 | }; |
343 | |
344 | ttm = bo->ttm; |
345 | if (ttm_tt_populate(bdev, bo->ttm, &ctx)) |
346 | return VM_FAULT_OOM; |
347 | } else { |
348 | /* Iomem should not be marked encrypted */ |
349 | prot = pgprot_decrypted(prot); |
350 | } |
351 | |
352 | /* We don't prefault on huge faults. Yet. */ |
353 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) |
354 | return ttm_bo_vm_insert_huge(vmf, bo, page_offset, |
355 | fault_page_size, prot); |
356 | |
357 | /* |
358 | * Speculatively prefault a number of pages. Only error on |
359 | * first page. |
360 | */ |
361 | for (i = 0; i < num_prefault; ++i) { |
362 | if (bo->mem.bus.is_iomem) { |
363 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); |
364 | } else { |
365 | page = ttm->pages[page_offset]; |
366 | if (unlikely(!page && i == 0)) { |
367 | return VM_FAULT_OOM; |
368 | } else if (unlikely(!page)) { |
369 | break; |
370 | } |
371 | page->index = drm_vma_node_start(&bo->base.vma_node) + |
372 | page_offset; |
373 | pfn = page_to_pfn(page); |
374 | } |
375 | |
376 | /* |
377 | * Note that the value of @prot at this point may differ from |
378 | * the value of @vma->vm_page_prot in the caching- and |
379 | * encryption bits. This is because the exact location of the |
380 | * data may not be known at mmap() time and may also change |
381 | * at arbitrary times while the data is mmap'ed. |
382 | * See vmf_insert_mixed_prot() for a discussion. |
383 | */ |
384 | if (vma->vm_flags & VM_MIXEDMAP) |
385 | ret = vmf_insert_mixed_prot(vma, address, |
386 | __pfn_to_pfn_t(pfn, PFN_DEV), |
387 | prot); |
388 | else |
389 | ret = vmf_insert_pfn_prot(vma, address, pfn, prot); |
390 | |
391 | /* Never error on prefaulted PTEs */ |
392 | if (unlikely((ret & VM_FAULT_ERROR))) { |
393 | if (i == 0) |
394 | return VM_FAULT_NOPAGE; |
395 | else |
396 | break; |
397 | } |
398 | |
399 | address += PAGE_SIZE; |
400 | if (unlikely(++page_offset >= page_last)) |
401 | break; |
402 | } |
403 | return ret; |
404 | } |
405 | EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); |
406 | |
407 | vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) |
408 | { |
409 | struct vm_area_struct *vma = vmf->vma; |
410 | pgprot_t prot; |
411 | struct ttm_buffer_object *bo = vma->vm_private_data; |
412 | vm_fault_t ret; |
413 | |
414 | ret = ttm_bo_vm_reserve(bo, vmf); |
415 | if (ret) |
416 | return ret; |
417 | |
418 | prot = vma->vm_page_prot; |
419 | ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); |
420 | if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) |
421 | return ret; |
422 | |
423 | dma_resv_unlock(bo->base.resv); |
424 | |
425 | return ret; |
426 | } |
427 | EXPORT_SYMBOL(ttm_bo_vm_fault); |
428 | |
429 | #else /* !__linux__ */ |
430 | |
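/* Local stand-ins for Linux's vm_fault_t codes, letting the OpenBSD |
 * fault path below mirror the structure of the Linux path above. */ |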
431 | #define VM_FAULT_NOPAGE 1 |
432 | #define VM_FAULT_SIGBUS 2 |
433 | #define VM_FAULT_RETRY 3 |
434 | #define VM_FAULT_OOM 4 |
435 | |
436 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, |
437 | struct uvm_faultinfo *ufi) |
438 | { |
439 | vm_fault_t ret = 0; |
440 | int err = 0; |
441 | |
442 | if (likely(!bo->moving)) |
443 | goto out_unlock; |
444 | |
445 | /* |
446 | * Quick non-stalling check for idle. |
447 | */ |
448 | if (dma_fence_is_signaled(bo->moving)) |
449 | goto out_clear; |
450 | |
451 | #ifdef __linux__ |
452 | /* |
453 | * If possible, avoid waiting for GPU with mmap_lock |
454 | * held. We only do this if the fault allows retry and this |
455 | * is the first attempt. |
456 | */ |
457 | if (fault_flag_allow_retry_first(vmf->flags)) { |
458 | ret = VM_FAULT_RETRY; |
459 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) |
460 | goto out_unlock; |
461 | |
462 | ttm_bo_get(bo); |
463 | mmap_read_unlock(vmf->vma->vm_mm); |
464 | (void) dma_fence_wait(bo->moving, true); |
465 | dma_resv_unlock(bo->base.resv); |
466 | ttm_bo_put(bo); |
467 | goto out_unlock; |
468 | } |
469 | #endif |
470 | |
471 | /* |
472 | * Ordinary wait. |
473 | */ |
474 | err = dma_fence_wait(bo->moving, true); |
475 | if (unlikely(err != 0)) { |
476 | ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS : |
477 | VM_FAULT_NOPAGE; |
478 | goto out_unlock; |
479 | } |
480 | |
481 | out_clear: |
482 | dma_fence_put(bo->moving); |
483 | bo->moving = NULL; |
484 | |
485 | out_unlock: |
486 | return ret; |
487 | } |
488 | |
489 | static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, |
490 | unsigned long page_offset) |
491 | { |
492 | struct ttm_bo_device *bdev = bo->bdev; |
493 | |
494 | if (bdev->driver->io_mem_pfn) |
495 | return bdev->driver->io_mem_pfn(bo, page_offset); |
496 | |
497 | return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; |
498 | } |
499 | |
500 | /** |
501 | * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback |
502 | * @bo: The buffer object |
503 | * @vmf: The fault structure handed to the callback |
504 | * |
505 | * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped |
506 | * during long waits, and after the wait the callback will be restarted. This |
507 | * is to allow other threads using the same virtual memory space concurrent |
508 | * access to map(), unmap() completely unrelated buffer objects. TTM buffer |
509 | * object reservations sometimes wait for GPU and should therefore be |
510 | * considered long waits. This function reserves the buffer object interruptibly |
511 | * taking this into account. Starvation is avoided by the vm system not |
512 | * allowing too many repeated restarts. |
513 | * This function is intended to be used in customized fault() and _mkwrite() |
514 | * handlers. |
515 | * |
516 | * Return: |
517 | * 0 on success and the bo was reserved. |
518 | * VM_FAULT_RETRY if blocking wait. |
519 | * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. |
520 | */ |
521 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo) |
522 | { |
523 | /* |
524 | * Work around locking order reversal in fault / nopfn |
525 | * between mmap_lock and bo_reserve: Perform a trylock operation |
526 | * for reserve, and if it fails, retry the fault after waiting |
527 | * for the buffer to become unreserved. |
528 | */ |
529 | if (unlikely(!dma_resv_trylock(bo->base.resv))) { |
530 | #ifdef __linux__ |
531 | /* |
532 | * If the fault allows retry and this is the first |
533 | * fault attempt, we try to release the mmap_lock |
534 | * before waiting |
535 | */ |
536 | if (fault_flag_allow_retry_first(vmf->flags)) { |
537 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
538 | ttm_bo_get(bo); |
539 | mmap_read_unlock(vmf->vma->vm_mm); |
540 | if (!dma_resv_lock_interruptible(bo->base.resv, |
541 | NULL)) |
542 | dma_resv_unlock(bo->base.resv); |
543 | ttm_bo_put(bo); |
544 | } |
545 | |
546 | return VM_FAULT_RETRY; |
547 | } |
548 | #endif |
549 | |
550 | if (dma_resv_lock_interruptible(bo->base.resv, NULL)) |
551 | return VM_FAULT_NOPAGE; |
552 | } |
553 | |
554 | return 0; |
555 | } |
556 | |
557 | vm_fault_t ttm_bo_vm_fault_reserved(struct uvm_faultinfo *ufi, |
558 | vaddr_t vaddr, |
559 | pgoff_t num_prefault, |
560 | pgoff_t fault_page_size) |
561 | { |
562 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; |
563 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; |
564 | struct ttm_bo_device *bdev = bo->bdev; |
565 | unsigned long page_offset; |
566 | unsigned long page_last; |
567 | unsigned long pfn; |
568 | struct ttm_tt *ttm = NULL; |
569 | struct vm_page *page; |
570 | bus_addr_t addr; |
571 | paddr_t paddr; |
572 | vm_prot_t prot; |
573 | int pmap_flags; |
574 | int err; |
575 | pgoff_t i; |
576 | vm_fault_t ret = VM_FAULT_NOPAGE; |
577 | unsigned long address = (unsigned long)vaddr; |
578 | |
579 | /* |
580 | * Refuse to fault imported pages. This should be handled |
581 | * (if at all) by redirecting mmap to the exporter. |
582 | */ |
583 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) |
584 | return VM_FAULT_SIGBUS; |
585 | |
586 | if (bdev->driver->fault_reserve_notify) { |
587 | struct dma_fence *moving = dma_fence_get(bo->moving); |
588 | |
589 | err = bdev->driver->fault_reserve_notify(bo); |
590 | switch (err) { |
591 | case 0: |
592 | break; |
593 | case -EBUSY: |
594 | case -ERESTARTSYS: |
595 | dma_fence_put(moving); |
596 | return VM_FAULT_NOPAGE; |
597 | default: |
598 | dma_fence_put(moving); |
599 | return VM_FAULT_SIGBUS; |
600 | } |
601 | |
602 | if (bo->moving != moving) { |
603 | ttm_bo_move_to_lru_tail_unlocked(bo); |
604 | } |
605 | dma_fence_put(moving); |
606 | } |
607 | |
608 | /* |
609 | * Wait for buffer data in transit, due to a pipelined |
610 | * move. |
611 | */ |
612 | ret = ttm_bo_vm_fault_idle(bo, ufi); |
613 | if (unlikely(ret != 0)) |
614 | return ret; |
615 | ret = VM_FAULT_NOPAGE; |
616 | |
617 | err = ttm_mem_io_reserve(bdev, &bo->mem); |
618 | if (unlikely(err != 0)) |
619 | return VM_FAULT_SIGBUS; |
620 | |
621 | page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) + |
622 | drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT); |
623 | page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) + |
624 | drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT); |
625 | |
626 | if (unlikely(page_offset >= bo->num_pages)) |
627 | return VM_FAULT_SIGBUS; |
628 | |
629 | prot = ufi->entry->protection; |
630 | pmap_flags = ttm_io_prot(bo->mem.placement, 0); |
631 | if (!bo->mem.bus.is_iomem) { |
632 | struct ttm_operation_ctx ctx = { |
633 | .interruptible = false, |
634 | .no_wait_gpu = false, |
635 | .flags = TTM_OPT_FLAG_FORCE_ALLOC |
636 | |
637 | }; |
638 | |
639 | ttm = bo->ttm; |
640 | if (ttm_tt_populate(bdev, bo->ttm, &ctx)) |
641 | return VM_FAULT_OOM; |
642 | } |
643 | |
644 | #ifdef __linux__ |
645 | /* We don't prefault on huge faults. Yet. */ |
646 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) |
647 | return ttm_bo_vm_insert_huge(vmf, bo, page_offset, |
648 | fault_page_size, prot); |
649 | #endif |
650 | |
651 | /* |
652 | * Speculatively prefault a number of pages. Only error on |
653 | * first page. |
654 | */ |
655 | for (i = 0; i < num_prefault; ++i) { |
656 | if (bo->mem.bus.is_iomem) { |
657 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); |
658 | addr = pfn << PAGE_SHIFT; |
659 | paddr = bus_space_mmap(bdev->memt, addr, 0, prot, 0); |
660 | } else { |
661 | page = ttm->pages[page_offset]; |
662 | if (unlikely(!page && i == 0)) { |
663 | return VM_FAULT_OOM; |
664 | } else if (unlikely(!page)) { |
665 | break; |
666 | } |
667 | paddr = VM_PAGE_TO_PHYS(page); |
668 | } |
669 | |
670 | err = pmap_enter(ufi->orig_map->pmap, address, |
671 | paddr | pmap_flags, prot, PMAP_CANFAIL | prot); |
672 | |
673 | /* Never error on prefaulted PTEs */ |
674 | if (unlikely(err)) { |
675 | ret = VM_FAULT_OOM; |
676 | if (i == 0) |
677 | return VM_FAULT_NOPAGE; |
678 | else |
679 | break; |
680 | } |
681 | |
682 | address += PAGE_SIZE; |
683 | if (unlikely(++page_offset >= page_last)) |
684 | break; |
685 | } |
686 | pmap_update(ufi->orig_map->pmap); |
687 | return ret; |
688 | } |
689 | EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); |
690 | |
691 | int |
692 | ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, |
693 | int npages, int centeridx, vm_fault_t fault_type, |
694 | vm_prot_t access_type, int flags) |
695 | { |
696 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; |
697 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; |
698 | vm_fault_t ret; |
699 | |
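/* UVM fault entry point: the switches below translate the Linux-style |
 * vm_fault_t results of the shared helpers into the VM_PAGER_* codes |
 * that UVM expects. */ |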
700 | ret = ttm_bo_vm_reserve(bo); |
701 | if (ret) { |
702 | switch (ret) { |
703 | case VM_FAULT_NOPAGE: |
704 | ret = VM_PAGER_OK; |
705 | break; |
706 | case VM_FAULT_RETRY: |
707 | ret = VM_PAGER_REFAULT; |
708 | break; |
709 | default: |
710 | ret = VM_PAGER_BAD; |
711 | break; |
712 | } |
713 | |
714 | uvmfault_unlockall(ufi, NULL, uobj); |
715 | return ret; |
716 | } |
717 | |
718 | ret = ttm_bo_vm_fault_reserved(ufi, vaddr, TTM_BO_VM_NUM_PREFAULT, 1); |
719 | switch (ret) { |
720 | case VM_FAULT_NOPAGE: |
721 | ret = VM_PAGER_OK; |
722 | break; |
723 | case VM_FAULT_RETRY: |
724 | ret = VM_PAGER_REFAULT; |
725 | break; |
726 | default: |
727 | ret = VM_PAGER_BAD; |
728 | break; |
729 | } |
730 | |
731 | dma_resv_unlock(bo->base.resv); |
732 | |
733 | uvmfault_unlockall(ufi, NULL, uobj); |
734 | return ret; |
735 | } |
736 | EXPORT_SYMBOL(ttm_bo_vm_fault); |
737 | |
738 | #endif /* !__linux__ */ |
739 | |
740 | #ifdef notyet |
741 | void ttm_bo_vm_open(struct vm_area_struct *vma) |
742 | { |
743 | struct ttm_buffer_object *bo = vma->vm_private_data; |
744 | |
745 | WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); |
746 | |
747 | ttm_bo_get(bo); |
748 | } |
749 | EXPORT_SYMBOL(ttm_bo_vm_open); |
750 | |
751 | void ttm_bo_vm_close(struct vm_area_struct *vma) |
752 | { |
753 | struct ttm_buffer_object *bo = vma->vm_private_data; |
754 | |
755 | ttm_bo_put(bo); |
756 | vma->vm_private_data = NULL; |
757 | } |
758 | EXPORT_SYMBOL(ttm_bo_vm_close); |
759 | |
760 | static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, |
761 | unsigned long offset, |
762 | uint8_t *buf, int len, int write) |
763 | { |
764 | unsigned long page = offset >> PAGE_SHIFT; |
765 | unsigned long bytes_left = len; |
766 | int ret; |
767 | |
768 | /* Copy a page at a time, that way no extra virtual address |
769 | * mapping is needed |
770 | */ |
771 | offset -= page << PAGE_SHIFT; |
772 | do { |
773 | unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); |
774 | struct ttm_bo_kmap_obj map; |
775 | void *ptr; |
776 | bool is_iomem; |
777 | |
778 | ret = ttm_bo_kmap(bo, page, 1, &map); |
779 | if (ret) |
780 | return ret; |
781 | |
782 | ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; |
783 | WARN_ON_ONCE(is_iomem); |
784 | if (write) |
785 | memcpy(ptr, buf, bytes); |
786 | else |
787 | memcpy(buf, ptr, bytes); |
788 | ttm_bo_kunmap(&map); |
789 | |
790 | page++; |
791 | buf += bytes; |
792 | bytes_left -= bytes; |
793 | offset = 0; |
794 | } while (bytes_left); |
795 | |
796 | return len; |
797 | } |
798 | |
799 | int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, |
800 | void *buf, int len, int write) |
801 | { |
802 | struct ttm_buffer_object *bo = vma->vm_private_data; |
803 | unsigned long offset = (addr) - vma->vm_start + |
804 | ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) |
805 | << PAGE_SHIFT); |
806 | int ret; |
807 | |
808 | if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) |
809 | return -EIO; |
810 | |
811 | ret = ttm_bo_reserve(bo, true, false, NULL); |
812 | if (ret) |
813 | return ret; |
814 | |
815 | switch (bo->mem.mem_type) { |
816 | case TTM_PL_SYSTEM: |
817 | fallthrough; |
818 | case TTM_PL_TT: |
819 | ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); |
820 | break; |
821 | default: |
822 | if (bo->bdev->driver->access_memory) |
823 | ret = bo->bdev->driver->access_memory( |
824 | bo, offset, buf, len, write); |
825 | else |
826 | ret = -EIO; |
827 | } |
828 | |
829 | ttm_bo_unreserve(bo); |
830 | |
831 | return ret; |
832 | } |
833 | EXPORT_SYMBOL(ttm_bo_vm_access); |
834 | |
835 | static const struct vm_operations_struct ttm_bo_vm_ops = { |
836 | .fault = ttm_bo_vm_fault, |
837 | .open = ttm_bo_vm_open, |
838 | .close = ttm_bo_vm_close, |
839 | .access = ttm_bo_vm_access, |
840 | }; |
841 | #endif |
842 | |
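/* UVM pager glue: reference/detach hooks that take and drop a |
 * reference on the TTM buffer object backing the uvm_object. */ |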
843 | void |
844 | ttm_bo_vm_reference(struct uvm_object *uobj) |
845 | { |
846 | struct ttm_buffer_object *bo = |
847 | (struct ttm_buffer_object *)uobj; |
848 | |
849 | ttm_bo_get(bo); |
850 | } |
851 | |
852 | void |
853 | ttm_bo_vm_detach(struct uvm_object *uobj) |
854 | { |
855 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; |
856 | |
857 | ttm_bo_put(bo); |
858 | } |
859 | |
860 | const struct uvm_pagerops ttm_bo_vm_ops = { |
861 | .pgo_fault = ttm_bo_vm_fault, |
862 | .pgo_reference = ttm_bo_vm_reference, |
863 | .pgo_detach = ttm_bo_vm_detach |
864 | }; |
865 | |
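/* Find the buffer object mapped at the given page range via the |
 * device's vma offset manager and take a reference on it; returns |
 * NULL (after logging an error) if no object is found or it is |
 * already being torn down. */ |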
866 | static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, |
867 | unsigned long offset, |
868 | unsigned long pages) |
869 | { |
870 | struct drm_vma_offset_node *node; |
871 | struct ttm_buffer_object *bo = NULL; |
872 | |
873 | drm_vma_offset_lock_lookup(bdev->vma_manager); |
874 | |
875 | node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); |
876 | if (likely(node)) { |
877 | bo = container_of(node, struct ttm_buffer_object, |
878 | base.vma_node); |
879 | bo = ttm_bo_get_unless_zero(bo); |
880 | } |
881 | |
882 | drm_vma_offset_unlock_lookup(bdev->vma_manager); |
883 | |
884 | if (!bo) |
885 | pr_err("Could not find buffer object to map\n"); |
886 | |
887 | return bo; |
888 | } |
889 | |
890 | #ifdef notyet |
891 | static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma) |
892 | { |
893 | vma->vm_ops = &ttm_bo_vm_ops; |
894 | |
895 | /* |
896 | * Note: We're transferring the bo reference to |
897 | * vma->vm_private_data here. |
898 | */ |
899 | |
900 | vma->vm_private_data = bo; |
901 | |
902 | /* |
903 | * We'd like to use VM_PFNMAP on shared mappings, where |
904 | * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, |
905 | * but for some reason VM_PFNMAP + x86 PAT + write-combine is very |
906 | * bad for performance. Until that has been sorted out, use |
907 | * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 |
908 | */ |
909 | vma->vm_flags |= VM_MIXEDMAP; |
910 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; |
911 | } |
912 | #endif |
913 | |
914 | #ifdef __linux__ |
915 | int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, |
916 | struct ttm_bo_device *bdev) |
917 | { |
918 | struct ttm_bo_driver *driver; |
919 | struct ttm_buffer_object *bo; |
920 | int ret; |
921 | |
922 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START)) |
923 | return -EINVAL; |
924 | |
925 | bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); |
926 | if (unlikely(!bo)) |
927 | return -EINVAL; |
928 | |
929 | driver = bo->bdev->driver; |
930 | if (unlikely(!driver->verify_access)) { |
931 | ret = -EPERM; |
932 | goto out_unref; |
933 | } |
934 | ret = driver->verify_access(bo, filp); |
935 | if (unlikely(ret != 0)) |
936 | goto out_unref; |
937 | |
938 | ttm_bo_mmap_vma_setup(bo, vma); |
939 | return 0; |
940 | out_unref: |
941 | ttm_bo_put(bo); |
942 | return ret; |
943 | } |
944 | EXPORT_SYMBOL(ttm_bo_mmap); |
945 | #else |
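/* OpenBSD variant: look up the bo backing the byte range and return |
 * its uvm_object for uvm_map(), initializing the pager ops on first |
 * use, instead of wiring up a vm_area_struct. */ |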
946 | struct uvm_object * |
947 | ttm_bo_mmap(struct file *filp, voff_t off, vsize_t size, |
948 | struct ttm_bo_device *bdev) |
949 | { |
950 | struct ttm_bo_driver *driver; |
951 | struct ttm_buffer_object *bo; |
952 | int ret; |
953 | |
954 | bo = ttm_bo_vm_lookup(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT); |
955 | if (unlikely(!bo)) |
956 | return NULL; |
957 | |
958 | driver = bo->bdev->driver; |
959 | if (unlikely(!driver->verify_access)) { |
960 | ret = -EPERM; |
Value stored to 'ret' is never read | |
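/* The store above is dead: unlike the Linux version, this out_unref |
 * path returns NULL unconditionally rather than propagating 'ret'. |
 * Dropping the assignment would silence the analyzer without |
 * changing behaviour. */ |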
961 | goto out_unref; |
962 | } |
963 | ret = driver->verify_access(bo, filp); |
965 | if (unlikely(ret != 0)) |
965 | goto out_unref; |
966 | |
968 | if (bo->base.uobj.pgops == NULL) |
968 | uvm_obj_init(&bo->base.uobj, &ttm_bo_vm_ops, 1); |
969 | return &bo->base.uobj; |
970 | out_unref: |
971 | ttm_bo_put(bo); |
973 | return NULL; |
973 | } |
974 | #endif |
975 | |
976 | #ifdef notyet |
977 | int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) |
978 | { |
979 | ttm_bo_get(bo); |
980 | ttm_bo_mmap_vma_setup(bo, vma); |
981 | return 0; |
982 | } |
983 | EXPORT_SYMBOL(ttm_bo_mmap_obj); |
984 | #endif |