| File: | dev/pci/drm/ttm/ttm_bo_vm.c |
| Warning: | line 551, column 11: Access to field 'pages' results in a dereference of a null pointer (loaded from variable 'ttm') |
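
The flagged dereference is at line 551 below, `page = ttm->pages[page_offset];` in the non-Linux ttm_bo_vm_fault_reserved(). `ttm` is initialized to NULL and only assigned when `bo->resource->bus.is_iomem` is false at the populate step (line 529); the analyzer then assumes `is_iomem` is false again at the line 546 re-check on a path where it was true earlier, leaving `ttm` NULL at the load. Since `bus.is_iomem` does not change while the reservation is held, this is most likely a false positive, but a defensive guard would document that invariant and silence the report. A minimal sketch (the guard is hypothetical, not part of the source):

    } else {
            if (!ttm)                       /* not populated: treat as fatal */
                    return VM_FAULT_SIGBUS;
            page = ttm->pages[page_offset];
            ...
    }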
| 1 | /* SPDX-License-Identifier: GPL-2.0 OR MIT */ | |||
| 2 | /************************************************************************** | |||
| 3 | * | |||
| 4 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | |||
| 5 | * All Rights Reserved. | |||
| 6 | * | |||
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 8 | * copy of this software and associated documentation files (the | |||
| 9 | * "Software"), to deal in the Software without restriction, including | |||
| 10 | * without limitation the rights to use, copy, modify, merge, publish, | |||
| 11 | * distribute, sub license, and/or sell copies of the Software, and to | |||
| 12 | * permit persons to whom the Software is furnished to do so, subject to | |||
| 13 | * the following conditions: | |||
| 14 | * | |||
| 15 | * The above copyright notice and this permission notice (including the | |||
| 16 | * next paragraph) shall be included in all copies or substantial portions | |||
| 17 | * of the Software. | |||
| 18 | * | |||
| 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |||
| 22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |||
| 23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |||
| 24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |||
| 25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| 26 | * | |||
| 27 | **************************************************************************/ | |||
| 28 | /* | |||
| 29 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | |||
| 30 | */ | |||
| 31 | ||||
| 32 | #define pr_fmt(fmt) "[TTM] " fmt | |||
| 33 | ||||
| 34 | #include <drm/ttm/ttm_bo_driver.h> | |||
| 35 | #include <drm/ttm/ttm_placement.h> | |||
| 36 | #include <drm/drm_vma_manager.h> | |||
| 37 | #include <drm/drm_drv.h> | |||
| 38 | #include <drm/drm_managed.h> | |||
| 39 | #include <linux/mm.h> | |||
| 40 | #include <linux/pfn_t.h> | |||
| 41 | #include <linux/rbtree.h> | |||
| 42 | #include <linux/module.h> | |||
| 43 | #include <linux/uaccess.h> | |||
| 44 | #include <linux/mem_encrypt.h> | |||
| 45 | ||||
| 46 | #ifdef __linux__ | |||
| 47 | ||||
| 48 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
| 49 | struct vm_fault *vmf) | |||
| 50 | { | |||
| 51 | long err = 0; | |||
| 52 | ||||
| 53 | /* | |||
| 54 | * Quick non-stalling check for idle. | |||
| 55 | */ | |||
| 56 | if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL)) | |||
| 57 | return 0; | |||
| 58 | ||||
| 59 | /* | |||
| 60 | * If possible, avoid waiting for GPU with mmap_lock | |||
| 61 | * held. We only do this if the fault allows retry and this | |||
| 62 | * is the first attempt. | |||
| 63 | */ | |||
| 64 | if (fault_flag_allow_retry_first(vmf->flags)) { | |||
| 65 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) | |||
| 66 | return VM_FAULT_RETRY; | |||
| 67 | ||||
| 68 | ttm_bo_get(bo); | |||
| 69 | mmap_read_unlock(vmf->vma->vm_mm); | |||
| 70 | (void)dma_resv_wait_timeout(bo->base.resv, | |||
| 71 | DMA_RESV_USAGE_KERNEL, true, | |||
| 72 | MAX_SCHEDULE_TIMEOUT); | |||
| 73 | dma_resv_unlock(bo->base.resv); | |||
| 74 | ttm_bo_put(bo); | |||
| 75 | return VM_FAULT_RETRY; | |||
| 76 | } | |||
| 77 | ||||
| 78 | /* | |||
| 79 | * Ordinary wait. | |||
| 80 | */ | |||
| 81 | err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true, | |||
| 82 | MAX_SCHEDULE_TIMEOUT); | |||
| 83 | if (unlikely(err < 0)) { | |||
| 84 | return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS : | |||
| 85 | VM_FAULT_NOPAGE; | |||
| 86 | } | |||
| 87 | ||||
| 88 | return 0; | |||
| 89 | } | |||
| 90 | ||||
| 91 | static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, | |||
| 92 | unsigned long page_offset) | |||
| 93 | { | |||
| 94 | struct ttm_device *bdev = bo->bdev; | |||
| 95 | ||||
| 96 | if (bdev->funcs->io_mem_pfn) | |||
| 97 | return bdev->funcs->io_mem_pfn(bo, page_offset); | |||
| 98 | ||||
| 99 | return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset; | |||
| 100 | } | |||
| 101 | ||||
| 102 | /** | |||
| 103 | * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback | |||
| 104 | * @bo: The buffer object | |||
| 105 | * @vmf: The fault structure handed to the callback | |||
| 106 | * | |||
| 107 | * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped | |||
| 108 | * during long waits, and after the wait the callback will be restarted. This | |||
| 109 | * is to allow other threads using the same virtual memory space concurrent | |||
| 110 | * access to map(), unmap() completely unrelated buffer objects. TTM buffer | |||
| 111 | * object reservations sometimes wait for GPU and should therefore be | |||
| 112 | * considered long waits. This function reserves the buffer object interruptibly | |||
| 113 | * taking this into account. Starvation is avoided by the vm system not | |||
| 114 | * allowing too many repeated restarts. | |||
| 115 | * This function is intended to be used in customized fault() and _mkwrite() | |||
| 116 | * handlers. | |||
| 117 | * | |||
| 118 | * Return: | |||
| 119 | * 0 on success and the bo was reserved. | |||
| 120 | * VM_FAULT_RETRY if blocking wait. | |||
| 121 | * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. | |||
| 122 | */ | |||
| 123 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, | |||
| 124 | struct vm_fault *vmf) | |||
| 125 | { | |||
| 126 | /* | |||
| 127 | * Work around locking order reversal in fault / nopfn | |||
| 128 | * between mmap_lock and bo_reserve: Perform a trylock operation | |||
| 129 | * for reserve, and if it fails, retry the fault after waiting | |||
| 130 | * for the buffer to become unreserved. | |||
| 131 | */ | |||
| 132 | if (unlikely(!dma_resv_trylock(bo->base.resv))) { | |||
| 133 | /* | |||
| 134 | * If the fault allows retry and this is the first | |||
| 135 | * fault attempt, we try to release the mmap_lock | |||
| 136 | * before waiting | |||
| 137 | */ | |||
| 138 | if (fault_flag_allow_retry_first(vmf->flags)) { | |||
| 139 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | |||
| 140 | ttm_bo_get(bo); | |||
| 141 | mmap_read_unlock(vmf->vma->vm_mm); | |||
| 142 | if (!dma_resv_lock_interruptible(bo->base.resv, | |||
| 143 | NULL)) | |||
| 144 | dma_resv_unlock(bo->base.resv); | |||
| 145 | ttm_bo_put(bo); | |||
| 146 | } | |||
| 147 | ||||
| 148 | return VM_FAULT_RETRY; | |||
| 149 | } | |||
| 150 | ||||
| 151 | if (dma_resv_lock_interruptible(bo->base.resv, NULL)) | |||
| 152 | return VM_FAULT_NOPAGE; | |||
| 153 | } | |||
| 154 | ||||
| 155 | /* | |||
| 156 | * Refuse to fault imported pages. This should be handled | |||
| 157 | * (if at all) by redirecting mmap to the exporter. | |||
| 158 | */ | |||
| 159 | if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { | |||
| 160 | if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) { | |||
| 161 | dma_resv_unlock(bo->base.resv); | |||
| 162 | return VM_FAULT_SIGBUS; | |||
| 163 | } | |||
| 164 | } | |||
| 165 | ||||
| 166 | return 0; | |||
| 167 | } | |||
| 168 | EXPORT_SYMBOL(ttm_bo_vm_reserve); | |||
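/*
 * The kernel-doc above says this helper is meant for customized fault()
 * and _mkwrite() handlers. A minimal sketch of the intended call pattern
 * in a hypothetical driver's mkwrite handler (the "mydrv" names are
 * illustrative, not from this file):
 *
 *	static vm_fault_t mydrv_bo_vm_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;	// VM_FAULT_RETRY or VM_FAULT_NOPAGE
 *
 *		// driver-specific dirty tracking would go here
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return VM_FAULT_NOPAGE;
 *	}
 */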
| 169 | ||||
| 170 | /** | |||
| 171 | * ttm_bo_vm_fault_reserved - TTM fault helper | |||
| 172 | * @vmf: The struct vm_fault given as argument to the fault callback | |||
| 173 | * @prot: The page protection to be used for this memory area. | |||
| 174 | * @num_prefault: Maximum number of prefault pages. The caller may want to | |||
| 175 | * specify this based on madvise settings and the size of the GPU object | |||
| 176 | * backed by the memory. | |||
| 177 | * | |||
| 178 | * This function inserts one or more page table entries pointing to the | |||
| 179 | * memory backing the buffer object, and then returns a return code | |||
| 180 | * instructing the caller to retry the page access. | |||
| 181 | * | |||
| 182 | * Return: | |||
| 183 | * VM_FAULT_NOPAGE on success or pending signal | |||
| 184 | * VM_FAULT_SIGBUS on unspecified error | |||
| 185 | * VM_FAULT_OOM on out-of-memory | |||
| 186 | * VM_FAULT_RETRY if retryable wait | |||
| 187 | */ | |||
| 188 | vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
| 189 | pgprot_t prot, | |||
| 190 | pgoff_t num_prefault) | |||
| 191 | { | |||
| 192 | struct vm_area_struct *vma = vmf->vma; | |||
| 193 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 194 | struct ttm_device *bdev = bo->bdev; | |||
| 195 | unsigned long page_offset; | |||
| 196 | unsigned long page_last; | |||
| 197 | unsigned long pfn; | |||
| 198 | struct ttm_tt *ttm = NULL; | |||
| 199 | struct vm_page *page; | |||
| 200 | int err; | |||
| 201 | pgoff_t i; | |||
| 202 | vm_fault_t ret = VM_FAULT_NOPAGE; | |||
| 203 | unsigned long address = vmf->address; | |||
| 204 | ||||
| 205 | /* | |||
| 206 | * Wait for buffer data in transit, due to a pipelined | |||
| 207 | * move. | |||
| 208 | */ | |||
| 209 | ret = ttm_bo_vm_fault_idle(bo, vmf); | |||
| 210 | if (unlikely(ret != 0)) | |||
| 211 | return ret; | |||
| 212 | ||||
| 213 | err = ttm_mem_io_reserve(bdev, bo->resource); | |||
| 214 | if (unlikely(err != 0)) | |||
| 215 | return VM_FAULT_SIGBUS; | |||
| 216 | ||||
| 217 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + | |||
| 218 | vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); | |||
| 219 | page_last = vma_pages(vma) + vma->vm_pgoff - | |||
| 220 | drm_vma_node_start(&bo->base.vma_node); | |||
| 221 | ||||
| 222 | if (unlikely(page_offset >= bo->resource->num_pages)) | |||
| 223 | return VM_FAULT_SIGBUS; | |||
| 224 | ||||
| 225 | prot = ttm_io_prot(bo, bo->resource, prot); | |||
| 226 | if (!bo->resource->bus.is_iomem) { | |||
| 227 | struct ttm_operation_ctx ctx = { | |||
| 228 | .interruptible = false, | |||
| 229 | .no_wait_gpu = false, | |||
| 230 | .force_alloc = true | |||
| 231 | }; | |||
| 232 | ||||
| 233 | ttm = bo->ttm; | |||
| 234 | if (ttm_tt_populate(bdev, bo->ttm, &ctx)) | |||
| 235 | return VM_FAULT_OOM; | |||
| 236 | } else { | |||
| 237 | /* Iomem should not be marked encrypted */ | |||
| 238 | prot = pgprot_decrypted(prot); | |||
| 239 | } | |||
| 240 | ||||
| 241 | /* | |||
| 242 | * Speculatively prefault a number of pages. Only error on | |||
| 243 | * first page. | |||
| 244 | */ | |||
| 245 | for (i = 0; i < num_prefault; ++i) { | |||
| 246 | if (bo->resource->bus.is_iomem) { | |||
| 247 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); | |||
| 248 | } else { | |||
| 249 | page = ttm->pages[page_offset]; | |||
| 250 | if (unlikely(!page && i == 0)) { | |||
| 251 | return VM_FAULT_OOM; | |||
| 252 | } else if (unlikely(!page)) { | |||
| 253 | break; | |||
| 254 | } | |||
| 255 | pfn = page_to_pfn(page); | |||
| 256 | } | |||
| 257 | ||||
| 258 | /* | |||
| 259 | * Note that the value of @prot at this point may differ from | |||
| 260 | * the value of @vma->vm_page_prot in the caching- and | |||
| 261 | * encryption bits. This is because the exact location of the | |||
| 262 | * data may not be known at mmap() time and may also change | |||
| 263 | * at arbitrary times while the data is mmap'ed. | |||
| 264 | * See vmf_insert_mixed_prot() for a discussion. | |||
| 265 | */ | |||
| 266 | ret = vmf_insert_pfn_prot(vma, address, pfn, prot); | |||
| 267 | ||||
| 268 | /* Never error on prefaulted PTEs */ | |||
| 269 | if (unlikely((ret & VM_FAULT_ERROR))) { | |||
| 270 | if (i == 0) | |||
| 271 | return VM_FAULT_NOPAGE; | |||
| 272 | else | |||
| 273 | break; | |||
| 274 | } | |||
| 275 | ||||
| 276 | address += PAGE_SIZE; | |||
| 277 | if (unlikely(++page_offset >= page_last)) | |||
| 278 | break; | |||
| 279 | } | |||
| 280 | return ret; | |||
| 281 | } | |||
| 282 | EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); | |||
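/*
 * A caller that wants a different prefault window than the
 * TTM_BO_VM_NUM_PREFAULT default used by ttm_bo_vm_fault() below wires
 * the two helpers together itself. A hedged sketch of that wiring (the
 * whole-object prefault policy is illustrative only):
 *
 *	ret = ttm_bo_vm_reserve(bo, vmf);
 *	if (ret)
 *		return ret;
 *	// e.g. prefault the whole object for sequential access patterns
 *	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *				       bo->resource->num_pages);
 *	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *		return ret;	// lock already handed off on retry
 *	dma_resv_unlock(bo->base.resv);
 *	return ret;
 */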
| 283 | ||||
| 284 | static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res) | |||
| 285 | { | |||
| 286 | struct page *dummy_page = (struct page *)res; | |||
| 287 | ||||
| 288 | __free_page(dummy_page); | |||
| 289 | } | |||
| 290 | ||||
| 291 | vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot) | |||
| 292 | { | |||
| 293 | struct vm_area_struct *vma = vmf->vma; | |||
| 294 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 295 | struct drm_device *ddev = bo->base.dev; | |||
| 296 | vm_fault_t ret = VM_FAULT_NOPAGE; | |||
| 297 | unsigned long address; | |||
| 298 | unsigned long pfn; | |||
| 299 | struct page *page; | |||
| 300 | ||||
| 301 | /* Allocate a new dummy page to map the whole VA range in this VMA to it */ | |||
| 302 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |||
| 303 | if (!page) | |||
| 304 | return VM_FAULT_OOM; | |||
| 305 | ||||
| 306 | /* Set the page to be freed using drmm release action */ | |||
| 307 | if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page)) | |||
| 308 | return VM_FAULT_OOM; | |||
| 309 | ||||
| 310 | pfn = page_to_pfn(page); | |||
| 311 | ||||
| 312 | /* Prefault the entire VMA range right away to avoid further faults */ | |||
| 313 | for (address = vma->vm_start; address < vma->vm_end; | |||
| 314 | address += PAGE_SIZE) | |||
| 315 | ret = vmf_insert_pfn_prot(vma, address, pfn, prot); | |||
| 316 | ||||
| 317 | return ret; | |||
| 318 | } | |||
| 319 | EXPORT_SYMBOL(ttm_bo_vm_dummy_page); | |||
| 320 | ||||
| 321 | vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) | |||
| 322 | { | |||
| 323 | struct vm_area_struct *vma = vmf->vma; | |||
| 324 | pgprot_t prot; | |||
| 325 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 326 | struct drm_device *ddev = bo->base.dev; | |||
| 327 | vm_fault_t ret; | |||
| 328 | int idx; | |||
| 329 | ||||
| 330 | ret = ttm_bo_vm_reserve(bo, vmf); | |||
| 331 | if (ret) | |||
| 332 | return ret; | |||
| 333 | ||||
| 334 | prot = vma->vm_page_prot; | |||
| 335 | if (drm_dev_enter(ddev, &idx)) { | |||
| 336 | ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); | |||
| 337 | drm_dev_exit(idx); | |||
| 338 | } else { | |||
| 339 | ret = ttm_bo_vm_dummy_page(vmf, prot); | |||
| 340 | } | |||
| 341 | if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) | |||
| 342 | return ret; | |||
| 343 | ||||
| 344 | dma_resv_unlock(bo->base.resv); | |||
| 345 | ||||
| 346 | return ret; | |||
| 347 | } | |||
| 348 | EXPORT_SYMBOL(ttm_bo_vm_fault); | |||
| 349 | ||||
| 350 | #else /* !__linux__ */ | |||
| 351 | ||||
| 352 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | |||
| 353 | struct uvm_faultinfo *ufi) | |||
| 354 | { | |||
| 355 | long err = 0; | |||
| 356 | ||||
| 357 | /* | |||
| 358 | * Quick non-stalling check for idle. | |||
| 359 | */ | |||
| 360 | if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL)) | |||
| 361 | return 0; | |||
| 362 | ||||
| 363 | #ifdef __linux__ | |||
| 364 | /* | |||
| 365 | * If possible, avoid waiting for GPU with mmap_lock | |||
| 366 | * held. We only do this if the fault allows retry and this | |||
| 367 | * is the first attempt. | |||
| 368 | */ | |||
| 369 | if (fault_flag_allow_retry_first(vmf->flags)) { | |||
| 370 | if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) | |||
| 371 | return VM_FAULT_RETRY; | |||
| 372 | ||||
| 373 | ttm_bo_get(bo); | |||
| 374 | mmap_read_unlock(vmf->vma->vm_mm); | |||
| 375 | (void) dma_fence_wait(bo->moving, true); | |||
| 376 | (void)dma_resv_wait_timeout(bo->base.resv, | |||
| 377 | DMA_RESV_USAGE_KERNEL, true, | |||
| 378 | MAX_SCHEDULE_TIMEOUT); | |||
| 379 | dma_resv_unlock(bo->base.resv); | |||
| 380 | ttm_bo_put(bo); | |||
| 381 | return VM_FAULT_RETRY; | |||
| 382 | } | |||
| 383 | #endif | |||
| 384 | ||||
| 385 | /* | |||
| 386 | * Ordinary wait. | |||
| 387 | */ | |||
| 388 | err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true, | |||
| 389 | MAX_SCHEDULE_TIMEOUT); | |||
| 390 | if (unlikely(err < 0)) { | |||
| 391 | return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS : | |||
| 392 | VM_FAULT_NOPAGE; | |||
| 393 | } | |||
| 394 | ||||
| 395 | return 0; | |||
| 396 | } | |||
| 397 | ||||
| 398 | static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, | |||
| 399 | unsigned long page_offset) | |||
| 400 | { | |||
| 401 | struct ttm_device *bdev = bo->bdev; | |||
| 402 | ||||
| 403 | if (bdev->funcs->io_mem_pfn) | |||
| 404 | return bdev->funcs->io_mem_pfn(bo, page_offset); | |||
| 405 | ||||
| 406 | return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset; | |||
| 407 | } | |||
| 408 | ||||
| 409 | /** | |||
| 410 | * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback | |||
| 411 | * @bo: The buffer object | |||
| 412 | * @vmf: The fault structure handed to the callback | |||
| 413 | * | |||
| 414 | * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped | |||
| 415 | * during long waits, and after the wait the callback will be restarted. This | |||
| 416 | * is to allow other threads using the same virtual memory space concurrent | |||
| 417 | * access to map(), unmap() completely unrelated buffer objects. TTM buffer | |||
| 418 | * object reservations sometimes wait for GPU and should therefore be | |||
| 419 | * considered long waits. This function reserves the buffer object interruptibly | |||
| 420 | * taking this into account. Starvation is avoided by the vm system not | |||
| 421 | * allowing too many repeated restarts. | |||
| 422 | * This function is intended to be used in customized fault() and _mkwrite() | |||
| 423 | * handlers. | |||
| 424 | * | |||
| 425 | * Return: | |||
| 426 | * 0 on success and the bo was reserved. | |||
| 427 | * VM_FAULT_RETRY if blocking wait. | |||
| 428 | * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. | |||
| 429 | */ | |||
| 430 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo) | |||
| 431 | { | |||
| 432 | /* | |||
| 433 | * Work around locking order reversal in fault / nopfn | |||
| 434 | * between mmap_lock and bo_reserve: Perform a trylock operation | |||
| 435 | * for reserve, and if it fails, retry the fault after waiting | |||
| 436 | * for the buffer to become unreserved. | |||
| 437 | */ | |||
| 438 | if (unlikely(!dma_resv_trylock(bo->base.resv))) { | |||
| 439 | #ifdef __linux__ | |||
| 440 | /* | |||
| 441 | * If the fault allows retry and this is the first | |||
| 442 | * fault attempt, we try to release the mmap_lock | |||
| 443 | * before waiting | |||
| 444 | */ | |||
| 445 | if (fault_flag_allow_retry_first(vmf->flags)) { | |||
| 446 | if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | |||
| 447 | ttm_bo_get(bo); | |||
| 448 | mmap_read_unlock(vmf->vma->vm_mm); | |||
| 449 | if (!dma_resv_lock_interruptible(bo->base.resv, | |||
| 450 | NULL)) | |||
| 451 | dma_resv_unlock(bo->base.resv); | |||
| 452 | ttm_bo_put(bo); | |||
| 453 | } | |||
| 454 | ||||
| 455 | return VM_FAULT_RETRY; | |||
| 456 | } | |||
| 457 | #endif | |||
| 458 | ||||
| 459 | if (dma_resv_lock_interruptible(bo->base.resv, NULL)) | |||
| 460 | return VM_FAULT_NOPAGE; | |||
| 461 | } | |||
| 462 | ||||
| 463 | /* | |||
| 464 | * Refuse to fault imported pages. This should be handled | |||
| 465 | * (if at all) by redirecting mmap to the exporter. | |||
| 466 | */ | |||
| 467 | if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { | |||
| 468 | if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) { | |||
| 469 | dma_resv_unlock(bo->base.resv); | |||
| 470 | return VM_FAULT_SIGBUS; | |||
| 471 | } | |||
| 472 | } | |||
| 473 | ||||
| 474 | return 0; | |||
| 475 | } | |||
| 476 | ||||
| 477 | vm_fault_t ttm_bo_vm_fault_reserved(struct uvm_faultinfo *ufi, | |||
| 478 | vaddr_t vaddr, | |||
| 479 | pgoff_t num_prefault, | |||
| 480 | pgoff_t fault_page_size) | |||
| 481 | { | |||
| 482 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; | |||
| 483 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; | |||
| 484 | struct ttm_device *bdev = bo->bdev; | |||
| 485 | unsigned long page_offset; | |||
| 486 | unsigned long page_last; | |||
| 487 | unsigned long pfn; | |||
| 488 | struct ttm_tt *ttm = NULL; | |||
| 489 | struct vm_page *page; | |||
| 490 | bus_addr_t addr; | |||
| 491 | paddr_t paddr; | |||
| 492 | vm_prot_t prot; | |||
| 493 | int pmap_flags; | |||
| 494 | int err; | |||
| 495 | pgoff_t i; | |||
| 496 | vm_fault_t ret = VM_FAULT_NOPAGE; | |||
| 497 | unsigned long address = (unsigned long)vaddr; | |||
| 498 | ||||
| 499 | /* | |||
| 500 | * Wait for buffer data in transit, due to a pipelined | |||
| 501 | * move. | |||
| 502 | */ | |||
| 503 | ret = ttm_bo_vm_fault_idle(bo, ufi); | |||
| 504 | if (unlikely(ret != 0)) | |||
| 505 | return ret; | |||
| 506 | ret = VM_FAULT_NOPAGE; | |||
| 507 | ||||
| 508 | err = ttm_mem_io_reserve(bdev, bo->resource); | |||
| 509 | if (unlikely(err != 0)) | |||
| 510 | return VM_FAULT_SIGBUS; | |||
| 511 | ||||
| 512 | page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) + | |||
| 513 | drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT); | |||
| 514 | page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) + | |||
| 515 | drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT); | |||
| 516 | ||||
| 517 | if (unlikely(page_offset >= bo->resource->num_pages)) | |||
| 518 | return VM_FAULT_SIGBUS; | |||
| 519 | ||||
| 520 | prot = ufi->entry->protection; | |||
| 521 | pmap_flags = ttm_io_prot(bo, bo->resource, 0); | |||
| 522 | if (!bo->resource->bus.is_iomem) { | |||
| 523 | struct ttm_operation_ctx ctx = { | |||
| 524 | .interruptible = false, | |||
| 525 | .no_wait_gpu = false, | |||
| 526 | .force_alloc = true | |||
| 527 | }; | |||
| 528 | ||||
| 529 | ttm = bo->ttm; | |||
| 530 | if (ttm_tt_populate(bdev, bo->ttm, &ctx)) | |||
| 531 | return VM_FAULT_OOM; | |||
| 532 | } | |||
| 533 | ||||
| 534 | #ifdef __linux__ | |||
| 535 | /* We don't prefault on huge faults. Yet. */ | |||
| 536 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) | |||
| 537 | return ttm_bo_vm_insert_huge(vmf, bo, page_offset, | |||
| 538 | fault_page_size, prot); | |||
| 539 | #endif | |||
| 540 | ||||
| 541 | /* | |||
| 542 | * Speculatively prefault a number of pages. Only error on | |||
| 543 | * first page. | |||
| 544 | */ | |||
| 545 | for (i = 0; i < num_prefault; ++i) { | |||
| 546 | if (bo->resource->bus.is_iomem) { | |||
| 547 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); | |||
| 548 | addr = pfn << PAGE_SHIFT; | |||
| 549 | paddr = bus_space_mmap(bdev->memt, addr, 0, prot, 0); | |||
| 550 | } else { | |||
| 551 | page = ttm->pages[page_offset]; | |||
| 552 | if (unlikely(!page && i == 0)) { | |||
| 553 | return VM_FAULT_OOM; | |||
| 554 | } else if (unlikely(!page)) { | |||
| 555 | break; | |||
| 556 | } | |||
| 557 | paddr = VM_PAGE_TO_PHYS(page); | |||
| 558 | } | |||
| 559 | ||||
| 560 | err = pmap_enter(ufi->orig_map->pmap, address, | |||
| 561 | paddr | pmap_flags, prot, PMAP_CANFAIL | prot); | |||
| 562 | ||||
| 563 | /* Never error on prefaulted PTEs */ | |||
| 564 | if (unlikely(err)) { | |||
| 565 | ret = VM_FAULT_OOM; | |||
| 566 | if (i == 0) | |||
| 567 | return VM_FAULT_NOPAGE; | |||
| 568 | else | |||
| 569 | break; | |||
| 570 | } | |||
| 571 | ||||
| 572 | address += PAGE_SIZE; | |||
| 573 | if (unlikely(++page_offset >= page_last)) | |||
| 574 | break; | |||
| 575 | } | |||
| 576 | pmap_update(ufi->orig_map->pmap); | |||
| 577 | return ret; | |||
| 578 | } | |||
| 579 | EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); | |||
| 580 | ||||
| 581 | int | |||
| 582 | ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, | |||
| 583 | int npages, int centeridx, vm_fault_t fault_type, | |||
| 584 | vm_prot_t access_type, int flags) | |||
| 585 | { | |||
| 586 | struct uvm_object *uobj = ufi->entry->object.uvm_obj; | |||
| 587 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; | |||
| 588 | vm_fault_t ret; | |||
| 589 | ||||
| 590 | ret = ttm_bo_vm_reserve(bo); | |||
| 591 | if (ret) { | |||
| 592 | switch (ret) { | |||
| 593 | case VM_FAULT_NOPAGE: | |||
| 594 | ret = VM_PAGER_OK; | |||
| 595 | break; | |||
| 596 | case VM_FAULT_RETRY: | |||
| 597 | ret = VM_PAGER_REFAULT; | |||
| 598 | break; | |||
| 599 | default: | |||
| 600 | ret = VM_PAGER_BAD; | |||
| 601 | break; | |||
| 602 | } | |||
| 603 | ||||
| 604 | uvmfault_unlockall(ufi, NULL, uobj); | |||
| 605 | return ret; | |||
| 606 | } | |||
| 607 | ||||
| 608 | ret = ttm_bo_vm_fault_reserved(ufi, vaddr, TTM_BO_VM_NUM_PREFAULT, 1); | |||
| 609 | switch (ret) { | |||
| 610 | case VM_FAULT_NOPAGE: | |||
| 611 | ret = VM_PAGER_OK; | |||
| 612 | break; | |||
| 613 | case VM_FAULT_RETRY: | |||
| 614 | ret = VM_PAGER_REFAULT; | |||
| 615 | break; | |||
| 616 | default: | |||
| 617 | ret = VM_PAGER_BAD; | |||
| 618 | break; | |||
| 619 | } | |||
| 620 | ||||
| 621 | dma_resv_unlock(bo->base.resv); | |||
| 622 | ||||
| 623 | uvmfault_unlockall(ufi, NULL, uobj); | |||
| 624 | return ret; | |||
| 625 | } | |||
| 626 | EXPORT_SYMBOL(ttm_bo_vm_fault); | |||
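/*
 * The VM_FAULT_* to VM_PAGER_* translation above appears twice in
 * ttm_bo_vm_fault(); a small helper could factor it out. Sketch only,
 * not present in the source:
 *
 *	static int ttm_fault_to_pager(vm_fault_t ret)
 *	{
 *		switch (ret) {
 *		case VM_FAULT_NOPAGE:
 *			return VM_PAGER_OK;
 *		case VM_FAULT_RETRY:
 *			return VM_PAGER_REFAULT;
 *		default:
 *			return VM_PAGER_BAD;
 *		}
 *	}
 */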
| 627 | ||||
| 628 | #endif /* !__linux__ */ | |||
| 629 | ||||
| 630 | #ifdef notyet | |||
| 631 | void ttm_bo_vm_open(struct vm_area_struct *vma) | |||
| 632 | { | |||
| 633 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 634 | ||||
| 635 | WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); | |||
| 636 | ||||
| 637 | ttm_bo_get(bo); | |||
| 638 | } | |||
| 639 | EXPORT_SYMBOL(ttm_bo_vm_open); | |||
| 640 | ||||
| 641 | void ttm_bo_vm_close(struct vm_area_struct *vma) | |||
| 642 | { | |||
| 643 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 644 | ||||
| 645 | ttm_bo_put(bo); | |||
| 646 | vma->vm_private_data = NULL; | |||
| 647 | } | |||
| 648 | EXPORT_SYMBOL(ttm_bo_vm_close); | |||
| 649 | ||||
| 650 | static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, | |||
| 651 | unsigned long offset, | |||
| 652 | uint8_t *buf, int len, int write) | |||
| 653 | { | |||
| 654 | unsigned long page = offset >> PAGE_SHIFT; | |||
| 655 | unsigned long bytes_left = len; | |||
| 656 | int ret; | |||
| 657 | ||||
| 658 | /* Copy a page at a time, that way no extra virtual address | |||
| 659 | * mapping is needed | |||
| 660 | */ | |||
| 661 | offset -= page << PAGE_SHIFT; | |||
| 662 | do { | |||
| 663 | unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); | |||
| 664 | struct ttm_bo_kmap_obj map; | |||
| 665 | void *ptr; | |||
| 666 | bool is_iomem; | |||
| 667 | ||||
| 668 | ret = ttm_bo_kmap(bo, page, 1, &map); | |||
| 669 | if (ret) | |||
| 670 | return ret; | |||
| 671 | ||||
| 672 | ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; | |||
| 673 | WARN_ON_ONCE(is_iomem); | |||
| 674 | if (write) | |||
| 675 | memcpy(ptr, buf, bytes); | |||
| 676 | else | |||
| 677 | memcpy(buf, ptr, bytes); | |||
| 678 | ttm_bo_kunmap(&map); | |||
| 679 | ||||
| 680 | page++; | |||
| 681 | buf += bytes; | |||
| 682 | bytes_left -= bytes; | |||
| 683 | offset = 0; | |||
| 684 | } while (bytes_left); | |||
| 685 | ||||
| 686 | return len; | |||
| 687 | } | |||
| 688 | ||||
| 689 | int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, | |||
| 690 | void *buf, int len, int write) | |||
| 691 | { | |||
| 692 | struct ttm_buffer_object *bo = vma->vm_private_data; | |||
| 693 | unsigned long offset = (addr) - vma->vm_start + | |||
| 694 | ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) | |||
| 695 | << PAGE_SHIFT); | |||
| 696 | int ret; | |||
| 697 | ||||
| 698 | if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages) | |||
| 699 | return -EIO; | |||
| 700 | ||||
| 701 | ret = ttm_bo_reserve(bo, true, false, NULL); | |||
| 702 | if (ret) | |||
| 703 | return ret; | |||
| 704 | ||||
| 705 | switch (bo->resource->mem_type) { | |||
| 706 | case TTM_PL_SYSTEM: | |||
| 707 | fallthrough; | |||
| 708 | case TTM_PL_TT: | |||
| 709 | ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); | |||
| 710 | break; | |||
| 711 | default: | |||
| 712 | if (bo->bdev->funcs->access_memory) | |||
| 713 | ret = bo->bdev->funcs->access_memory( | |||
| 714 | bo, offset, buf, len, write); | |||
| 715 | else | |||
| 716 | ret = -EIO; | |||
| 717 | } | |||
| 718 | ||||
| 719 | ttm_bo_unreserve(bo); | |||
| 720 | ||||
| 721 | return ret; | |||
| 722 | } | |||
| 723 | EXPORT_SYMBOL(ttm_bo_vm_access); | |||
| 724 | ||||
| 725 | static const struct vm_operations_struct ttm_bo_vm_ops = { | |||
| 726 | .fault = ttm_bo_vm_fault, | |||
| 727 | .open = ttm_bo_vm_open, | |||
| 728 | .close = ttm_bo_vm_close, | |||
| 729 | .access = ttm_bo_vm_access, | |||
| 730 | }; | |||
| 731 | #endif | |||
| 732 | ||||
| 733 | void | |||
| 734 | ttm_bo_vm_reference(struct uvm_object *uobj) | |||
| 735 | { | |||
| 736 | struct ttm_buffer_object *bo = | |||
| 737 | (struct ttm_buffer_object *)uobj; | |||
| 738 | ||||
| 739 | ttm_bo_get(bo); | |||
| 740 | } | |||
| 741 | ||||
| 742 | void | |||
| 743 | ttm_bo_vm_detach(struct uvm_object *uobj) | |||
| 744 | { | |||
| 745 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj; | |||
| 746 | ||||
| 747 | ttm_bo_put(bo); | |||
| 748 | } | |||
| 749 | ||||
| 750 | const struct uvm_pagerops ttm_bo_vm_ops = { | |||
| 751 | .pgo_fault = ttm_bo_vm_fault, | |||
| 752 | .pgo_reference = ttm_bo_vm_reference, | |||
| 753 | .pgo_detach = ttm_bo_vm_detach | |||
| 754 | }; | |||
| 755 | ||||
| 756 | #ifdef __linux__ | |||
| 757 | int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
| 758 | { | |||
| 759 | /* Enforce no COW since would have really strange behavior with it. */ | |||
| 760 | if (is_cow_mapping(vma->vm_flags)) | |||
| 761 | return -EINVAL; | |||
| 762 | ||||
| 763 | ttm_bo_get(bo); | |||
| 764 | ||||
| 765 | /* | |||
| 766 | * Drivers may want to override the vm_ops field. Otherwise we | |||
| 767 | * use TTM's default callbacks. | |||
| 768 | */ | |||
| 769 | if (!vma->vm_ops) | |||
| 770 | vma->vm_ops = &ttm_bo_vm_ops; | |||
| 771 | ||||
| 772 | /* | |||
| 773 | * Note: We're transferring the bo reference to | |||
| 774 | * vma->vm_private_data here. | |||
| 775 | */ | |||
| 776 | ||||
| 777 | vma->vm_private_data = bo; | |||
| 778 | ||||
| 779 | vma->vm_flags |= VM_PFNMAP; | |||
| 780 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | |||
| 781 | return 0; | |||
| 782 | } | |||
| 783 | EXPORT_SYMBOL(ttm_bo_mmap_obj); | |||
| 784 | #else /* !__linux__ */ | |||
| 785 | int ttm_bo_mmap_obj(struct ttm_buffer_object *bo) | |||
| 786 | { | |||
| 787 | /* Enforce no COW since would have really strange behavior with it. */ | |||
| 788 | #ifdef notyet | |||
| 789 | if (UVM_ET_ISCOPYONWRITE(entry)) | |||
| 790 | return -EINVAL; | |||
| 791 | #endif | |||
| 792 | ||||
| 793 | ttm_bo_get(bo); | |||
| 794 | ||||
| 795 | /* | |||
| 796 | * Drivers may want to override the vm_ops field. Otherwise we | |||
| 797 | * use TTM's default callbacks. | |||
| 798 | */ | |||
| 799 | if (bo->base.uobj.pgops == NULL) | |||
| 800 | uvm_obj_init(&bo->base.uobj, &ttm_bo_vm_ops, 1); | |||
| 801 | ||||
| 802 | /* | |||
| 803 | * Note: We're transferring the bo reference to | |||
| 804 | * vma->vm_private_data here. | |||
| 805 | */ | |||
| 806 | ||||
| 807 | #ifdef notyet | |||
| 808 | vma->vm_private_data = bo; | |||
| 809 | ||||
| 810 | vma->vm_flags |= VM_PFNMAP; | |||
| 811 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | |||
| 812 | #endif | |||
| 813 | return 0; | |||
| 814 | } | |||
| 815 | #endif /* !__linux__ */ |