File: dev/pci/drm/i915/gem/i915_gem_mman.c
Warning: line 895, column 21: Value stored to 'bdev' during its initialization is never read
1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2014-2016 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/anon_inodes.h> |
8 | #include <linux/mman.h> |
9 | #include <linux/pfn_t.h> |
10 | #include <linux/sizes.h> |
11 | |
12 | #include <drm/drm_cache.h> |
13 | |
14 | #include "gt/intel_gt.h" |
15 | #include "gt/intel_gt_requests.h" |
16 | |
17 | #include "i915_drv.h" |
18 | #include "i915_gem_evict.h" |
19 | #include "i915_gem_gtt.h" |
20 | #include "i915_gem_ioctls.h" |
21 | #include "i915_gem_object.h" |
22 | #include "i915_gem_mman.h" |
23 | #include "i915_mm.h" |
24 | #include "i915_trace.h" |
25 | #include "i915_user_extensions.h" |
26 | #include "i915_gem_ttm.h" |
27 | #include "i915_vma.h" |
28 | |
29 | #ifdef __linux__ |
30 | static inline bool |
31 | __vma_matches(struct vm_area_struct *vma, struct file *filp, |
32 | unsigned long addr, unsigned long size) |
33 | { |
34 | if (vma->vm_file != filp) |
35 | return false; |
36 | |
37 | return vma->vm_start == addr && |
38 | (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); |
39 | } |
40 | #endif |
41 | |
42 | /** |
43 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
44 | * it is mapped to. |
45 | * @dev: drm device |
46 | * @data: ioctl data blob |
47 | * @file: drm file |
48 | * |
49 | * While the mapping holds a reference on the contents of the object, it doesn't |
50 | * imply a ref on the object itself. |
51 | * |
52 | * IMPORTANT: |
53 | * |
54 | * DRM driver writers who look at this function as an example for how to do GEM |
55 | * mmap support, please don't implement mmap support like this. The modern way |
56 | * to implement DRM mmap support is with an mmap offset ioctl (like |
57 | * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. |
58 | * That way debug tooling like valgrind will understand what's going on; hiding |
59 | * the mmap call in a driver private ioctl will break that. The i915 driver only |
60 | * does cpu mmaps this way because we didn't know better. |
61 | */ |
62 | int |
63 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
64 | struct drm_file *file) |
65 | { |
66 | struct drm_i915_private *i915 = to_i915(dev); |
67 | struct drm_i915_gem_mmap *args = data; |
68 | struct drm_i915_gem_object *obj; |
69 | vaddr_t addr; |
70 | vsize_t size; |
71 | int ret; |
72 | |
73 | #ifdef __OpenBSD__ |
74 | if (args->size == 0 || args->offset & PAGE_MASK) |
75 | return -EINVAL; |
76 | size = round_page(args->size); |
77 | if (args->offset + size < args->offset) |
78 | return -EINVAL; |
79 | #endif |
80 | |
81 | /* |
82 | * mmap ioctl is disallowed for all discrete platforms, |
83 | * and for all platforms with GRAPHICS_VER > 12. |
84 | */ |
85 | if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0)) |
86 | return -EOPNOTSUPP; |
87 | |
88 | if (args->flags & ~(I915_MMAP_WC)) |
89 | return -EINVAL; |
90 | |
91 | if (args->flags & I915_MMAP_WC && !pat_enabled()) |
92 | return -ENODEV; |
93 | |
94 | obj = i915_gem_object_lookup(file, args->handle); |
95 | if (!obj) |
96 | return -ENOENT; |
97 | |
98 | /* prime objects have no backing filp to GEM mmap |
99 | * pages from. |
100 | */ |
101 | #ifdef __linux__ |
102 | if (!obj->base.filp) { |
103 | addr = -ENXIO; |
104 | goto err; |
105 | } |
106 | #else |
107 | if (!obj->base.uao) { |
108 | addr = -ENXIO; |
109 | goto err; |
110 | } |
111 | #endif |
112 | |
113 | if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { |
114 | addr = -EINVAL; |
115 | goto err; |
116 | } |
117 | |
118 | #ifdef __linux__ |
119 | addr = vm_mmap(obj->base.filp, 0, args->size, |
120 | PROT_READ | PROT_WRITE, MAP_SHARED, |
121 | args->offset); |
122 | if (IS_ERR_VALUE(addr)) |
123 | goto err; |
124 | |
125 | if (args->flags & I915_MMAP_WC) { |
126 | struct mm_struct *mm = current->mm; |
127 | struct vm_area_struct *vma; |
128 | |
129 | if (mmap_write_lock_killable(mm)) { |
130 | addr = -EINTR; |
131 | goto err; |
132 | } |
133 | vma = find_vma(mm, addr); |
134 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
135 | vma->vm_page_prot = |
136 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
137 | else |
138 | addr = -ENOMEM; |
139 | mmap_write_unlock(mm); |
140 | if (IS_ERR_VALUE(addr)) |
141 | goto err; |
142 | } |
143 | i915_gem_object_put(obj); |
144 | #else |
145 | addr = 0; |
146 | uao_reference(obj->base.uao); |
147 | ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size, |
148 | obj->base.uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, |
149 | PROT_READ | PROT_WRITE, MAP_INHERIT_SHARE, MADV_RANDOM, |
150 | (args->flags & I915_MMAP_WC) ? UVM_FLAG_WC : 0)); |
151 | if (ret != 0) |
152 | uao_detach(obj->base.uao); |
153 | i915_gem_object_put(obj); |
154 | if (ret) |
155 | return ret; |
156 | #endif |
157 | |
158 | args->addr_ptr = (u64)addr; |
159 | return 0; |
160 | |
161 | err: |
162 | i915_gem_object_put(obj); |
163 | return addr; |
164 | } |
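
For orientation, this is roughly how userspace drives the legacy ioctl serviced above. A minimal sketch, assuming the DRM uapi headers and an open i915 DRM fd; legacy_mmap is a hypothetical helper name. Per the guards in the function, the call fails with -EOPNOTSUPP on discrete GPUs and anything newer than graphics version 12, and I915_MMAP_WC additionally requires PAT support:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Hypothetical helper: CPU-map a whole GEM object via the legacy mmap ioctl. */
    static void *legacy_mmap(int fd, uint32_t handle, uint64_t size)
    {
        struct drm_i915_gem_mmap arg = {
            .handle = handle,
            .size = size,          /* offset 0: map the whole object */
            .flags = I915_MMAP_WC, /* needs PAT; pass 0 for a WB mapping */
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
            return NULL;           /* errno reflects the checks above */
        return (void *)(uintptr_t)arg.addr_ptr;
    }
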
165 | |
166 | static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) |
167 | { |
168 | return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; |
169 | } |
170 | |
171 | /** |
172 | * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps |
173 | * |
174 | * A history of the GTT mmap interface: |
175 | * |
176 | * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to |
177 | * be aligned and suitable for fencing, and still fit into the available |
178 | * mappable space left by the pinned display objects. A classic problem |
179 | * we called the page-fault-of-doom where we would ping-pong between |
180 | * two objects that could not fit inside the GTT and so the memcpy |
181 | * would page one object in at the expense of the other between every |
182 | * single byte. |
183 | * |
184 | * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none |
185 | * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the |
186 | * object is too large for the available space (or simply too large |
187 | * for the mappable aperture!), a view is created instead and faulted |
188 | * into userspace. (This view is aligned and sized appropriately for |
189 | * fenced access.) |
190 | * |
191 | * 2 - Recognise WC as a separate cache domain so that we can flush the |
192 | * delayed writes via GTT before performing direct access via WC. |
193 | * |
194 | * 3 - Remove implicit set-domain(GTT) and synchronisation on initial |
195 | * pagefault; swapin remains transparent. |
196 | * |
197 | * 4 - Support multiple fault handlers per object depending on object's |
198 | * backing storage (a.k.a. MMAP_OFFSET). |
199 | * |
200 | * Restrictions: |
201 | * |
202 | * * snoopable objects cannot be accessed via the GTT. It can cause machine |
203 | * hangs on some architectures, corruption on others. An attempt to service |
204 | * a GTT page fault from a snoopable object will generate a SIGBUS. |
205 | * |
206 | * * the object must be able to fit into RAM (physical memory, though not |
207 | * limited to the mappable aperture). |
208 | * |
209 | * |
210 | * Caveats: |
211 | * |
212 | * * a new GTT page fault will synchronize rendering from the GPU and flush |
213 | * all data to system memory. Subsequent access will not be synchronized. |
214 | * |
215 | * * all mappings are revoked on runtime device suspend. |
216 | * |
217 | * * there are only 8, 16 or 32 fence registers to share between all users |
218 | * (older machines require a fence register for display and blitter access |
219 | * as well). Contention of the fence registers will cause the previous users |
220 | * to be unmapped and any new access will generate new page faults. |
221 | * |
222 | * * running out of memory while servicing a fault may generate a SIGBUS, |
223 | * rather than the expected SIGSEGV. |
224 | */ |
225 | int i915_gem_mmap_gtt_version(void) |
226 | { |
227 | return 4; |
228 | } |
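
The version reported here is visible to userspace through the i915 GETPARAM interface. A minimal sketch, assuming the DRM uapi headers; query_gtt_version is a hypothetical helper name:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Hypothetical helper: ask the kernel which GTT mmap feature set it offers. */
    static int query_gtt_version(int fd)
    {
        int value = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_MMAP_GTT_VERSION,
            .value = &value,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
            return -1;  /* very old kernels predate this parameter */
        return value;   /* the code above reports 4 */
    }
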
229 | |
230 | static inline struct i915_gtt_view |
231 | compute_partial_view(const struct drm_i915_gem_object *obj, |
232 | pgoff_t page_offset, |
233 | unsigned int chunk) |
234 | { |
235 | struct i915_gtt_view view; |
236 | |
237 | if (i915_gem_object_is_tiled(obj)) |
238 | chunk = roundup(chunk, tile_row_pages(obj) ?: 1); |
239 | |
240 | view.type = I915_GTT_VIEW_PARTIAL; |
241 | view.partial.offset = rounddown(page_offset, chunk); |
242 | view.partial.size = |
243 | min_t(unsigned int, chunk, |
244 | (obj->base.size >> PAGE_SHIFT) - view.partial.offset); |
245 | |
246 | /* If the partial covers the entire object, just create a normal VMA. */ |
247 | if (chunk >= obj->base.size >> PAGE_SHIFT) |
248 | view.type = I915_GTT_VIEW_NORMAL; |
249 | |
250 | return view; |
251 | } |
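
To make the arithmetic concrete: with 4 KiB pages, MIN_CHUNK_PAGES is SZ_1M >> PAGE_SHIFT = 256 pages. For an untiled 16 MiB object (4096 pages) faulting at page_offset 1000, the view starts at rounddown(1000, 256) = page 768 and spans min(256, 4096 - 768) = 256 pages: a 1 MiB window covering pages 768-1023, which contains the faulting page. Only when the (possibly tile-row-rounded) chunk covers the whole object does the code fall back to a normal, full-object view.
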
252 | |
253 | #ifdef __linux__ |
254 | |
255 | static vm_fault_t i915_error_to_vmf_fault(int err) |
256 | { |
257 | switch (err) { |
258 | default: |
259 | WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); |
260 | fallthrough; |
261 | case -EIO: /* shmemfs failure from swap device */ |
262 | case -EFAULT: /* purged object */ |
263 | case -ENODEV: /* bad object, how did you get here! */ |
264 | case -ENXIO: /* unable to access backing store (on device) */ |
265 | return VM_FAULT_SIGBUS; |
266 | |
267 | case -ENOMEM: /* our allocation failure */ |
268 | return VM_FAULT_OOM; |
269 | |
270 | case 0: |
271 | case -EAGAIN: |
272 | case -ENOSPC: /* transient failure to evict? */ |
273 | case -ENOBUFS: /* temporarily out of fences? */ |
274 | case -ERESTARTSYS: |
275 | case -EINTR: |
276 | case -EBUSY: |
277 | /* |
278 | * EBUSY is ok: this just means that another thread |
279 | * already did the job. |
280 | */ |
281 | return VM_FAULT_NOPAGE; |
282 | } |
283 | } |
284 | |
285 | static vm_fault_t vm_fault_cpu(struct vm_fault *vmf) |
286 | { |
287 | struct vm_area_struct *area = vmf->vma; |
288 | struct i915_mmap_offset *mmo = area->vm_private_data; |
289 | struct drm_i915_gem_object *obj = mmo->obj; |
290 | resource_size_t iomap; |
291 | int err; |
292 | |
293 | /* Sanity check that we allow writing into this object */ |
294 | if (unlikely(i915_gem_object_is_readonly(obj) && |
295 | area->vm_flags & VM_WRITE)) |
296 | return VM_FAULT_SIGBUS; |
297 | |
298 | if (i915_gem_object_lock_interruptible(obj, NULL)) |
299 | return VM_FAULT_NOPAGE; |
300 | |
301 | err = i915_gem_object_pin_pages(obj); |
302 | if (err) |
303 | goto out; |
304 | |
305 | iomap = -1; |
306 | if (!i915_gem_object_has_struct_page(obj)) { |
307 | iomap = obj->mm.region->iomap.base; |
308 | iomap -= obj->mm.region->region.start; |
309 | } |
310 | |
311 | /* PTEs are revoked in obj->ops->put_pages() */ |
312 | err = remap_io_sg(area, |
313 | area->vm_start, area->vm_end - area->vm_start, |
314 | obj->mm.pages->sgl, iomap); |
315 | |
316 | if (area->vm_flags & VM_WRITE) { |
317 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
318 | obj->mm.dirty = true; |
319 | } |
320 | |
321 | i915_gem_object_unpin_pages(obj); |
322 | |
323 | out: |
324 | i915_gem_object_unlock(obj); |
325 | return i915_error_to_vmf_fault(err); |
326 | } |
327 | |
328 | static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) |
329 | { |
330 | #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) |
331 | struct vm_area_struct *area = vmf->vma; |
332 | struct i915_mmap_offset *mmo = area->vm_private_data; |
333 | struct drm_i915_gem_object *obj = mmo->obj; |
334 | struct drm_device *dev = obj->base.dev; |
335 | struct drm_i915_private *i915 = to_i915(dev); |
336 | struct intel_runtime_pm *rpm = &i915->runtime_pm; |
337 | struct i915_ggtt *ggtt = to_gt(i915)->ggtt; |
338 | bool write = area->vm_flags & VM_WRITE; |
339 | struct i915_gem_ww_ctx ww; |
340 | intel_wakeref_t wakeref; |
341 | struct i915_vma *vma; |
342 | pgoff_t page_offset; |
343 | int srcu; |
344 | int ret; |
345 | |
346 | /* We don't use vmf->pgoff since that has the fake offset */ |
347 | page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; |
348 | |
349 | trace_i915_gem_object_fault(obj, page_offset, true, write); |
350 | |
351 | wakeref = intel_runtime_pm_get(rpm); |
352 | |
353 | i915_gem_ww_ctx_init(&ww, true); |
354 | retry: |
355 | ret = i915_gem_object_lock(obj, &ww); |
356 | if (ret) |
357 | goto err_rpm; |
358 | |
359 | /* Sanity check that we allow writing into this object */ |
360 | if (i915_gem_object_is_readonly(obj) && write) { |
361 | ret = -EFAULT; |
362 | goto err_rpm; |
363 | } |
364 | |
365 | ret = i915_gem_object_pin_pages(obj); |
366 | if (ret) |
367 | goto err_rpm; |
368 | |
369 | ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); |
370 | if (ret) |
371 | goto err_pages; |
372 | |
373 | /* Now pin it into the GTT as needed */ |
374 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, |
375 | PIN_MAPPABLE | |
376 | PIN_NONBLOCK /* NOWARN */ | |
377 | PIN_NOEVICT); |
378 | if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { |
379 | /* Use a partial view if it is bigger than available space */ |
380 | struct i915_gtt_view view = |
381 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); |
382 | unsigned int flags; |
383 | |
384 | flags = PIN_MAPPABLE | PIN_NOSEARCH; |
385 | if (view.type == I915_GTT_VIEW_NORMAL) |
386 | flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ |
387 | |
388 | /* |
389 | * Userspace is now writing through an untracked VMA, abandon |
390 | * all hope that the hardware is able to track future writes. |
391 | */ |
392 | |
393 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
394 | if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { |
395 | flags = PIN_MAPPABLE; |
396 | view.type = I915_GTT_VIEW_PARTIAL; |
397 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
398 | } |
399 | |
400 | /* |
401 | * The entire mappable GGTT is pinned? Unexpected! |
402 | * Try to evict the object we locked too, as normally we skip it |
403 | * due to lack of short term pinning inside execbuf. |
404 | */ |
405 | if (vma == ERR_PTR(-ENOSPC)) { |
406 | ret = mutex_lock_interruptible(&ggtt->vm.mutex); |
407 | if (!ret) { |
408 | ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); |
409 | mutex_unlock(&ggtt->vm.mutex); |
410 | } |
410 | } |
411 | if (ret) |
412 | goto err_reset; |
413 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
414 | } |
415 | } |
416 | if (IS_ERR(vma)) { |
417 | ret = PTR_ERR(vma); |
418 | goto err_reset; |
419 | } |
420 | |
421 | /* Access to snoopable pages through the GTT is incoherent. */ |
422 | if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) { |
423 | ret = -EFAULT; |
424 | goto err_unpin; |
425 | } |
426 | |
427 | ret = i915_vma_pin_fence(vma); |
428 | if (ret) |
429 | goto err_unpin; |
430 | |
431 | /* Finally, remap it using the new GTT offset */ |
432 | ret = remap_io_mapping(area, |
433 | area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT), |
434 | (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, |
435 | min_t(u64, vma->size, area->vm_end - area->vm_start), |
436 | &ggtt->iomap); |
437 | if (ret) |
438 | goto err_fence; |
439 | |
440 | assert_rpm_wakelock_held(rpm); |
441 | |
442 | /* Mark as being mmapped into userspace for later revocation */ |
443 | mutex_lock(&to_gt(i915)->ggtt->vm.mutex); |
444 | if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) |
445 | list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list); |
446 | mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); |
447 | |
448 | /* Track the mmo associated with the fenced vma */ |
449 | vma->mmo = mmo; |
450 | |
451 | if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) |
452 | intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, |
453 | msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); |
454 | |
455 | if (write) { |
456 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
457 | i915_vma_set_ggtt_write(vma); |
458 | obj->mm.dirty = true; |
459 | } |
460 | |
461 | err_fence: |
462 | i915_vma_unpin_fence(vma); |
463 | err_unpin: |
464 | __i915_vma_unpin(vma); |
465 | err_reset: |
466 | intel_gt_reset_unlock(ggtt->vm.gt, srcu); |
467 | err_pages: |
468 | i915_gem_object_unpin_pages(obj); |
469 | err_rpm: |
470 | if (ret == -EDEADLK) { |
471 | ret = i915_gem_ww_ctx_backoff(&ww); |
472 | if (!ret) |
473 | goto retry; |
474 | } |
475 | i915_gem_ww_ctx_fini(&ww); |
476 | intel_runtime_pm_put(rpm, wakeref); |
477 | return i915_error_to_vmf_fault(ret); |
478 | } |
479 | |
480 | static int |
481 | vm_access(struct vm_area_struct *area, unsigned long addr, |
482 | void *buf, int len, int write) |
483 | { |
484 | struct i915_mmap_offset *mmo = area->vm_private_data; |
485 | struct drm_i915_gem_object *obj = mmo->obj; |
486 | struct i915_gem_ww_ctx ww; |
487 | void *vaddr; |
488 | int err = 0; |
489 | |
490 | if (i915_gem_object_is_readonly(obj) && write) |
491 | return -EACCES; |
492 | |
493 | addr -= area->vm_start; |
494 | if (range_overflows_t(u64, addr, len, obj->base.size)) |
495 | return -EINVAL; |
496 | |
497 | i915_gem_ww_ctx_init(&ww, true); |
498 | retry: |
499 | err = i915_gem_object_lock(obj, &ww); |
500 | if (err) |
501 | goto out; |
502 | |
503 | /* As this is primarily for debugging, let's focus on simplicity */ |
504 | vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC); |
505 | if (IS_ERR(vaddr)) { |
506 | err = PTR_ERR(vaddr); |
507 | goto out; |
508 | } |
509 | |
510 | if (write) { |
511 | memcpy(vaddr + addr, buf, len); |
512 | __i915_gem_object_flush_map(obj, addr, len); |
513 | } else { |
514 | memcpy(buf, vaddr + addr, len); |
515 | } |
516 | |
517 | i915_gem_object_unpin_map(obj); |
518 | out: |
519 | if (err == -EDEADLK) { |
520 | err = i915_gem_ww_ctx_backoff(&ww); |
521 | if (!err) |
522 | goto retry; |
523 | } |
524 | i915_gem_ww_ctx_fini(&ww); |
525 | |
526 | if (err) |
527 | return err; |
528 | |
529 | return len; |
530 | } |
531 | |
532 | #else /* !__linux__ */ |
533 | |
534 | static int i915_error_to_vmf_fault(int err) |
535 | { |
536 | switch (err) { |
537 | default: |
538 | WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); |
539 | fallthrough; |
540 | case -EIO: /* shmemfs failure from swap device */ |
541 | case -EFAULT: /* purged object */ |
542 | case -ENODEV: /* bad object, how did you get here! */ |
543 | case -ENXIO: /* unable to access backing store (on device) */ |
544 | return VM_PAGER_ERROR; |
545 | |
546 | case -ENOMEM: /* our allocation failure */ |
547 | return VM_PAGER_ERROR; |
548 | |
549 | case 0: |
550 | case -EAGAIN: |
551 | case -ENOSPC: /* transient failure to evict? */ |
552 | case -ENOBUFS: /* temporarily out of fences? */ |
553 | case -ERESTART: |
554 | case -EINTR: |
555 | case -EBUSY: |
556 | /* |
557 | * EBUSY is ok: this just means that another thread |
558 | * already did the job. |
559 | */ |
560 | return VM_PAGER_OK; |
561 | } |
562 | } |
563 | |
564 | static int |
565 | vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi, |
566 | vm_prot_t access_type) |
567 | { |
568 | struct vm_map_entry *entry = ufi->entry; |
569 | struct drm_i915_gem_object *obj = mmo->obj; |
570 | int write = !!(access_type & PROT_WRITE); |
571 | struct sg_table *pages; |
572 | struct sg_page_iter sg_iter; |
573 | vm_prot_t mapprot; |
574 | vaddr_t va = entry->start; |
575 | paddr_t pa, pa_flags = 0; |
576 | int flags; |
577 | int err; |
578 | |
579 | /* Sanity check that we allow writing into this object */ |
580 | if (unlikely(i915_gem_object_is_readonly(obj) && write)) { |
581 | uvmfault_unlockall(ufi, NULL, &obj->base.uobj); |
582 | return VM_PAGER_BAD; |
583 | } |
584 | |
585 | if (i915_gem_object_lock_interruptible(obj, NULL)) |
586 | return VM_PAGER_ERROR; |
587 | |
588 | err = i915_gem_object_pin_pages(obj); |
589 | if (err) |
590 | goto out; |
591 | |
592 | flags = mapprot = entry->protection; |
593 | if (write == 0) |
594 | flags &= ~PROT_WRITE; |
595 | |
596 | switch (mmo->mmap_type) { |
597 | case I915_MMAP_TYPE_WC: |
598 | pa_flags |= PMAP_WC; |
599 | break; |
600 | case I915_MMAP_TYPE_UC: |
601 | pa_flags |= PMAP_NOCACHE; |
602 | break; |
603 | default: |
604 | break; |
605 | } |
606 | |
607 | pages = obj->mm.pages; |
608 | for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { |
609 | pa = sg_page_iter_dma_address(&sg_iter); |
610 | if (pmap_enter(ufi->orig_map->pmap, va, pa | pa_flags, |
611 | mapprot, PMAP_CANFAIL | flags)) { |
612 | err = -ENOMEM; |
613 | break; |
614 | } |
615 | va += PAGE_SIZE; |
616 | } |
617 | pmap_update(ufi->orig_map->pmap); |
618 | |
619 | if (write) { |
620 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
621 | obj->mm.dirty = true; |
622 | } |
623 | |
624 | i915_gem_object_unpin_pages(obj); |
625 | |
626 | out: |
627 | i915_gem_object_unlock(obj); |
628 | uvmfault_unlockall(ufi, NULL, &obj->base.uobj); |
629 | return i915_error_to_vmf_fault(err); |
630 | } |
631 | |
632 | int |
633 | remap_io_mapping(pmap_t pm, vm_prot_t mapprot, |
634 | vaddr_t va, unsigned long pfn, unsigned long size) |
635 | { |
636 | vaddr_t end = va + size; |
637 | paddr_t pa = ptoa(pfn); |
638 | |
639 | while (va < end) { |
640 | if (pmap_enter(pm, va, pa | PMAP_WC, mapprot, PMAP_CANFAIL | mapprot)) |
641 | return -ENOMEM; |
642 | va += PAGE_SIZE; |
643 | pa += PAGE_SIZE; |
644 | } |
645 | |
646 | return 0; |
647 | } |
648 | |
649 | static int |
650 | vm_fault_gtt(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi, |
651 | vaddr_t vaddr, vm_prot_t access_type) |
652 | { |
653 | #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) |
654 | struct vm_map_entry *entry = ufi->entry; |
655 | struct drm_i915_gem_object *obj = mmo->obj; |
656 | struct drm_device *dev = obj->base.dev; |
657 | struct drm_i915_private *i915 = to_i915(dev); |
658 | struct intel_runtime_pm *rpm = &i915->runtime_pm; |
659 | struct i915_ggtt *ggtt = to_gt(i915)->ggtt; |
660 | int write = !!(access_type & PROT_WRITE); |
661 | struct i915_gem_ww_ctx ww; |
662 | intel_wakeref_t wakeref; |
663 | struct i915_vma *vma; |
664 | pgoff_t page_offset; |
665 | int srcu; |
666 | int ret; |
667 | |
668 | /* We don't use vmf->pgoff since that has the fake offset */ |
669 | page_offset = (vaddr - entry->start) >> PAGE_SHIFT; |
670 | |
671 | trace_i915_gem_object_fault(obj, page_offset, true, write); |
672 | |
673 | wakeref = intel_runtime_pm_get(rpm); |
674 | |
675 | i915_gem_ww_ctx_init(&ww, true); |
676 | retry: |
677 | ret = i915_gem_object_lock(obj, &ww); |
678 | if (ret) |
679 | goto err_rpm; |
680 | |
681 | /* Sanity check that we allow writing into this object */ |
682 | if (i915_gem_object_is_readonly(obj) && write) { |
683 | ret = -EFAULT; |
684 | goto err_rpm; |
685 | } |
686 | |
687 | ret = i915_gem_object_pin_pages(obj); |
688 | if (ret) |
689 | goto err_rpm; |
690 | |
691 | ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); |
692 | if (ret) |
693 | goto err_pages; |
694 | |
695 | /* Now pin it into the GTT as needed */ |
696 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, |
697 | PIN_MAPPABLE | |
698 | PIN_NONBLOCK /* NOWARN */ | |
699 | PIN_NOEVICT); |
700 | if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { |
701 | /* Use a partial view if it is bigger than available space */ |
702 | struct i915_gtt_view view = |
703 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); |
704 | unsigned int flags; |
705 | |
706 | flags = PIN_MAPPABLE | PIN_NOSEARCH; |
707 | if (view.type == I915_GTT_VIEW_NORMAL) |
708 | flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ |
709 | |
710 | /* |
711 | * Userspace is now writing through an untracked VMA, abandon |
712 | * all hope that the hardware is able to track future writes. |
713 | */ |
714 | |
715 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
716 | if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { |
717 | flags = PIN_MAPPABLE; |
718 | view.type = I915_GTT_VIEW_PARTIAL; |
719 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
720 | } |
721 | |
722 | /* |
723 | * The entire mappable GGTT is pinned? Unexpected! |
724 | * Try to evict the object we locked too, as normally we skip it |
725 | * due to lack of short term pinning inside execbuf. |
726 | */ |
727 | if (vma == ERR_PTR(-ENOSPC)) { |
728 | ret = mutex_lock_interruptible(&ggtt->vm.mutex); |
729 | if (!ret) { |
730 | ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); |
731 | mutex_unlock(&ggtt->vm.mutex); |
732 | } |
732 | } |
733 | if (ret) |
734 | goto err_reset; |
735 | vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); |
736 | } |
737 | } |
738 | if (IS_ERR(vma)) { |
739 | ret = PTR_ERR(vma); |
740 | goto err_reset; |
741 | } |
742 | |
743 | /* Access to snoopable pages through the GTT is incoherent. */ |
744 | if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) { |
745 | ret = -EFAULT; |
746 | goto err_unpin; |
747 | } |
748 | |
749 | ret = i915_vma_pin_fence(vma); |
750 | if (ret) |
751 | goto err_unpin; |
752 | |
753 | /* Finally, remap it using the new GTT offset */ |
754 | ret = remap_io_mapping(ufi->orig_map->pmap, entry->protection, |
755 | entry->start + (vma->gtt_view.partial.offset << PAGE_SHIFT), |
756 | (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, |
757 | min_t(u64, vma->size, entry->end - entry->start)); |
758 | if (ret) |
759 | goto err_fence; |
760 | |
761 | assert_rpm_wakelock_held(rpm); |
762 | |
763 | /* Mark as being mmapped into userspace for later revocation */ |
764 | mutex_lock(&to_gt(i915)->ggtt->vm.mutex); |
765 | if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) |
766 | list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list); |
767 | mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); |
768 | |
769 | /* Track the mmo associated with the fenced vma */ |
770 | vma->mmo = mmo; |
771 | |
772 | if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) |
773 | intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, |
774 | msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); |
775 | |
776 | if (write) { |
777 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
778 | i915_vma_set_ggtt_write(vma); |
779 | obj->mm.dirty = true; |
780 | } |
781 | |
782 | err_fence: |
783 | i915_vma_unpin_fence(vma); |
784 | err_unpin: |
785 | __i915_vma_unpin(vma); |
786 | err_reset: |
787 | intel_gt_reset_unlock(ggtt->vm.gt, srcu); |
788 | err_pages: |
789 | i915_gem_object_unpin_pages(obj); |
790 | err_rpm: |
791 | if (ret == -EDEADLK) { |
792 | ret = i915_gem_ww_ctx_backoff(&ww); |
793 | if (!ret) |
794 | goto retry; |
795 | } |
796 | i915_gem_ww_ctx_fini(&ww); |
797 | intel_runtime_pm_put(rpm, wakeref); |
798 | uvmfault_unlockall(ufi, NULL, &obj->base.uobj); |
799 | return i915_error_to_vmf_fault(ret); |
800 | } |
801 | |
802 | int |
803 | i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi, |
804 | off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx, |
805 | vm_prot_t access_type, int flags) |
806 | { |
807 | struct drm_vma_offset_node *node; |
808 | struct drm_device *dev = gem_obj->dev; |
809 | struct vm_map_entry *entry = ufi->entry; |
810 | vsize_t size = entry->end - entry->start; |
811 | struct i915_mmap_offset *mmo = NULL; |
812 | |
813 | drm_vma_offset_lock_lookup(dev->vma_offset_manager); |
814 | node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, |
815 | entry->offset >> PAGE_SHIFT, |
816 | size >> PAGE_SHIFT); |
817 | if (likely(node)) |
818 | mmo = container_of(node, struct i915_mmap_offset, vma_node); |
819 | drm_vma_offset_unlock_lookup(dev->vma_offset_manager); |
820 | if (!mmo) { |
821 | uvmfault_unlockall(ufi, NULL, &gem_obj->uobj); |
822 | return VM_PAGER_BAD; |
823 | } |
824 | |
825 | KASSERT(gem_obj == &mmo->obj->base); |
826 | |
827 | if (mmo->mmap_type == I915_MMAP_TYPE_GTT) |
828 | return vm_fault_gtt(mmo, ufi, vaddr, access_type); |
829 | |
830 | return vm_fault_cpu(mmo, ufi, access_type); |
831 | } |
832 | |
833 | #endif /* !__linux__ */ |
834 | |
835 | void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) |
836 | { |
837 | struct i915_vma *vma; |
838 | |
839 | GEM_BUG_ON(!obj->userfault_count); |
840 | |
841 | for_each_ggtt_vma(vma, obj) |
842 | i915_vma_revoke_mmap(vma); |
843 | |
844 | GEM_BUG_ON(obj->userfault_count); |
845 | } |
846 | |
847 | /* |
848 | * It is vital that we remove the page mapping if we have mapped a tiled |
849 | * object through the GTT and then lose the fence register due to |
850 | * resource pressure. Similarly if the object has been moved out of the |
851 | * aperture, then pages mapped into userspace must be revoked. Removing the |
852 | * mapping will then trigger a page fault on the next user access, allowing |
853 | * fixup by vm_fault_gtt(). |
854 | */ |
855 | void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) |
856 | { |
857 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
858 | intel_wakeref_t wakeref; |
859 | |
860 | /* |
861 | * Serialisation between user GTT access and our code depends upon |
862 | * revoking the CPU's PTE whilst the mutex is held. The next user |
863 | * pagefault then has to wait until we release the mutex. |
864 | * |
865 | * Note that RPM complicates somewhat by adding an additional |
866 | * requirement that operations to the GGTT be made holding the RPM |
867 | * wakeref. |
868 | */ |
869 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); |
870 | mutex_lock(&to_gt(i915)->ggtt->vm.mutex); |
871 | |
872 | if (!obj->userfault_count) |
873 | goto out; |
874 | |
875 | __i915_gem_object_release_mmap_gtt(obj); |
876 | |
877 | /* |
878 | * Ensure that the CPU's PTE are revoked and there are not outstanding |
879 | * memory transactions from userspace before we return. The TLB |
880 | * flushing implied by changing the PTE above *should* be |
881 | * sufficient, an extra barrier here just provides us with a bit |
882 | * of paranoid documentation about our requirement to serialise |
883 | * memory writes before touching registers / GSM. |
884 | */ |
885 | wmb(); |
886 | |
887 | out: |
888 | mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); |
889 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); |
890 | } |
891 | |
892 | void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj) |
893 | { |
894 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
895 | struct ttm_device *bdev = bo->bdev; |
| Value stored to 'bdev' during its initialization is never read |
896 | |
897 | #ifdef __linux__ |
898 | drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); |
899 | #endif |
900 | |
901 | /* |
902 | * We have exclusive access here via runtime suspend. All other callers |
903 | * must first grab the rpm wakeref. |
904 | */ |
905 | GEM_BUG_ON(!obj->userfault_count); |
906 | list_del(&obj->userfault_link); |
907 | obj->userfault_count = 0; |
908 | } |
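
The warning flagged at the top of this report lands in the function above: only the `#ifdef __linux__` branch ever reads `bdev`, so on OpenBSD the initializer is a dead store. A minimal sketch of one way to silence it, assuming no other use of `bdev` is planned, is to move the declaration under the same guard as its only reader:

    struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
    #ifdef __linux__
    struct ttm_device *bdev = bo->bdev;

    drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
    #endif
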
909 | |
910 | void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) |
911 | { |
912 | struct i915_mmap_offset *mmo, *mn; |
913 | |
914 | if (obj->ops->unmap_virtual) |
915 | obj->ops->unmap_virtual(obj); |
916 | |
917 | spin_lock(&obj->mmo.lock); |
918 | rbtree_postorder_for_each_entry_safe(mmo, mn, |
919 | &obj->mmo.offsets, offset) { |
920 | /* |
921 | * vma_node_unmap for GTT mmaps handled already in |
922 | * __i915_gem_object_release_mmap_gtt |
923 | */ |
924 | if (mmo->mmap_type == I915_MMAP_TYPE_GTT) |
925 | continue; |
926 | |
927 | spin_unlock(&obj->mmo.lock); |
928 | #ifdef __linux__ |
929 | drm_vma_node_unmap(&mmo->vma_node, |
930 | obj->base.dev->anon_inode->i_mapping); |
931 | #endif |
932 | spin_lock(&obj->mmo.lock); |
933 | } |
934 | spin_unlock(&obj->mmo.lock); |
935 | } |
936 | |
937 | static struct i915_mmap_offset * |
938 | lookup_mmo(struct drm_i915_gem_object *obj, |
939 | enum i915_mmap_type mmap_type) |
940 | { |
941 | struct rb_node *rb; |
942 | |
943 | spin_lock(&obj->mmo.lock); |
944 | rb = obj->mmo.offsets.rb_node; |
945 | while (rb) { |
946 | struct i915_mmap_offset *mmo = |
947 | rb_entry(rb, typeof(*mmo), offset); |
948 | |
949 | if (mmo->mmap_type == mmap_type) { |
950 | spin_unlock(&obj->mmo.lock); |
951 | return mmo; |
952 | } |
953 | |
954 | if (mmo->mmap_type < mmap_type) |
955 | rb = rb->rb_right; |
956 | else |
957 | rb = rb->rb_left; |
958 | } |
959 | spin_unlock(&obj->mmo.lock); |
960 | |
961 | return NULL; |
962 | } |
963 | |
964 | static struct i915_mmap_offset * |
965 | insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo) |
966 | { |
967 | struct rb_node *rb, **p; |
968 | |
969 | spin_lock(&obj->mmo.lock); |
970 | rb = NULL; |
971 | p = &obj->mmo.offsets.rb_node; |
972 | while (*p) { |
973 | struct i915_mmap_offset *pos; |
974 | |
975 | rb = *p; |
976 | pos = rb_entry(rb, typeof(*pos), offset); |
977 | |
978 | if (pos->mmap_type == mmo->mmap_type) { |
979 | spin_unlock(&obj->mmo.lock); |
980 | drm_vma_offset_remove(obj->base.dev->vma_offset_manager, |
981 | &mmo->vma_node); |
982 | kfree(mmo); |
983 | return pos; |
984 | } |
985 | |
986 | if (pos->mmap_type < mmo->mmap_type) |
987 | p = &rb->rb_right; |
988 | else |
989 | p = &rb->rb_left; |
990 | } |
991 | rb_link_node(&mmo->offset, rb, p); |
992 | rb_insert_color(&mmo->offset, &obj->mmo.offsets); |
993 | spin_unlock(&obj->mmo.lock); |
994 | |
995 | return mmo; |
996 | } |
997 | |
998 | static struct i915_mmap_offset * |
999 | mmap_offset_attach(struct drm_i915_gem_object *obj, |
1000 | enum i915_mmap_type mmap_type, |
1001 | struct drm_file *file) |
1002 | { |
1003 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
1004 | struct i915_mmap_offset *mmo; |
1005 | int err; |
1006 | |
1007 | GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops); |
1008 | |
1009 | mmo = lookup_mmo(obj, mmap_type); |
1010 | if (mmo) |
1011 | goto out; |
1012 | |
1013 | mmo = kmalloc(sizeof(*mmo), GFP_KERNEL); |
1014 | if (!mmo) |
1015 | return ERR_PTR(-ENOMEM); |
1016 | |
1017 | mmo->obj = obj; |
1018 | mmo->mmap_type = mmap_type; |
1019 | drm_vma_node_reset(&mmo->vma_node); |
1020 | |
1021 | err = drm_vma_offset_add(obj->base.dev->vma_offset_manager, |
1022 | &mmo->vma_node, obj->base.size / PAGE_SIZE); |
1023 | if (likely(!err)) |
1024 | goto insert; |
1025 | |
1026 | /* Attempt to reap some mmap space from dead objects */ |
1027 | err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT, |
1028 | NULL); |
1029 | if (err) |
1030 | goto err; |
1031 | |
1032 | i915_gem_drain_freed_objects(i915); |
1033 | err = drm_vma_offset_add(obj->base.dev->vma_offset_manager, |
1034 | &mmo->vma_node, obj->base.size / PAGE_SIZE); |
1035 | if (err) |
1036 | goto err; |
1037 | |
1038 | insert: |
1039 | mmo = insert_mmo(obj, mmo); |
1040 | GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo); |
1041 | out: |
1042 | if (file) |
1043 | drm_vma_node_allow_once(&mmo->vma_node, file); |
1044 | return mmo; |
1045 | |
1046 | err: |
1047 | kfree(mmo); |
1048 | return ERR_PTR(err); |
1049 | } |
1050 | |
1051 | static int |
1052 | __assign_mmap_offset(struct drm_i915_gem_object *obj, |
1053 | enum i915_mmap_type mmap_type, |
1054 | u64 *offset, struct drm_file *file) |
1055 | { |
1056 | struct i915_mmap_offset *mmo; |
1057 | |
1058 | if (i915_gem_object_never_mmap(obj)) |
1059 | return -ENODEV; |
1060 | |
1061 | if (obj->ops->mmap_offset) { |
1062 | if (mmap_type != I915_MMAP_TYPE_FIXED) |
1063 | return -ENODEV; |
1064 | |
1065 | *offset = obj->ops->mmap_offset(obj); |
1066 | return 0; |
1067 | } |
1068 | |
1069 | if (mmap_type == I915_MMAP_TYPE_FIXED) |
1070 | return -ENODEV; |
1071 | |
1072 | if (mmap_type != I915_MMAP_TYPE_GTT && |
1073 | !i915_gem_object_has_struct_page(obj) && |
1074 | !i915_gem_object_has_iomem(obj)) |
1075 | return -ENODEV; |
1076 | |
1077 | mmo = mmap_offset_attach(obj, mmap_type, file); |
1078 | if (IS_ERR(mmo)) |
1079 | return PTR_ERR(mmo); |
1080 | |
1081 | *offset = drm_vma_node_offset_addr(&mmo->vma_node); |
1082 | return 0; |
1083 | } |
1084 | |
1085 | static int |
1086 | __assign_mmap_offset_handle(struct drm_file *file, |
1087 | u32 handle, |
1088 | enum i915_mmap_type mmap_type, |
1089 | u64 *offset) |
1090 | { |
1091 | struct drm_i915_gem_object *obj; |
1092 | int err; |
1093 | |
1094 | obj = i915_gem_object_lookup(file, handle); |
1095 | if (!obj) |
1096 | return -ENOENT; |
1097 | |
1098 | err = i915_gem_object_lock_interruptible(obj, NULL); |
1099 | if (err) |
1100 | goto out_put; |
1101 | err = __assign_mmap_offset(obj, mmap_type, offset, file); |
1102 | i915_gem_object_unlock(obj); |
1103 | out_put: |
1104 | i915_gem_object_put(obj); |
1105 | return err; |
1106 | } |
1107 | |
1108 | int |
1109 | i915_gem_dumb_mmap_offset(struct drm_file *file, |
1110 | struct drm_device *dev, |
1111 | u32 handle, |
1112 | u64 *offset) |
1113 | { |
1114 | struct drm_i915_private *i915 = to_i915(dev); |
1115 | enum i915_mmap_type mmap_type; |
1116 | |
1117 | if (HAS_LMEM(to_i915(dev))) |
1118 | mmap_type = I915_MMAP_TYPE_FIXED; |
1119 | else if (pat_enabled()) |
1120 | mmap_type = I915_MMAP_TYPE_WC; |
1121 | else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) |
1122 | return -ENODEV; |
1123 | else |
1124 | mmap_type = I915_MMAP_TYPE_GTT; |
1125 | |
1126 | return __assign_mmap_offset_handle(file, handle, mmap_type, offset); |
1127 | } |
1128 | |
1129 | /** |
1130 | * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing |
1131 | * @dev: DRM device |
1132 | * @data: GTT mapping ioctl data |
1133 | * @file: GEM object info |
1134 | * |
1135 | * Simply returns the fake offset to userspace so it can mmap it. |
1136 | * The mmap call will end up in drm_gem_mmap(), which will set things |
1137 | * up so we can get faults in the handler above. |
1138 | * |
1139 | * The fault handler will take care of binding the object into the GTT |
1140 | * (since it may have been evicted to make room for something), allocating |
1141 | * a fence register, and mapping the appropriate aperture address into |
1142 | * userspace. |
1143 | */ |
1144 | int |
1145 | i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, |
1146 | struct drm_file *file) |
1147 | { |
1148 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev); |
1149 | struct drm_i915_gem_mmap_offset *args = data; |
1150 | enum i915_mmap_type type; |
1151 | int err; |
1152 | |
1153 | /* |
1154 | * Historically we failed to check args.pad and args.offset |
1155 | * and so we cannot use those fields for user input and we cannot |
1156 | * add -EINVAL for them as the ABI is fixed, i.e. old userspace |
1157 | * may be feeding in garbage in those fields. |
1158 | * |
1159 | * if (args->pad) return -EINVAL; is verboten! |
1160 | */ |
1161 | |
1162 | err = i915_user_extensions(u64_to_user_ptr(args->extensions), |
1163 | NULL, 0, NULL); |
1164 | if (err) |
1165 | return err; |
1166 | |
1167 | switch (args->flags) { |
1168 | case I915_MMAP_OFFSET_GTT: |
1169 | if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) |
1170 | return -ENODEV; |
1171 | type = I915_MMAP_TYPE_GTT; |
1172 | break; |
1173 | |
1174 | case I915_MMAP_OFFSET_WC: |
1175 | if (!pat_enabled()) |
1176 | return -ENODEV; |
1177 | type = I915_MMAP_TYPE_WC; |
1178 | break; |
1179 | |
1180 | case I915_MMAP_OFFSET_WB: |
1181 | type = I915_MMAP_TYPE_WB; |
1182 | break; |
1183 | |
1184 | case I915_MMAP_OFFSET_UC: |
1185 | if (!pat_enabled()) |
1186 | return -ENODEV; |
1187 | type = I915_MMAP_TYPE_UC; |
1188 | break; |
1189 | |
1190 | case I915_MMAP_OFFSET_FIXED: |
1191 | type = I915_MMAP_TYPE_FIXED; |
1192 | break; |
1193 | |
1194 | default: |
1195 | return -EINVAL; |
1196 | } |
1197 | |
1198 | return __assign_mmap_offset_handle(file, args->handle, type, &args->offset); |
1199 | } |
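
This is the modern path the comment near the top of the file recommends: fetch the fake offset, then mmap the DRM fd itself. A minimal userspace sketch, assuming the DRM uapi headers; offset_mmap is a hypothetical helper name, and the flag must be one the switch above accepts for the running hardware (I915_MMAP_OFFSET_FIXED on discrete parts):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    /* Hypothetical helper: map a GEM object through its fake mmap offset. */
    static void *offset_mmap(int fd, uint32_t handle, size_t size, uint32_t flags)
    {
        struct drm_i915_gem_mmap_offset arg = {
            .handle = handle,
            .flags = flags,  /* e.g. I915_MMAP_OFFSET_WC */
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
            return MAP_FAILED;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
            fd, arg.offset);
    }
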
1200 | |
1201 | #ifdef __linux__ |
1202 | |
1203 | static void vm_open(struct vm_area_struct *vma) |
1204 | { |
1205 | struct i915_mmap_offset *mmo = vma->vm_private_data; |
1206 | struct drm_i915_gem_object *obj = mmo->obj; |
1207 | |
1208 | GEM_BUG_ON(!obj); |
1209 | i915_gem_object_get(obj); |
1210 | } |
1211 | |
1212 | static void vm_close(struct vm_area_struct *vma) |
1213 | { |
1214 | struct i915_mmap_offset *mmo = vma->vm_private_data; |
1215 | struct drm_i915_gem_object *obj = mmo->obj; |
1216 | |
1217 | GEM_BUG_ON(!obj); |
1218 | i915_gem_object_put(obj); |
1219 | } |
1220 | |
1221 | static const struct vm_operations_struct vm_ops_gtt = { |
1222 | .fault = vm_fault_gtt, |
1223 | .access = vm_access, |
1224 | .open = vm_open, |
1225 | .close = vm_close, |
1226 | }; |
1227 | |
1228 | static const struct vm_operations_struct vm_ops_cpu = { |
1229 | .fault = vm_fault_cpu, |
1230 | .access = vm_access, |
1231 | .open = vm_open, |
1232 | .close = vm_close, |
1233 | }; |
1234 | |
1235 | static int singleton_release(struct inode *inode, struct file *file) |
1236 | { |
1237 | struct drm_i915_private *i915 = file->private_data; |
1238 | |
1239 | cmpxchg(&i915->gem.mmap_singleton, file, NULL); |
1240 | drm_dev_put(&i915->drm); |
1241 | |
1242 | return 0; |
1243 | } |
1244 | |
1245 | static const struct file_operations singleton_fops = { |
1246 | .owner = THIS_MODULE, |
1247 | .release = singleton_release, |
1248 | }; |
1249 | |
1250 | static struct file *mmap_singleton(struct drm_i915_private *i915) |
1251 | { |
1252 | struct file *file; |
1253 | |
1254 | rcu_read_lock(); |
1255 | file = READ_ONCE(i915->gem.mmap_singleton); |
1256 | if (file && !get_file_rcu(file)) |
1257 | file = NULL; |
1258 | rcu_read_unlock(); |
1259 | if (file) |
1260 | return file; |
1261 | |
1262 | file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR); |
1263 | if (IS_ERR(file)) |
1264 | return file; |
1265 | |
1266 | /* Everyone shares a single global address space */ |
1267 | file->f_mapping = i915->drm.anon_inode->i_mapping; |
1268 | |
1269 | smp_store_mb(i915->gem.mmap_singleton, file); |
1270 | drm_dev_get(&i915->drm); |
1271 | |
1272 | return file; |
1273 | } |
1274 | |
1275 | /* |
1276 | * This overcomes the limitation in drm_gem_mmap's assignment of a |
1277 | * drm_gem_object as the vma->vm_private_data, since we need to |
1278 | * be able to resolve multiple mmap offsets which could be tied |
1279 | * to a single gem object. |
1280 | */ |
1281 | int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) |
1282 | { |
1283 | struct drm_vma_offset_node *node; |
1284 | struct drm_file *priv = filp->private_data; |
1285 | struct drm_device *dev = priv->minor->dev; |
1286 | struct drm_i915_gem_object *obj = NULL; |
1287 | struct i915_mmap_offset *mmo = NULL; |
1288 | struct file *anon; |
1289 | |
1290 | if (drm_dev_is_unplugged(dev)) |
1291 | return -ENODEV; |
1292 | |
1293 | rcu_read_lock(); |
1294 | drm_vma_offset_lock_lookup(dev->vma_offset_manager); |
1295 | node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, |
1296 | vma->vm_pgoff, |
1297 | vma_pages(vma)); |
1298 | if (node && drm_vma_node_is_allowed(node, priv)) { |
1299 | /* |
1300 | * Skip 0-refcnted objects as they are in the process of being |
1301 | * destroyed and will be invalid when the vma manager lock |
1302 | * is released. |
1303 | */ |
1304 | if (!node->driver_private) { |
1305 | mmo = container_of(node, struct i915_mmap_offset, vma_node); |
1306 | obj = i915_gem_object_get_rcu(mmo->obj); |
1307 | |
1308 | GEM_BUG_ON(obj && obj->ops->mmap_ops); |
1309 | } else { |
1310 | obj = i915_gem_object_get_rcu |
1311 | (container_of(node, struct drm_i915_gem_object, |
1312 | base.vma_node)); |
1313 | |
1314 | GEM_BUG_ON(obj && !obj->ops->mmap_ops); |
1315 | } |
1316 | } |
1317 | drm_vma_offset_unlock_lookup(dev->vma_offset_manager); |
1318 | rcu_read_unlock(); |
1319 | if (!obj) |
1320 | return node ? -EACCES : -EINVAL; |
1321 | |
1322 | if (i915_gem_object_is_readonly(obj)) { |
1323 | if (vma->vm_flags & VM_WRITE) { |
1324 | i915_gem_object_put(obj); |
1325 | return -EINVAL; |
1326 | } |
1327 | vma->vm_flags &= ~VM_MAYWRITE; |
1328 | } |
1329 | |
1330 | anon = mmap_singleton(to_i915(dev)); |
1331 | if (IS_ERR(anon)) { |
1332 | i915_gem_object_put(obj); |
1333 | return PTR_ERR(anon); |
1334 | } |
1335 | |
1336 | vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO; |
1337 | |
1338 | /* |
1339 | * We keep the ref on mmo->obj, not vm_file, but we require |
1340 | * vma->vm_file->f_mapping, see vma_link(), for later revocation. |
1341 | * Our userspace is accustomed to having per-file resource cleanup |
1342 | * (i.e. contexts, objects and requests) on their close(fd), which |
1343 | * requires avoiding extraneous references to their filp, hence why |
1344 | * we prefer to use an anonymous file for their mmaps. |
1345 | */ |
1346 | vma_set_file(vma, anon); |
1347 | /* Drop the initial creation reference, the vma is now holding one. */ |
1348 | fput(anon); |
1349 | |
1350 | if (obj->ops->mmap_ops) { |
1351 | vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags)); |
1352 | vma->vm_ops = obj->ops->mmap_ops; |
1353 | vma->vm_private_data = node->driver_private; |
1354 | return 0; |
1355 | } |
1356 | |
1357 | vma->vm_private_data = mmo; |
1358 | |
1359 | switch (mmo->mmap_type) { |
1360 | case I915_MMAP_TYPE_WC: |
1361 | vma->vm_page_prot = |
1362 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1363 | vma->vm_ops = &vm_ops_cpu; |
1364 | break; |
1365 | |
1366 | case I915_MMAP_TYPE_FIXED: |
1367 | GEM_WARN_ON(1); |
1368 | fallthrough; |
1369 | case I915_MMAP_TYPE_WB: |
1370 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
1371 | vma->vm_ops = &vm_ops_cpu; |
1372 | break; |
1373 | |
1374 | case I915_MMAP_TYPE_UC: |
1375 | vma->vm_page_prot = |
1376 | pgprot_noncached(vm_get_page_prot(vma->vm_flags)); |
1377 | vma->vm_ops = &vm_ops_cpu; |
1378 | break; |
1379 | |
1380 | case I915_MMAP_TYPE_GTT: |
1381 | vma->vm_page_prot = |
1382 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1383 | vma->vm_ops = &vm_ops_gtt; |
1384 | break; |
1385 | } |
1386 | vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
1387 | |
1388 | return 0; |
1389 | } |
1390 | |
1391 | #else /* !__linux__ */ |
1392 | |
1393 | /* |
1394 | * This overcomes the limitation in drm_gem_mmap's assignment of a |
1395 | * drm_gem_object as the vma->vm_private_data, since we need to |
1396 | * be able to resolve multiple mmap offsets which could be tied |
1397 | * to a single gem object. |
1398 | */ |
1399 | struct uvm_object * |
1400 | i915_gem_mmap(struct file *filp, vm_prot_t accessprot, |
1401 | voff_t off, vsize_t size) |
1402 | { |
1403 | struct drm_vma_offset_node *node; |
1404 | struct drm_file *priv = (void *)filp; |
1405 | struct drm_device *dev = priv->minor->dev; |
1406 | struct drm_i915_gem_object *obj = NULL; |
1407 | struct i915_mmap_offset *mmo = NULL; |
1408 | |
1409 | if (drm_dev_is_unplugged(dev)) |
1410 | return NULL; |
1411 | |
1412 | rcu_read_lock(); |
1413 | drm_vma_offset_lock_lookup(dev->vma_offset_manager); |
1414 | node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, |
1415 | off >> PAGE_SHIFT, |
1416 | atop(round_page(size))); |
1417 | if (node && drm_vma_node_is_allowed(node, priv)) { |
1418 | /* |
1419 | * Skip 0-refcnted objects as they are in the process of being |
1420 | * destroyed and will be invalid when the vma manager lock |
1421 | * is released. |
1422 | */ |
1423 | mmo = container_of(node, struct i915_mmap_offset, vma_node); |
1424 | obj = i915_gem_object_get_rcu(mmo->obj); |
1425 | } |
1426 | drm_vma_offset_unlock_lookup(dev->vma_offset_manager); |
1427 | rcu_read_unlock(); |
1428 | if (!obj) |
1429 | return NULL; |
1430 | |
1431 | if (i915_gem_object_is_readonly(obj)) { |
1432 | if (accessprot & PROT_WRITE) { |
1433 | i915_gem_object_put(obj); |
1434 | return NULL; |
1435 | } |
1436 | } |
1437 | |
1438 | return &obj->base.uobj; |
1439 | } |
1440 | |
1441 | #endif /* !__linux__ */ |
1442 | |
1443 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
1444 | #include "selftests/i915_gem_mman.c" |
1445 | #endif |