File: | dev/pci/drm/i915/gem/i915_gem_ttm.c |
Warning: | line 279, column 27: Value stored to 'i915' during its initialization is never read |
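For context, this checker reports a dead store at initialization: a local variable is initialized with a computed value that is never read before the variable is overwritten or goes out of scope. In the listing below, the local 'i915' at line 279 is initialized via container_of() and not read afterwards, which is the same class of finding. A minimal standalone C sketch of the pattern (not taken from the driver; the names are illustrative only) is shown here; the usual remediation is to drop the unused initializer or the unused local.

#include <stdio.h>

static int lookup(int key)
{
	/* Stand-in for a side-effect-free computation. */
	return key * 2;
}

int main(void)
{
	int cached = lookup(7);	/* "value stored ... during its initialization is never read" */

	cached = 0;		/* ... because it is overwritten before any read */
	printf("%d\n", cached);
	return 0;
}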
1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2021 Intel Corporation |
4 | */ |
5 | |
6 | #include <linux/shmem_fs.h> |
7 | |
8 | #include <drm/ttm/ttm_bo_driver.h> |
9 | #include <drm/ttm/ttm_placement.h> |
10 | #include <drm/drm_buddy.h> |
11 | |
12 | #include "i915_drv.h" |
13 | #include "i915_ttm_buddy_manager.h" |
14 | #include "intel_memory_region.h" |
15 | #include "intel_region_ttm.h" |
16 | |
17 | #include "gem/i915_gem_mman.h" |
18 | #include "gem/i915_gem_object.h" |
19 | #include "gem/i915_gem_region.h" |
20 | #include "gem/i915_gem_ttm.h" |
21 | #include "gem/i915_gem_ttm_move.h" |
22 | #include "gem/i915_gem_ttm_pm.h" |
23 | #include "gt/intel_gpu_commands.h" |
24 | |
25 | #define I915_TTM_PRIO_PURGE 0 |
26 | #define I915_TTM_PRIO_NO_PAGES 1 |
27 | #define I915_TTM_PRIO_HAS_PAGES 2 |
28 | #define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3 |
29 | |
30 | /* |
31 | * Size of struct ttm_place vector in on-stack struct ttm_placement allocs |
32 | */ |
33 | #define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN |
34 | |
35 | /** |
36 | * struct i915_ttm_tt - TTM page vector with additional private information |
37 | * @ttm: The base TTM page vector. |
38 | * @dev: The struct device used for dma mapping and unmapping. |
39 | * @cached_rsgt: The cached scatter-gather table. |
40 | * @is_shmem: Set if using shmem. |
41 | * @filp: The shmem file, if using shmem backend. |
42 | * |
43 | * Note that DMA may be going on right up to the point where the page- |
44 | * vector is unpopulated in delayed destroy. Hence keep the |
45 | * scatter-gather table mapped and cached up to that point. This is |
46 | * different from the cached gem object io scatter-gather table which |
47 | * doesn't have an associated dma mapping. |
48 | */ |
49 | struct i915_ttm_tt { |
50 | struct ttm_tt ttm; |
51 | struct device *dev; |
52 | struct i915_refct_sgt cached_rsgt; |
53 | |
54 | bool is_shmem; |
55 | struct file *filp; |
56 | }; |
57 | |
58 | static const struct ttm_place sys_placement_flags = { |
59 | .fpfn = 0, |
60 | .lpfn = 0, |
61 | .mem_type = I915_PL_SYSTEM, |
62 | .flags = 0, |
63 | }; |
64 | |
65 | static struct ttm_placement i915_sys_placement = { |
66 | .num_placement = 1, |
67 | .placement = &sys_placement_flags, |
68 | .num_busy_placement = 1, |
69 | .busy_placement = &sys_placement_flags, |
70 | }; |
71 | |
72 | /** |
73 | * i915_ttm_sys_placement - Return the struct ttm_placement to be |
74 | * used for an object in system memory. |
75 | * |
76 | * Rather than making the struct extern, use this |
77 | * function. |
78 | * |
79 | * Return: A pointer to a static variable for sys placement. |
80 | */ |
81 | struct ttm_placement *i915_ttm_sys_placement(void) |
82 | { |
83 | return &i915_sys_placement; |
84 | } |
85 | |
86 | static int i915_ttm_err_to_gem(int err) |
87 | { |
88 | /* Fastpath */ |
89 | if (likely(!err)) |
90 | return 0; |
91 | |
92 | switch (err) { |
93 | case -EBUSY: |
94 | /* |
95 | * TTM likes to convert -EDEADLK to -EBUSY, and wants us to |
96 | * restart the operation, since we don't record the contending |
97 | * lock. We use -EAGAIN to restart. |
98 | */ |
99 | return -EAGAIN; |
100 | case -ENOSPC: |
101 | /* |
102 | * Memory type / region is full, and we can't evict. |
103 | * Except possibly system, that returns -ENOMEM; |
104 | */ |
105 | return -ENXIO; |
106 | default: |
107 | break; |
108 | } |
109 | |
110 | return err; |
111 | } |
112 | |
113 | static enum ttm_caching |
114 | i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj) |
115 | { |
116 | /* |
117 | * Objects only allowed in system get cached cpu-mappings, or when |
118 | * evicting lmem-only buffers to system for swapping. Other objects get |
119 | * WC mapping for now. Even if in system. |
120 | */ |
121 | if (obj->mm.n_placements <= 1) |
122 | return ttm_cached; |
123 | |
124 | return ttm_write_combined; |
125 | } |
126 | |
127 | static void |
128 | i915_ttm_place_from_region(const struct intel_memory_region *mr, |
129 | struct ttm_place *place, |
130 | resource_size_t offset, |
131 | resource_size_t size, |
132 | unsigned int flags) |
133 | { |
134 | memset(place, 0, sizeof(*place)); |
135 | place->mem_type = intel_region_to_ttm_type(mr); |
136 | |
137 | if (mr->type == INTEL_MEMORY_SYSTEM) |
138 | return; |
139 | |
140 | if (flags & I915_BO_ALLOC_CONTIGUOUS) |
141 | place->flags |= TTM_PL_FLAG_CONTIGUOUS; |
142 | if (offset != I915_BO_INVALID_OFFSET) { |
143 | place->fpfn = offset >> PAGE_SHIFT; |
144 | place->lpfn = place->fpfn + (size >> PAGE_SHIFT); |
145 | } else if (mr->io_size && mr->io_size < mr->total) { |
146 | if (flags & I915_BO_ALLOC_GPU_ONLY) { |
147 | place->flags |= TTM_PL_FLAG_TOPDOWN; |
148 | } else { |
149 | place->fpfn = 0; |
150 | place->lpfn = mr->io_size >> PAGE_SHIFT; |
151 | } |
152 | } |
153 | } |
154 | |
155 | static void |
156 | i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, |
157 | struct ttm_place *requested, |
158 | struct ttm_place *busy, |
159 | struct ttm_placement *placement) |
160 | { |
161 | unsigned int num_allowed = obj->mm.n_placements; |
162 | unsigned int flags = obj->flags; |
163 | unsigned int i; |
164 | |
165 | placement->num_placement = 1; |
166 | i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] : |
167 | obj->mm.region, requested, obj->bo_offset, |
168 | obj->base.size, flags); |
169 | |
170 | /* Cache this on object? */ |
171 | placement->num_busy_placement = num_allowed; |
172 | for (i = 0; i < placement->num_busy_placement; ++i) |
173 | i915_ttm_place_from_region(obj->mm.placements[i], busy + i, |
174 | obj->bo_offset, obj->base.size, flags); |
175 | |
176 | if (num_allowed == 0) { |
177 | *busy = *requested; |
178 | placement->num_busy_placement = 1; |
179 | } |
180 | |
181 | placement->placement = requested; |
182 | placement->busy_placement = busy; |
183 | } |
184 | |
185 | static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, |
186 | struct ttm_tt *ttm, |
187 | struct ttm_operation_ctx *ctx) |
188 | { |
189 | STUB(); |
190 | return -ENOSYS; |
191 | #ifdef notyet |
192 | struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); |
193 | struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; |
194 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
195 | const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); |
196 | const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; |
197 | struct file *filp = i915_tt->filp; |
198 | struct sgt_iter sgt_iter; |
199 | struct sg_table *st; |
200 | struct vm_page *page; |
201 | unsigned long i; |
202 | int err; |
203 | |
204 | if (!filp) { |
205 | struct address_space *mapping; |
206 | gfp_t mask; |
207 | |
208 | filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE); |
209 | if (IS_ERR(filp)) |
210 | return PTR_ERR(filp); |
211 | |
212 | mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; |
213 | |
214 | mapping = filp->f_mapping; |
215 | mapping_set_gfp_mask(mapping, mask); |
216 | GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); |
217 | |
218 | i915_tt->filp = filp; |
219 | } |
220 | |
221 | st = &i915_tt->cached_rsgt.table; |
222 | err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping, |
223 | max_segment); |
224 | if (err) |
225 | return err; |
226 | |
227 | err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, |
228 | DMA_ATTR_SKIP_CPU_SYNC); |
229 | if (err) |
230 | goto err_free_st; |
231 | |
232 | i = 0; |
233 | for_each_sgt_page(page, sgt_iter, st) |
234 | ttm->pages[i++] = page; |
235 | |
236 | if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) |
237 | ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; |
238 | |
239 | return 0; |
240 | |
241 | err_free_st: |
242 | shmem_sg_free_table(st, filp->f_mapping, false, false); |
243 | |
244 | return err; |
245 | #endif |
246 | } |
247 | |
248 | static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) |
249 | { |
250 | STUB(); |
251 | #ifdef notyet |
252 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
253 | bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; |
254 | struct sg_table *st = &i915_tt->cached_rsgt.table; |
255 | |
256 | shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping, |
257 | backup, backup); |
258 | #endif |
259 | } |
260 | |
261 | static void i915_ttm_tt_release(struct kref *ref) |
262 | { |
263 | struct i915_ttm_tt *i915_tt = |
264 | container_of(ref, typeof(*i915_tt), cached_rsgt.kref); |
265 | struct sg_table *st = &i915_tt->cached_rsgt.table; |
266 | |
267 | GEM_WARN_ON(st->sgl); |
268 | |
269 | kfree(i915_tt); |
270 | } |
271 | |
272 | static const struct i915_refct_sgt_ops tt_rsgt_ops = { |
273 | .release = i915_ttm_tt_release |
274 | }; |
275 | |
276 | static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, |
277 | uint32_t page_flags) |
278 | { |
279 | struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), |
Value stored to 'i915' during its initialization is never read |
280 | bdev); |
281 | struct ttm_resource_manager *man = |
282 | ttm_manager_type(bo->bdev, bo->resource->mem_type); |
283 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
284 | unsigned long ccs_pages = 0; |
285 | enum ttm_caching caching; |
286 | struct i915_ttm_tt *i915_tt; |
287 | int ret; |
288 | |
289 | if (i915_ttm_is_ghost_object(bo)) |
290 | return NULL; |
291 | |
292 | i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); |
293 | if (!i915_tt) |
294 | return NULL; |
295 | |
296 | if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && |
297 | man->use_tt) |
298 | page_flags |= TTM_TT_FLAG_ZERO_ALLOC; |
299 | |
300 | caching = i915_ttm_select_tt_caching(obj); |
301 | if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) { |
302 | page_flags |= TTM_TT_FLAG_EXTERNAL | |
303 | TTM_TT_FLAG_EXTERNAL_MAPPABLE; |
304 | i915_tt->is_shmem = true; |
305 | } |
306 | |
307 | if (i915_gem_object_needs_ccs_pages(obj)) |
308 | ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size, |
309 | NUM_BYTES_PER_CCS_BYTE), |
310 | PAGE_SIZE); |
311 | |
312 | ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages); |
313 | if (ret) |
314 | goto err_free; |
315 | |
316 | __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size, |
317 | &tt_rsgt_ops); |
318 | |
319 | i915_tt->dev = obj->base.dev->dev; |
320 | |
321 | return &i915_tt->ttm; |
322 | |
323 | err_free: |
324 | kfree(i915_tt); |
325 | return NULL; |
326 | } |
327 | |
328 | static int i915_ttm_tt_populate(struct ttm_device *bdev, |
329 | struct ttm_tt *ttm, |
330 | struct ttm_operation_ctx *ctx) |
331 | { |
332 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
333 | |
334 | if (i915_tt->is_shmem) |
335 | return i915_ttm_tt_shmem_populate(bdev, ttm, ctx); |
336 | |
337 | return ttm_pool_alloc(&bdev->pool, ttm, ctx); |
338 | } |
339 | |
340 | static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) |
341 | { |
342 | STUB(); |
343 | #ifdef notyet |
344 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
345 | struct sg_table *st = &i915_tt->cached_rsgt.table; |
346 | |
347 | if (st->sgl) |
348 | dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); |
349 | |
350 | if (i915_tt->is_shmem) { |
351 | i915_ttm_tt_shmem_unpopulate(ttm); |
352 | } else { |
353 | sg_free_table(st); |
354 | ttm_pool_free(&bdev->pool, ttm); |
355 | } |
356 | #endif |
357 | } |
358 | |
359 | static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) |
360 | { |
361 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
362 | |
363 | if (i915_tt->filp) |
364 | fput(i915_tt->filp); |
365 | |
366 | ttm_tt_fini(ttm); |
367 | i915_refct_sgt_put(&i915_tt->cached_rsgt); |
368 | } |
369 | |
370 | static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, |
371 | const struct ttm_place *place) |
372 | { |
373 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
374 | |
375 | if (i915_ttm_is_ghost_object(bo)) |
376 | return false; |
377 | |
378 | /* |
379 | * EXTERNAL objects should never be swapped out by TTM, instead we need |
380 | * to handle that ourselves. TTM will already skip such objects for us, |
381 | * but we would like to avoid grabbing locks for no good reason. |
382 | */ |
383 | if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) |
384 | return false; |
385 | |
386 | /* Will do for now. Our pinned objects are still on TTM's LRU lists */ |
387 | if (!i915_gem_object_evictable(obj)) |
388 | return false; |
389 | |
390 | return ttm_bo_eviction_valuable(bo, place); |
391 | } |
392 | |
393 | static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, |
394 | struct ttm_placement *placement) |
395 | { |
396 | *placement = i915_sys_placement; |
397 | } |
398 | |
399 | /** |
400 | * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information |
401 | * @obj: The GEM object |
402 | * This function frees any LMEM-related information that is cached on |
403 | * the object. For example the radix tree for fast page lookup and the |
404 | * cached refcounted sg-table |
405 | */ |
406 | void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj) |
407 | { |
408 | struct radix_tree_iter iter; |
409 | void __rcu **slot; |
410 | |
411 | if (!obj->ttm.cached_io_rsgt) |
412 | return; |
413 | |
414 | rcu_read_lock(); |
415 | radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0) |
416 | radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); |
417 | rcu_read_unlock(); |
418 | |
419 | i915_refct_sgt_put(obj->ttm.cached_io_rsgt); |
420 | obj->ttm.cached_io_rsgt = NULL; |
421 | } |
422 | |
423 | /** |
424 | * i915_ttm_purge - Clear an object of its memory |
425 | * @obj: The object |
426 | * |
427 | * This function is called to clear an object of its memory when it is |
428 | * marked as not needed anymore. |
429 | * |
430 | * Return: 0 on success, negative error code on failure. |
431 | */ |
432 | int i915_ttm_purge(struct drm_i915_gem_object *obj) |
433 | { |
434 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
435 | struct i915_ttm_tt *i915_tt = |
436 | container_of(bo->ttm, typeof(*i915_tt), ttm); |
437 | struct ttm_operation_ctx ctx = { |
438 | .interruptible = true, |
439 | .no_wait_gpu = false, |
440 | }; |
441 | struct ttm_placement place = {}; |
442 | int ret; |
443 | |
444 | if (obj->mm.madv == __I915_MADV_PURGED) |
445 | return 0; |
446 | |
447 | ret = ttm_bo_validate(bo, &place, &ctx); |
448 | if (ret) |
449 | return ret; |
450 | |
451 | if (bo->ttm && i915_tt->filp) { |
452 | /* |
453 | * The below fput(which eventually calls shmem_truncate) might |
454 | * be delayed by worker, so when directly called to purge the |
455 | * pages(like by the shrinker) we should try to be more |
456 | * aggressive and release the pages immediately. |
457 | */ |
458 | #ifdef __linux__ |
459 | shmem_truncate_range(file_inode(i915_tt->filp), |
460 | 0, (loff_t)-1); |
461 | #else |
462 | rw_enter(obj->base.uao->vmobjlock, RW_WRITE); |
463 | obj->base.uao->pgops->pgo_flush(obj->base.uao, 0, obj->base.size, |
464 | PGO_ALLPAGES | PGO_FREE); |
465 | rw_exit(obj->base.uao->vmobjlock); |
466 | #endif |
467 | fput(fetch_and_zero(&i915_tt->filp)); |
468 | } |
469 | |
470 | obj->write_domain = 0; |
471 | obj->read_domains = 0; |
472 | i915_ttm_adjust_gem_after_move(obj); |
473 | i915_ttm_free_cached_io_rsgt(obj); |
474 | obj->mm.madv = __I915_MADV_PURGED; |
475 | |
476 | return 0; |
477 | } |
478 | |
479 | static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags) |
480 | { |
481 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
482 | struct i915_ttm_tt *i915_tt = |
483 | container_of(bo->ttm, typeof(*i915_tt), ttm); |
484 | struct ttm_operation_ctx ctx = { |
485 | .interruptible = true, |
486 | .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT, |
487 | }; |
488 | struct ttm_placement place = {}; |
489 | int ret; |
490 | |
491 | if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM) |
492 | return 0; |
493 | |
494 | GEM_BUG_ON(!i915_tt->is_shmem); |
495 | |
496 | if (!i915_tt->filp) |
497 | return 0; |
498 | |
499 | ret = ttm_bo_wait_ctx(bo, &ctx); |
500 | if (ret) |
501 | return ret; |
502 | |
503 | switch (obj->mm.madv) { |
504 | case I915_MADV_DONTNEED: |
505 | return i915_ttm_purge(obj); |
506 | case __I915_MADV_PURGED: |
507 | return 0; |
508 | } |
509 | |
510 | if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) |
511 | return 0; |
512 | |
513 | bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED; |
514 | ret = ttm_bo_validate(bo, &place, &ctx); |
515 | if (ret) { |
516 | bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; |
517 | return ret; |
518 | } |
519 | |
520 | if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK) |
521 | #ifdef notyet |
522 | __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping); |
523 | #else |
524 | STUB(); |
525 | #endif |
526 | |
527 | return 0; |
528 | } |
529 | |
530 | static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) |
531 | { |
532 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
533 | |
534 | if (bo->resource && !i915_ttm_is_ghost_object(bo)) { |
535 | __i915_gem_object_pages_fini(obj); |
536 | i915_ttm_free_cached_io_rsgt(obj); |
537 | } |
538 | } |
539 | |
540 | static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) |
541 | { |
542 | STUB(); |
543 | return ERR_PTR(-ENOSYS); |
544 | #ifdef notyet |
545 | struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); |
546 | struct sg_table *st; |
547 | int ret; |
548 | |
549 | if (i915_tt->cached_rsgt.table.sgl) |
550 | return i915_refct_sgt_get(&i915_tt->cached_rsgt); |
551 | |
552 | st = &i915_tt->cached_rsgt.table; |
553 | ret = sg_alloc_table_from_pages_segment(st, |
554 | ttm->pages, ttm->num_pages, |
555 | 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, |
556 | i915_sg_segment_size(i915_tt->dev), GFP_KERNEL); |
557 | if (ret) { |
558 | st->sgl = NULL; |
559 | return ERR_PTR(ret); |
560 | } |
561 | |
562 | ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); |
563 | if (ret) { |
564 | sg_free_table(st); |
565 | return ERR_PTR(ret); |
566 | } |
567 | |
568 | return i915_refct_sgt_get(&i915_tt->cached_rsgt); |
569 | #endif |
570 | } |
571 | |
572 | /** |
573 | * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the |
574 | * resource memory |
575 | * @obj: The GEM object used for sg-table caching |
576 | * @res: The struct ttm_resource for which an sg-table is requested. |
577 | * |
578 | * This function returns a refcounted sg-table representing the memory |
579 | * pointed to by @res. If @res is the object's current resource it may also |
580 | * cache the sg_table on the object or attempt to access an already cached |
581 | * sg-table. The refcounted sg-table needs to be put when no-longer in use. |
582 | * |
583 | * Return: A valid pointer to a struct i915_refct_sgt or error pointer on |
584 | * failure. |
585 | */ |
586 | struct i915_refct_sgt * |
587 | i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, |
588 | struct ttm_resource *res) |
589 | { |
590 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
591 | u32 page_alignment; |
592 | |
593 | if (!i915_ttm_gtt_binds_lmem(res)) |
594 | return i915_ttm_tt_get_st(bo->ttm); |
595 | |
596 | page_alignment = bo->page_alignment << PAGE_SHIFT; |
597 | if (!page_alignment) |
598 | page_alignment = obj->mm.region->min_page_size; |
599 | |
600 | /* |
601 | * If CPU mapping differs, we need to add the ttm_tt pages to |
602 | * the resulting st. Might make sense for GGTT. |
603 | */ |
604 | GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res)); |
605 | if (bo->resource == res) { |
606 | if (!obj->ttm.cached_io_rsgt) { |
607 | struct i915_refct_sgt *rsgt; |
608 | |
609 | rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, |
610 | res, |
611 | page_alignment); |
612 | if (IS_ERR(rsgt)) |
613 | return rsgt; |
614 | |
615 | obj->ttm.cached_io_rsgt = rsgt; |
616 | } |
617 | return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); |
618 | } |
619 | |
620 | return intel_region_ttm_resource_to_rsgt(obj->mm.region, res, |
621 | page_alignment); |
622 | } |
623 | |
624 | static int i915_ttm_truncate(struct drm_i915_gem_object *obj) |
625 | { |
626 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
627 | int err; |
628 | |
629 | WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED); |
630 | |
631 | err = ttm_bo_wait(bo, true, false); |
632 | if (err) |
633 | return err; |
634 | |
635 | err = i915_ttm_move_notify(bo); |
636 | if (err) |
637 | return err; |
638 | |
639 | return i915_ttm_purge(obj); |
640 | } |
641 | |
642 | static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) |
643 | { |
644 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
645 | int ret; |
646 | |
647 | if (i915_ttm_is_ghost_object(bo)) |
648 | return; |
649 | |
650 | ret = i915_ttm_move_notify(bo); |
651 | GEM_WARN_ON(ret); |
652 | GEM_WARN_ON(obj->ttm.cached_io_rsgt); |
653 | if (!ret && obj->mm.madv != I915_MADV_WILLNEED) |
654 | i915_ttm_purge(obj); |
655 | } |
656 | |
657 | /** |
658 | * i915_ttm_resource_mappable - Return true if the ttm resource is CPU |
659 | * accessible. |
660 | * @res: The TTM resource to check. |
661 | * |
662 | * This is interesting on small-BAR systems where we may encounter lmem objects |
663 | * that can't be accessed via the CPU. |
664 | */ |
665 | bool i915_ttm_resource_mappable(struct ttm_resource *res) |
666 | { |
667 | struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); |
668 | |
669 | if (!i915_ttm_cpu_maps_iomem(res)) |
670 | return true; |
671 | |
672 | return bman_res->used_visible_size == bman_res->base.num_pages; |
673 | } |
674 | |
675 | static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) |
676 | { |
677 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo); |
678 | bool unknown_state; |
679 | |
680 | if (i915_ttm_is_ghost_object(mem->bo)) |
681 | return -EINVAL; |
682 | |
683 | if (!kref_get_unless_zero(&obj->base.refcount)) |
684 | return -EINVAL; |
685 | |
686 | assert_object_held(obj); |
687 | |
688 | unknown_state = i915_gem_object_has_unknown_state(obj); |
689 | i915_gem_object_put(obj); |
690 | if (unknown_state) |
691 | return -EINVAL; |
692 | |
693 | if (!i915_ttm_cpu_maps_iomem(mem)) |
694 | return 0; |
695 | |
696 | if (!i915_ttm_resource_mappable(mem)) |
697 | return -EINVAL; |
698 | |
699 | mem->bus.caching = ttm_write_combined; |
700 | mem->bus.is_iomem = true; |
701 | |
702 | return 0; |
703 | } |
704 | |
705 | static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, |
706 | unsigned long page_offset) |
707 | { |
708 | STUB(); |
709 | return 0; |
710 | #ifdef notyet |
711 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
712 | struct scatterlist *sg; |
713 | unsigned long base; |
714 | unsigned int ofs; |
715 | |
716 | GEM_BUG_ON(i915_ttm_is_ghost_object(bo)); |
717 | GEM_WARN_ON(bo->ttm); |
718 | |
719 | base = obj->mm.region->iomap.base - obj->mm.region->region.start; |
720 | sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true); |
721 | |
722 | return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; |
723 | #endif |
724 | } |
725 | |
726 | /* |
727 | * All callbacks need to take care not to downcast a struct ttm_buffer_object |
728 | * without checking its subclass, since it might be a TTM ghost object. |
729 | */ |
730 | static struct ttm_device_funcs i915_ttm_bo_driver = { |
731 | .ttm_tt_create = i915_ttm_tt_create, |
732 | .ttm_tt_populate = i915_ttm_tt_populate, |
733 | .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, |
734 | .ttm_tt_destroy = i915_ttm_tt_destroy, |
735 | .eviction_valuable = i915_ttm_eviction_valuable, |
736 | .evict_flags = i915_ttm_evict_flags, |
737 | .move = i915_ttm_move, |
738 | .swap_notify = i915_ttm_swap_notify, |
739 | .delete_mem_notify = i915_ttm_delete_mem_notify, |
740 | .io_mem_reserve = i915_ttm_io_mem_reserve, |
741 | .io_mem_pfn = i915_ttm_io_mem_pfn, |
742 | }; |
743 | |
744 | /** |
745 | * i915_ttm_driver - Return a pointer to the TTM device funcs |
746 | * |
747 | * Return: Pointer to statically allocated TTM device funcs. |
748 | */ |
749 | struct ttm_device_funcs *i915_ttm_driver(void) |
750 | { |
751 | return &i915_ttm_bo_driver; |
752 | } |
753 | |
754 | static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, |
755 | struct ttm_placement *placement) |
756 | { |
757 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
758 | struct ttm_operation_ctx ctx = { |
759 | .interruptible = true, |
760 | .no_wait_gpu = false, |
761 | }; |
762 | int real_num_busy; |
763 | int ret; |
764 | |
765 | /* First try only the requested placement. No eviction. */ |
766 | real_num_busy = fetch_and_zero(&placement->num_busy_placement); |
767 | ret = ttm_bo_validate(bo, placement, &ctx); |
768 | if (ret) { |
769 | ret = i915_ttm_err_to_gem(ret); |
770 | /* |
771 | * Anything that wants to restart the operation gets to |
772 | * do that. |
773 | */ |
774 | if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS || |
775 | ret == -EAGAIN) |
776 | return ret; |
777 | |
778 | /* |
779 | * If the initial attempt fails, allow all accepted placements, |
780 | * evicting if necessary. |
781 | */ |
782 | placement->num_busy_placement = real_num_busy; |
783 | ret = ttm_bo_validate(bo, placement, &ctx); |
784 | if (ret) |
785 | return i915_ttm_err_to_gem(ret); |
786 | } |
787 | |
788 | if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { |
789 | ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); |
790 | if (ret) |
791 | return ret; |
792 | |
793 | i915_ttm_adjust_domains_after_move(obj); |
794 | i915_ttm_adjust_gem_after_move(obj); |
795 | } |
796 | |
797 | if (!i915_gem_object_has_pages(obj)) { |
798 | struct i915_refct_sgt *rsgt = |
799 | i915_ttm_resource_get_st(obj, bo->resource); |
800 | |
801 | if (IS_ERR(rsgt)) |
802 | return PTR_ERR(rsgt); |
803 | |
804 | GEM_BUG_ON(obj->mm.rsgt); |
805 | obj->mm.rsgt = rsgt; |
806 | __i915_gem_object_set_pages(obj, &rsgt->table, |
807 | i915_sg_dma_sizes(rsgt->table.sgl)); |
808 | } |
809 | |
810 | GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages)); |
811 | i915_ttm_adjust_lru(obj); |
812 | return ret; |
813 | } |
814 | |
815 | static int i915_ttm_get_pages(struct drm_i915_gem_object *obj) |
816 | { |
817 | struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS]; |
818 | struct ttm_placement placement; |
819 | |
820 | GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS); |
821 | |
822 | /* Move to the requested placement. */ |
823 | i915_ttm_placement_from_obj(obj, &requested, busy, &placement); |
824 | |
825 | return __i915_ttm_get_pages(obj, &placement); |
826 | } |
827 | |
828 | /** |
829 | * DOC: Migration vs eviction |
830 | * |
831 | * GEM migration may not be the same as TTM migration / eviction. If |
832 | * the TTM core decides to evict an object it may be evicted to a |
833 | * TTM memory type that is not in the object's allowable GEM regions, or |
834 | * in fact theoretically to a TTM memory type that doesn't correspond to |
835 | * a GEM memory region. In that case the object's GEM region is not |
836 | * updated, and the data is migrated back to the GEM region at |
837 | * get_pages time. TTM may however set up CPU ptes to the object even |
838 | * when it is evicted. |
839 | * Gem forced migration using the i915_ttm_migrate() op, is allowed even |
840 | * to regions that are not in the object's list of allowable placements. |
841 | */ |
842 | static int __i915_ttm_migrate(struct drm_i915_gem_object *obj, |
843 | struct intel_memory_region *mr, |
844 | unsigned int flags) |
845 | { |
846 | struct ttm_place requested; |
847 | struct ttm_placement placement; |
848 | int ret; |
849 | |
850 | i915_ttm_place_from_region(mr, &requested, obj->bo_offset, |
851 | obj->base.size, flags); |
852 | placement.num_placement = 1; |
853 | placement.num_busy_placement = 1; |
854 | placement.placement = &requested; |
855 | placement.busy_placement = &requested; |
856 | |
857 | ret = __i915_ttm_get_pages(obj, &placement); |
858 | if (ret) |
859 | return ret; |
860 | |
861 | /* |
862 | * Reinitialize the region bindings. This is primarily |
863 | * required for objects where the new region is not in |
864 | * its allowable placements. |
865 | */ |
866 | if (obj->mm.region != mr) { |
867 | i915_gem_object_release_memory_region(obj); |
868 | i915_gem_object_init_memory_region(obj, mr); |
869 | } |
870 | |
871 | return 0; |
872 | } |
873 | |
874 | static int i915_ttm_migrate(struct drm_i915_gem_object *obj, |
875 | struct intel_memory_region *mr, |
876 | unsigned int flags) |
877 | { |
878 | return __i915_ttm_migrate(obj, mr, flags); |
879 | } |
880 | |
881 | static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, |
882 | struct sg_table *st) |
883 | { |
884 | /* |
885 | * We're currently not called from a shrinker, so put_pages() |
886 | * typically means the object is about to be destroyed, or called |
887 | * from move_notify(). So just avoid doing much for now. |
888 | * If the object is not destroyed next, the TTM eviction logic |
889 | * and shrinkers will move it out if needed. |
890 | */ |
891 | |
892 | if (obj->mm.rsgt) |
893 | i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt)); |
894 | } |
895 | |
896 | /** |
897 | * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists. |
898 | * @obj: The object |
899 | */ |
900 | void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) |
901 | { |
902 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
903 | struct i915_ttm_tt *i915_tt = |
904 | container_of(bo->ttm, typeof(*i915_tt), ttm); |
905 | bool shrinkable = |
906 | bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm); |
907 | |
908 | /* |
909 | * Don't manipulate the TTM LRUs while in TTM bo destruction. |
910 | * We're called through i915_ttm_delete_mem_notify(). |
911 | */ |
912 | if (!kref_read(&bo->kref)) |
913 | return; |
914 | |
915 | /* |
916 | * We skip managing the shrinker LRU in set_pages() and just manage |
917 | * everything here. This does at least solve the issue with having |
918 | * temporary shmem mappings(like with evicted lmem) not being visible to |
919 | * the shrinker. Only our shmem objects are shrinkable, everything else |
920 | * we keep as unshrinkable. |
921 | * |
922 | * To make sure everything plays nice we keep an extra shrink pin in TTM |
923 | * if the underlying pages are not currently shrinkable. Once we release |
924 | * our pin, like when the pages are moved to shmem, the pages will then |
925 | * be added to the shrinker LRU, assuming the caller isn't also holding |
926 | * a pin. |
927 | * |
928 | * TODO: consider maybe also bumping the shrinker list here when we have |
929 | * already unpinned it, which should give us something more like an LRU. |
930 | * |
931 | * TODO: There is a small window of opportunity for this function to |
932 | * get called from eviction after we've dropped the last GEM refcount, |
933 | * but before the TTM deleted flag is set on the object. Avoid |
934 | * adjusting the shrinker list in such cases, since the object is |
935 | * not available to the shrinker anyway due to its zero refcount. |
936 | * To fix this properly we should move to a TTM shrinker LRU list for |
937 | * these objects. |
938 | */ |
939 | if (kref_get_unless_zero(&obj->base.refcount)) { |
940 | if (shrinkable != obj->mm.ttm_shrinkable) { |
941 | if (shrinkable) { |
942 | if (obj->mm.madv == I915_MADV_WILLNEED) |
943 | __i915_gem_object_make_shrinkable(obj); |
944 | else |
945 | __i915_gem_object_make_purgeable(obj); |
946 | } else { |
947 | i915_gem_object_make_unshrinkable(obj); |
948 | } |
949 | |
950 | obj->mm.ttm_shrinkable = shrinkable; |
951 | } |
952 | i915_gem_object_put(obj); |
953 | } |
954 | |
955 | /* |
956 | * Put on the correct LRU list depending on the MADV status |
957 | */ |
958 | spin_lock(&bo->bdev->lru_lock); |
959 | if (shrinkable) { |
960 | /* Try to keep shmem_tt from being considered for shrinking. */ |
961 | bo->priority = TTM_MAX_BO_PRIORITY - 1; |
962 | } else if (obj->mm.madv != I915_MADV_WILLNEED) { |
963 | bo->priority = I915_TTM_PRIO_PURGE; |
964 | } else if (!i915_gem_object_has_pages(obj)) { |
965 | bo->priority = I915_TTM_PRIO_NO_PAGES; |
966 | } else { |
967 | struct ttm_resource_manager *man = |
968 | ttm_manager_type(bo->bdev, bo->resource->mem_type); |
969 | |
970 | /* |
971 | * If we need to place an LMEM resource which doesn't need CPU |
972 | * access then we should try not to victimize mappable objects |
973 | * first, since we likely end up stealing more of the mappable |
974 | * portion. And likewise when we try to find space for a mappable |
975 | * object, we know not to ever victimize objects that don't |
976 | * occupy any mappable pages. |
977 | */ |
978 | if (i915_ttm_cpu_maps_iomem(bo->resource) && |
979 | i915_ttm_buddy_man_visible_size(man) < man->size && |
980 | !(obj->flags & I915_BO_ALLOC_GPU_ONLY)) |
981 | bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS; |
982 | else |
983 | bo->priority = I915_TTM_PRIO_HAS_PAGES; |
984 | } |
985 | |
986 | ttm_bo_move_to_lru_tail(bo); |
987 | spin_unlock(&bo->bdev->lru_lock); |
988 | } |
989 | |
990 | /* |
991 | * TTM-backed gem object destruction requires some clarification. |
992 | * Basically we have two possibilities here. We can either rely on the |
993 | * i915 delayed destruction and put the TTM object when the object |
994 | * is idle. This would be detected by TTM which would bypass the |
995 | * TTM delayed destroy handling. The other approach is to put the TTM |
996 | * object early and rely on the TTM destroyed handling, and then free |
997 | * the leftover parts of the GEM object once TTM's destroyed list handling is |
998 | * complete. For now, we rely on the latter for two reasons: |
999 | * a) TTM can evict an object even when it's on the delayed destroy list, |
1000 | * which in theory allows for complete eviction. |
1001 | * b) There is work going on in TTM to allow freeing an object even when |
1002 | * it's not idle, and using the TTM destroyed list handling could help us |
1003 | * benefit from that. |
1004 | */ |
1005 | static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) |
1006 | { |
1007 | GEM_BUG_ON(!obj->ttm.created); |
1008 | |
1009 | ttm_bo_put(i915_gem_to_ttm(obj)); |
1010 | } |
1011 | |
1012 | #ifdef notyet |
1013 | static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) |
1014 | { |
1015 | struct vm_area_struct *area = vmf->vma; |
1016 | struct ttm_buffer_object *bo = area->vm_private_data; |
1017 | struct drm_device *dev = bo->base.dev; |
1018 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
1019 | intel_wakeref_t wakeref = 0; |
1020 | vm_fault_t ret; |
1021 | int idx; |
1022 | |
1023 | if (i915_ttm_is_ghost_object(bo)) |
1024 | return VM_FAULT_SIGBUS; |
1025 | |
1026 | /* Sanity check that we allow writing into this object */ |
1027 | if (unlikely(i915_gem_object_is_readonly(obj) && |
1028 | area->vm_flags & VM_WRITE)) |
1029 | return VM_FAULT_SIGBUS; |
1030 | |
1031 | ret = ttm_bo_vm_reserve(bo, vmf); |
1032 | if (ret) |
1033 | return ret; |
1034 | |
1035 | if (obj->mm.madv != I915_MADV_WILLNEED) { |
1036 | dma_resv_unlock(bo->base.resv); |
1037 | return VM_FAULT_SIGBUS; |
1038 | } |
1039 | |
1040 | if (!i915_ttm_resource_mappable(bo->resource)) { |
1041 | int err = -ENODEV; |
1042 | int i; |
1043 | |
1044 | for (i = 0; i < obj->mm.n_placements; i++) { |
1045 | struct intel_memory_region *mr = obj->mm.placements[i]; |
1046 | unsigned int flags; |
1047 | |
1048 | if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM) |
1049 | continue; |
1050 | |
1051 | flags = obj->flags; |
1052 | flags &= ~I915_BO_ALLOC_GPU_ONLY; |
1053 | err = __i915_ttm_migrate(obj, mr, flags); |
1054 | if (!err) |
1055 | break; |
1056 | } |
1057 | |
1058 | if (err) { |
1059 | drm_dbg(dev, "Unable to make resource CPU accessible\n"); |
1060 | dma_resv_unlock(bo->base.resv); |
1061 | ret = VM_FAULT_SIGBUS; |
1062 | goto out_rpm; |
1063 | } |
1064 | } |
1065 | |
1066 | if (i915_ttm_cpu_maps_iomem(bo->resource)) |
1067 | wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); |
1068 | |
1069 | if (drm_dev_enter(dev, &idx)) { |
1070 | ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, |
1071 | TTM_BO_VM_NUM_PREFAULT); |
1072 | drm_dev_exit(idx); |
1073 | } else { |
1074 | ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); |
1075 | } |
1076 | |
1077 | if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) |
1078 | goto out_rpm; |
1079 | |
1080 | /* |
1081 | * ttm_bo_vm_reserve() already has dma_resv_lock. |
1082 | * userfault_count is protected by dma_resv lock and rpm wakeref. |
1083 | */ |
1084 | if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { |
1085 | obj->userfault_count = 1; |
1086 | spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); |
1087 | list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list); |
1088 | spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); |
1089 | } |
1090 | |
1091 | if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) |
1092 | intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, |
1093 | msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); |
1094 | |
1095 | i915_ttm_adjust_lru(obj); |
1096 | |
1097 | dma_resv_unlock(bo->base.resv); |
1098 | |
1099 | out_rpm: |
1100 | if (wakeref) |
1101 | intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); |
1102 | |
1103 | return ret; |
1104 | } |
1105 | |
1106 | static int |
1107 | vm_access_ttm(struct vm_area_struct *area, unsigned long addr, |
1108 | void *buf, int len, int write) |
1109 | { |
1110 | struct drm_i915_gem_object *obj = |
1111 | i915_ttm_to_gem(area->vm_private_data); |
1112 | |
1113 | if (i915_gem_object_is_readonly(obj) && write) |
1114 | return -EACCES; |
1115 | |
1116 | return ttm_bo_vm_access(area, addr, buf, len, write); |
1117 | } |
1118 | |
1119 | static void ttm_vm_open(struct vm_area_struct *vma) |
1120 | { |
1121 | struct drm_i915_gem_object *obj = |
1122 | i915_ttm_to_gem(vma->vm_private_data); |
1123 | |
1124 | GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); |
1125 | i915_gem_object_get(obj); |
1126 | } |
1127 | |
1128 | static void ttm_vm_close(struct vm_area_struct *vma) |
1129 | { |
1130 | struct drm_i915_gem_object *obj = |
1131 | i915_ttm_to_gem(vma->vm_private_data); |
1132 | |
1133 | GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); |
1134 | i915_gem_object_put(obj); |
1135 | } |
1136 | |
1137 | static const struct vm_operations_struct vm_ops_ttm = { |
1138 | .fault = vm_fault_ttm, |
1139 | .access = vm_access_ttm, |
1140 | .open = ttm_vm_open, |
1141 | .close = ttm_vm_close, |
1142 | }; |
1143 | |
1144 | #endif |
1145 | |
1146 | static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) |
1147 | { |
1148 | /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */ |
1149 | GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node)); |
1150 | |
1151 | return drm_vma_node_offset_addr(&obj->base.vma_node); |
1152 | } |
1153 | |
1154 | static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) |
1155 | { |
1156 | struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); |
1157 | intel_wakeref_t wakeref = 0; |
1158 | |
1159 | assert_object_held_shared(obj); |
1160 | |
1161 | if (i915_ttm_cpu_maps_iomem(bo->resource)) { |
1162 | wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); |
1163 | |
1164 | /* userfault_count is protected by obj lock and rpm wakeref. */ |
1165 | if (obj->userfault_count) { |
1166 | spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); |
1167 | list_del(&obj->userfault_link); |
1168 | spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); |
1169 | obj->userfault_count = 0; |
1170 | } |
1171 | } |
1172 | |
1173 | ttm_bo_unmap_virtual(i915_gem_to_ttm(obj)); |
1174 | |
1175 | if (wakeref) |
1176 | intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); |
1177 | } |
1178 | |
1179 | static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { |
1180 | .name = "i915_gem_object_ttm", |
1181 | .flags = I915_GEM_OBJECT_IS_SHRINKABLE | |
1182 | I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST, |
1183 | |
1184 | .get_pages = i915_ttm_get_pages, |
1185 | .put_pages = i915_ttm_put_pages, |
1186 | .truncate = i915_ttm_truncate, |
1187 | .shrink = i915_ttm_shrink, |
1188 | |
1189 | .adjust_lru = i915_ttm_adjust_lru, |
1190 | .delayed_free = i915_ttm_delayed_free, |
1191 | .migrate = i915_ttm_migrate, |
1192 | |
1193 | .mmap_offset = i915_ttm_mmap_offset, |
1194 | .unmap_virtual = i915_ttm_unmap_virtual, |
1195 | #ifdef notyet |
1196 | .mmap_ops = &vm_ops_ttm, |
1197 | #endif |
1198 | }; |
1199 | |
1200 | void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) |
1201 | { |
1202 | struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); |
1203 | |
1204 | i915_gem_object_release_memory_region(obj); |
1205 | mutex_destroy(&obj->ttm.get_io_page.lock); |
1206 | |
1207 | if (obj->ttm.created) { |
1208 | /* |
1209 | * We freely manage the shrinker LRU outside of the mm.pages life |
1210 | * cycle. As a result when destroying the object we should be |
1211 | * extra paranoid and ensure we remove it from the LRU, before |
1212 | * we free the object. |
1213 | * |
1214 | * Touching the ttm_shrinkable outside of the object lock here |
1215 | * should be safe now that the last GEM object ref was dropped. |
1216 | */ |
1217 | if (obj->mm.ttm_shrinkable) |
1218 | i915_gem_object_make_unshrinkable(obj); |
1219 | |
1220 | i915_ttm_backup_free(obj); |
1221 | |
1222 | /* This releases all gem object bindings to the backend. */ |
1223 | __i915_gem_free_object(obj); |
1224 | |
1225 | call_rcu(&obj->rcu, __i915_gem_free_object_rcu); |
1226 | } else { |
1227 | __i915_gem_object_fini(obj); |
1228 | } |
1229 | } |
1230 | |
1231 | /** |
1232 | * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object |
1233 | * @mem: The initial memory region for the object. |
1234 | * @obj: The gem object. |
1235 | * @size: Object size in bytes. |
1236 | * @flags: gem object flags. |
1237 | * |
1238 | * Return: 0 on success, negative error code on failure. |
1239 | */ |
1240 | int __i915_gem_ttm_object_init(struct intel_memory_region *mem, |
1241 | struct drm_i915_gem_object *obj, |
1242 | resource_size_t offset, |
1243 | resource_size_t size, |
1244 | resource_size_t page_size, |
1245 | unsigned int flags) |
1246 | { |
1247 | static struct lock_class_key lock_class; |
1248 | struct drm_i915_private *i915 = mem->i915; |
1249 | struct ttm_operation_ctx ctx = { |
1250 | .interruptible = true, |
1251 | .no_wait_gpu = false, |
1252 | }; |
1253 | enum ttm_bo_type bo_type; |
1254 | int ret; |
1255 | |
1256 | drm_gem_private_object_init(&i915->drm, &obj->base, size); |
1257 | i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); |
1258 | |
1259 | obj->bo_offset = offset; |
1260 | |
1261 | /* Don't put on a region list until we're either locked or fully initialized. */ |
1262 | obj->mm.region = mem; |
1263 | INIT_LIST_HEAD(&obj->mm.region_link); |
1264 | |
1265 | INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); |
1266 | rw_init(&obj->ttm.get_io_page.lock, "i915ttm"); |
1267 | bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : |
1268 | ttm_bo_type_kernel; |
1269 | |
1270 | obj->base.vma_node.driver_private = i915_gem_to_ttm(obj); |
1271 | |
1272 | /* Forcing the page size is kernel internal only */ |
1273 | GEM_BUG_ON(page_size && obj->mm.n_placements); |
1274 | |
1275 | /* |
1276 | * Keep an extra shrink pin to prevent the object from being made |
1277 | * shrinkable too early. If the ttm_tt is ever allocated in shmem, we |
1278 | * drop the pin. The TTM backend manages the shrinker LRU itself, |
1279 | * outside of the normal mm.pages life cycle. |
1280 | */ |
1281 | i915_gem_object_make_unshrinkable(obj); |
1282 | |
1283 | /* |
1284 | * If this function fails, it will call the destructor, but |
1285 | * our caller still owns the object. So no freeing in the |
1286 | * destructor until obj->ttm.created is true. |
1287 | * Similarly, in delayed_destroy, we can't call ttm_bo_put() |
1288 | * until successful initialization. |
1289 | */ |
1290 | ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type, |
1291 | &i915_sys_placement, page_size >> PAGE_SHIFT, |
1292 | &ctx, NULL, NULL, i915_ttm_bo_destroy); |
1293 | if (ret) |
1294 | return i915_ttm_err_to_gem(ret); |
1295 | |
1296 | obj->ttm.created = true; |
1297 | i915_gem_object_release_memory_region(obj); |
1298 | i915_gem_object_init_memory_region(obj, mem); |
1299 | i915_ttm_adjust_domains_after_move(obj); |
1300 | i915_ttm_adjust_gem_after_move(obj); |
1301 | i915_gem_object_unlock(obj); |
1302 | |
1303 | return 0; |
1304 | } |
1305 | |
1306 | static const struct intel_memory_region_ops ttm_system_region_ops = { |
1307 | .init_object = __i915_gem_ttm_object_init, |
1308 | .release = intel_region_ttm_fini, |
1309 | }; |
1310 | |
1311 | struct intel_memory_region * |
1312 | i915_gem_ttm_system_setup(struct drm_i915_private *i915, |
1313 | u16 type, u16 instance) |
1314 | { |
1315 | struct intel_memory_region *mr; |
1316 | |
1317 | mr = intel_memory_region_create(i915, 0, |
1318 | totalram_pages() << PAGE_SHIFT, |
1319 | PAGE_SIZE, 0, 0, |
1320 | type, instance, |
1321 | &ttm_system_region_ops); |
1322 | if (IS_ERR(mr)) |
1323 | return mr; |
1324 | |
1325 | intel_memory_region_set_name(mr, "system-ttm"); |
1326 | return mr; |
1327 | } |