File: dev/pci/drm/i915/gt/intel_timeline.c
Warning: line 620, column 29: Value stored to 'timelines' during its initialization is never read
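In the configuration analyzed here, GEM_BUG_ON() expands to ((void)0) (as the listing below shows), so the 'timelines' local initialised at line 620 only feeds assertions that compile away; the store is therefore dead in this build, which is what the checker flags. One possible rework is sketched after the listing.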
1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2016-2018 Intel Corporation |
5 | */ |
6 | |
7 | #include "i915_drv.h" |
8 | |
9 | #include "i915_active.h" |
10 | #include "i915_syncmap.h" |
11 | #include "intel_gt.h" |
12 | #include "intel_ring.h" |
13 | #include "intel_timeline.h" |
14 | |
15 | #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) |
16 | #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) |
17 | |
18 | #define CACHELINE_BITS 6 |
19 | #define CACHELINE_FREE CACHELINE_BITS |
20 | |
21 | struct intel_timeline_hwsp { |
22 | struct intel_gt *gt; |
23 | struct intel_gt_timelines *gt_timelines; |
24 | struct list_head free_link; |
25 | struct i915_vma *vma; |
26 | u64 free_bitmap; |
27 | }; |
28 | |
29 | static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) |
30 | { |
31 | struct drm_i915_private *i915 = gt->i915; |
32 | struct drm_i915_gem_object *obj; |
33 | struct i915_vma *vma; |
34 | |
35 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); |
36 | if (IS_ERR(obj)) |
37 | return ERR_CAST(obj); |
38 | |
39 | i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); |
40 | |
41 | vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); |
42 | if (IS_ERR(vma)) |
43 | i915_gem_object_put(obj); |
44 | |
45 | return vma; |
46 | } |
47 | |
48 | static struct i915_vma * |
49 | hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) |
50 | { |
51 | struct intel_gt_timelines *gt = &timeline->gt->timelines; |
52 | struct intel_timeline_hwsp *hwsp; |
53 | |
54 | BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); |
55 | |
56 | spin_lock_irq(&gt->hwsp_lock); |
57 | |
58 | /* hwsp_free_list only contains HWSP that have available cachelines */ |
59 | hwsp = list_first_entry_or_null(&gt->hwsp_free_list, |
60 | typeof(*hwsp), free_link); |
61 | if (!hwsp) { |
62 | struct i915_vma *vma; |
63 | |
64 | spin_unlock_irq(&gt->hwsp_lock); |
65 | |
66 | hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); |
67 | if (!hwsp) |
68 | return ERR_PTR(-ENOMEM); |
69 | |
70 | vma = __hwsp_alloc(timeline->gt); |
71 | if (IS_ERR(vma)) { |
72 | kfree(hwsp); |
73 | return vma; |
74 | } |
75 | |
76 | GT_TRACE(timeline->gt, "new HWSP allocated\n"); |
77 | |
78 | vma->private = hwsp; |
79 | hwsp->gt = timeline->gt; |
80 | hwsp->vma = vma; |
81 | hwsp->free_bitmap = ~0ull; |
82 | hwsp->gt_timelines = gt; |
83 | |
84 | spin_lock_irq(&gt->hwsp_lock); |
85 | list_add(&hwsp->free_link, &gt->hwsp_free_list); |
86 | } |
87 | |
88 | GEM_BUG_ON(!hwsp->free_bitmap); |
89 | *cacheline = __ffs64(hwsp->free_bitmap); |
90 | hwsp->free_bitmap &= ~BIT_ULL(*cacheline); |
91 | if (!hwsp->free_bitmap) |
92 | list_del(&hwsp->free_link); |
93 | |
94 | spin_unlock_irq(&gt->hwsp_lock); |
95 | |
96 | GEM_BUG_ON(hwsp->vma->private != hwsp); |
97 | return hwsp->vma; |
98 | } |
99 | |
100 | static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) |
101 | { |
102 | struct intel_gt_timelines *gt = hwsp->gt_timelines; |
103 | unsigned long flags; |
104 | |
105 | spin_lock_irqsave(&gt->hwsp_lock, flags); |
106 | |
107 | /* As a cacheline becomes available, publish the HWSP on the freelist */ |
108 | if (!hwsp->free_bitmap) |
109 | list_add_tail(&hwsp->free_link, &gt->hwsp_free_list); |
110 | |
111 | GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); |
112 | hwsp->free_bitmap |= BIT_ULL(cacheline); |
113 | |
114 | /* And if no one is left using it, give the page back to the system */ |
115 | if (hwsp->free_bitmap == ~0ull) { |
116 | i915_vma_put(hwsp->vma); |
117 | list_del(&hwsp->free_link); |
118 | kfree(hwsp); |
119 | } |
120 | |
121 | spin_unlock_irqrestore(&gt->hwsp_lock, flags); |
122 | } |
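The two helpers above form a tiny slab-style allocator: hwsp_alloc() hands out one 64-byte cacheline per set bit of free_bitmap, and __idle_hwsp_free() gives it back, releasing the page once the bitmap is full again. A minimal standalone sketch of that bitmap scheme (illustrative only, not driver code; it uses the GCC/Clang __builtin_ctzll that the driver's __ffs64 also maps to):

#include <stdint.h>

/* 64 cachelines of 64 bytes each cover one 4 KiB page: 64 * 64 == 4096 */
static uint64_t free_bitmap = ~0ull;		/* a set bit means the slot is free */

/* grab the lowest free cacheline index, or -1 when the page is exhausted */
static int cacheline_get(void)
{
	int idx;

	if (!free_bitmap)
		return -1;
	idx = __builtin_ctzll(free_bitmap);	/* lowest set bit, like __ffs64() */
	free_bitmap &= ~(1ull << idx);
	return idx;
}

/* return a cacheline; once the bitmap reads ~0ull the page could be freed */
static void cacheline_put(int idx)
{
	free_bitmap |= 1ull << idx;
}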
123 | |
124 | static void __rcu_cacheline_free(struct rcu_head *rcu) |
125 | { |
126 | struct intel_timeline_cacheline *cl = |
127 | container_of(rcu, typeof(*cl), rcu); |
128 | |
129 | /* Must wait until after all *rq->hwsp are complete before removing */ |
130 | i915_gem_object_unpin_map(cl->hwsp->vma->obj); |
131 | __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); |
132 | |
133 | i915_active_fini(&cl->active); |
134 | kfree(cl); |
135 | } |
136 | |
137 | static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) |
138 | { |
139 | GEM_BUG_ON(!i915_active_is_idle(&cl->active)); |
140 | call_rcu(&cl->rcu, __rcu_cacheline_free); |
141 | } |
142 | |
143 | __i915_active_call |
144 | static void __cacheline_retire(struct i915_active *active) |
145 | { |
146 | struct intel_timeline_cacheline *cl = |
147 | container_of(active, typeof(*cl), active); |
148 | |
149 | i915_vma_unpin(cl->hwsp->vma); |
150 | if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) |
151 | __idle_cacheline_free(cl); |
152 | } |
153 | |
154 | static int __cacheline_active(struct i915_active *active) |
155 | { |
156 | struct intel_timeline_cacheline *cl = |
157 | container_of(active, typeof(*cl), active); |
158 | |
159 | __i915_vma_pin(cl->hwsp->vma); |
160 | return 0; |
161 | } |
162 | |
163 | static struct intel_timeline_cacheline * |
164 | cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) |
165 | { |
166 | struct intel_timeline_cacheline *cl; |
167 | void *vaddr; |
168 | |
169 | GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); |
170 | |
171 | cl = kmalloc(sizeof(*cl), GFP_KERNEL); |
172 | if (!cl) |
173 | return ERR_PTR(-ENOMEM); |
174 | |
175 | vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); |
176 | if (IS_ERR(vaddr)) { |
177 | kfree(cl); |
178 | return ERR_CAST(vaddr); |
179 | } |
180 | |
181 | cl->hwsp = hwsp; |
182 | cl->vaddr = page_pack_bits(vaddr, cacheline); |
183 | |
184 | i915_active_init(&cl->active, __cacheline_active, __cacheline_retire); |
185 | |
186 | return cl; |
187 | } |
188 | |
189 | static void cacheline_acquire(struct intel_timeline_cacheline *cl, |
190 | u32 ggtt_offset) |
191 | { |
192 | if (!cl) |
193 | return; |
194 | |
195 | cl->ggtt_offset = ggtt_offset; |
196 | i915_active_acquire(&cl->active); |
197 | } |
198 | |
199 | static void cacheline_release(struct intel_timeline_cacheline *cl) |
200 | { |
201 | if (cl) |
202 | i915_active_release(&cl->active); |
203 | } |
204 | |
205 | static void cacheline_free(struct intel_timeline_cacheline *cl) |
206 | { |
207 | if (!i915_active_acquire_if_busy(&cl->active)) { |
208 | __idle_cacheline_free(cl); |
209 | return; |
210 | } |
211 | |
212 | GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); |
213 | cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); |
214 | |
215 | i915_active_release(&cl->active); |
216 | } |
217 | |
218 | static int intel_timeline_init(struct intel_timeline *timeline, |
219 | struct intel_gt *gt, |
220 | struct i915_vma *hwsp, |
221 | unsigned int offset) |
222 | { |
223 | void *vaddr; |
224 | |
225 | kref_init(&timeline->kref); |
226 | atomic_set(&timeline->pin_count, 0); |
227 | |
228 | timeline->gt = gt; |
229 | |
230 | timeline->has_initial_breadcrumb = !hwsp; |
231 | timeline->hwsp_cacheline = NULL; |
232 | |
233 | if (!hwsp) { |
234 | struct intel_timeline_cacheline *cl; |
235 | unsigned int cacheline; |
236 | |
237 | hwsp = hwsp_alloc(timeline, &cacheline); |
238 | if (IS_ERR(hwsp)) |
239 | return PTR_ERR(hwsp); |
240 | |
241 | cl = cacheline_alloc(hwsp->private, cacheline); |
242 | if (IS_ERR(cl)) { |
243 | __idle_hwsp_free(hwsp->private, cacheline); |
244 | return PTR_ERR(cl); |
245 | } |
246 | |
247 | timeline->hwsp_cacheline = cl; |
248 | timeline->hwsp_offset = cacheline * CACHELINE_BYTES; |
249 | |
250 | vaddr = page_mask_bits(cl->vaddr); |
251 | } else { |
252 | timeline->hwsp_offset = offset; |
253 | vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); |
254 | if (IS_ERR(vaddr)) |
255 | return PTR_ERR(vaddr); |
256 | } |
257 | |
258 | timeline->hwsp_seqno = |
259 | memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); |
260 | |
261 | timeline->hwsp_ggtt = i915_vma_get(hwsp); |
262 | GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); |
263 | |
264 | timeline->fence_context = dma_fence_context_alloc(1); |
265 | |
266 | rw_init(&timeline->mutex, "itmln"); |
267 | |
268 | INIT_ACTIVE_FENCE(&timeline->last_request); |
269 | INIT_LIST_HEAD(&timeline->requests); |
270 | |
271 | i915_syncmap_init(&timeline->sync); |
272 | |
273 | return 0; |
274 | } |
275 | |
276 | void intel_gt_init_timelines(struct intel_gt *gt) |
277 | { |
278 | struct intel_gt_timelines *timelines = &gt->timelines; |
279 | |
280 | mtx_init(&timelines->lock, IPL_NONE); |
281 | INIT_LIST_HEAD(&timelines->active_list); |
282 | |
283 | mtx_init(&timelines->hwsp_lock, IPL_TTY); |
284 | INIT_LIST_HEAD(&timelines->hwsp_free_list); |
285 | } |
286 | |
287 | static void intel_timeline_fini(struct intel_timeline *timeline) |
288 | { |
289 | GEM_BUG_ON(atomic_read(&timeline->pin_count)); |
290 | GEM_BUG_ON(!list_empty(&timeline->requests)); |
291 | GEM_BUG_ON(timeline->retire); |
292 | |
293 | if (timeline->hwsp_cacheline) |
294 | cacheline_free(timeline->hwsp_cacheline); |
295 | else |
296 | i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); |
297 | |
298 | i915_vma_put(timeline->hwsp_ggtt); |
299 | |
300 | /* |
301 | * A small race exists between intel_gt_retire_requests_timeout and |
302 | * intel_timeline_exit which could result in the syncmap not getting |
303 | * free'd. Rather than work too hard to seal this race, simply cleanup |
304 | * the syncmap on fini. |
305 | */ |
306 | i915_syncmap_free(&timeline->sync); |
307 | } |
308 | |
309 | struct intel_timeline * |
310 | __intel_timeline_create(struct intel_gt *gt, |
311 | struct i915_vma *global_hwsp, |
312 | unsigned int offset) |
313 | { |
314 | struct intel_timeline *timeline; |
315 | int err; |
316 | |
317 | timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); |
318 | if (!timeline) |
319 | return ERR_PTR(-ENOMEM); |
320 | |
321 | err = intel_timeline_init(timeline, gt, global_hwsp, offset); |
322 | if (err) { |
323 | kfree(timeline); |
324 | return ERR_PTR(err); |
325 | } |
326 | |
327 | return timeline; |
328 | } |
329 | |
330 | void __intel_timeline_pin(struct intel_timeline *tl) |
331 | { |
332 | GEM_BUG_ON(!atomic_read(&tl->pin_count)); |
333 | atomic_inc(&tl->pin_count); |
334 | } |
335 | |
336 | int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww) |
337 | { |
338 | int err; |
339 | |
340 | if (atomic_add_unless(&tl->pin_count, 1, 0)) |
341 | return 0; |
342 | |
343 | err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH); |
344 | if (err) |
345 | return err; |
346 | |
347 | tl->hwsp_offset = |
348 | i915_ggtt_offset(tl->hwsp_ggtt) + |
349 | offset_in_page(tl->hwsp_offset); |
350 | GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", |
351 | tl->fence_context, tl->hwsp_offset); |
352 | |
353 | cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset); |
354 | if (atomic_fetch_inc(&tl->pin_count)) { |
355 | cacheline_release(tl->hwsp_cacheline); |
356 | __i915_vma_unpin(tl->hwsp_ggtt); |
357 | } |
358 | |
359 | return 0; |
360 | } |
361 | |
362 | void intel_timeline_reset_seqno(const struct intel_timeline *tl) |
363 | { |
364 | /* Must be pinned to be writable, and no requests in flight. */ |
365 | GEM_BUG_ON(!atomic_read(&tl->pin_count)); |
366 | WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); |
367 | } |
368 | |
369 | void intel_timeline_enter(struct intel_timeline *tl) |
370 | { |
371 | struct intel_gt_timelines *timelines = &tl->gt->timelines; |
372 | |
373 | /* |
374 | * Pretend we are serialised by the timeline->mutex. |
375 | * |
376 | * While generally true, there are a few exceptions to the rule |
377 | * for the engine->kernel_context being used to manage power |
378 | * transitions. As the engine_park may be called from under any |
379 | * timeline, it uses the power mutex as a global serialisation |
380 | * lock to prevent any other request entering its timeline. |
381 | * |
382 | * The rule is generally tl->mutex, otherwise engine->wakeref.mutex. |
383 | * |
384 | * However, intel_gt_retire_requests() does not know which engine |
385 | * it is retiring along and so cannot partake in the engine-pm |
386 | * barrier, and there we use the tl->active_count as a means to |
387 | * pin the timeline in the active_list while the locks are dropped. |
388 | * Ergo, as that is outside of the engine-pm barrier, we need to |
389 | * use atomic to manipulate tl->active_count. |
390 | */ |
391 | lockdep_assert_held(&tl->mutex); |
392 | |
393 | if (atomic_add_unless(&tl->active_count, 1, 0)) |
394 | return; |
395 | |
396 | spin_lock(&timelines->lock); |
397 | if (!atomic_fetch_inc(&tl->active_count)) { |
398 | /* |
399 | * The HWSP is volatile, and may have been lost while inactive, |
400 | * e.g. across suspend/resume. Be paranoid, and ensure that |
401 | * the HWSP value matches our seqno so we don't proclaim |
402 | * the next request as already complete. |
403 | */ |
404 | intel_timeline_reset_seqno(tl); |
405 | list_add_tail(&tl->link, &timelines->active_list); |
406 | } |
407 | spin_unlock(&timelines->lock); |
408 | } |
409 | |
410 | void intel_timeline_exit(struct intel_timeline *tl) |
411 | { |
412 | struct intel_gt_timelines *timelines = &tl->gt->timelines; |
413 | |
414 | /* See intel_timeline_enter() */ |
415 | lockdep_assert_held(&tl->mutex); |
416 | |
417 | GEM_BUG_ON(!atomic_read(&tl->active_count)); |
418 | if (atomic_add_unless(&tl->active_count, -1, 1)) |
419 | return; |
420 | |
421 | spin_lock(&timelines->lock); |
422 | if (atomic_dec_and_test(&tl->active_count)) |
423 | list_del(&tl->link); |
424 | spin_unlock(&timelines->lock); |
425 | |
426 | /* |
427 | * Since this timeline is idle, all barriers upon which we were waiting |
428 | * must also be complete and so we can discard the last used barriers |
429 | * without loss of information. |
430 | */ |
431 | i915_syncmap_free(&tl->sync); |
432 | } |
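The comment in intel_timeline_enter() explains why active_count is handled atomically: the common case must avoid timelines->lock, and the lock is only taken for the 0-to-1 and 1-to-0 transitions that add or remove the timeline from active_list. A rough portable restatement of that pattern (a sketch in C11 atomics and pthreads, not driver code; all names here are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct tracked {
	atomic_int active_count;
	bool on_list;                 /* stands in for list_add/list_del */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void tracked_enter(struct tracked *t)
{
	int cur = atomic_load(&t->active_count);

	/* fast path: already active, just bump the count (atomic_add_unless) */
	while (cur != 0)
		if (atomic_compare_exchange_weak(&t->active_count, &cur, cur + 1))
			return;

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_add(&t->active_count, 1) == 0)
		t->on_list = true;    /* first user: publish on the active list */
	pthread_mutex_unlock(&list_lock);
}

static void tracked_exit(struct tracked *t)
{
	int cur = atomic_load(&t->active_count);

	/* fast path: not the last user, just drop the count */
	while (cur > 1)
		if (atomic_compare_exchange_weak(&t->active_count, &cur, cur - 1))
			return;

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&t->active_count, 1) == 1)
		t->on_list = false;   /* last user: remove from the active list */
	pthread_mutex_unlock(&list_lock);
}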
433 | |
434 | static u32 timeline_advance(struct intel_timeline *tl) |
435 | { |
436 | GEM_BUG_ON(!atomic_read(&tl->pin_count)); |
437 | GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); |
438 | |
439 | return tl->seqno += 1 + tl->has_initial_breadcrumb; |
440 | } |
441 | |
442 | static void timeline_rollback(struct intel_timeline *tl) |
443 | { |
444 | tl->seqno -= 1 + tl->has_initial_breadcrumb; |
445 | } |
446 | |
447 | static noinline int |
448 | __intel_timeline_get_seqno(struct intel_timeline *tl, |
449 | struct i915_request *rq, |
450 | u32 *seqno) |
451 | { |
452 | struct intel_timeline_cacheline *cl; |
453 | unsigned int cacheline; |
454 | struct i915_vma *vma; |
455 | void *vaddr; |
456 | int err; |
457 | |
458 | might_lock(&tl->gt->ggtt->vm.mutex); |
459 | GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context); |
460 | |
461 | /* |
462 | * If there is an outstanding GPU reference to this cacheline, |
463 | * such as it being sampled by a HW semaphore on another timeline, |
464 | * we cannot wraparound our seqno value (the HW semaphore does |
465 | * a strict greater-than-or-equals compare, not i915_seqno_passed). |
466 | * So if the cacheline is still busy, we must detach ourselves |
467 | * from it and leave it inflight alongside its users. |
468 | * |
469 | * However, if nobody is watching and we can guarantee that nobody |
470 | * will, we could simply reuse the same cacheline. |
471 | * |
472 | * if (i915_active_request_is_signaled(&tl->last_request) && |
473 | * i915_active_is_signaled(&tl->hwsp_cacheline->active)) |
474 | * return 0; |
475 | * |
476 | * That seems unlikely for a busy timeline that needed to wrap in |
477 | * the first place, so just replace the cacheline. |
478 | */ |
479 | |
480 | vma = hwsp_alloc(tl, &cacheline); |
481 | if (IS_ERR(vma)) { |
482 | err = PTR_ERR(vma); |
483 | goto err_rollback; |
484 | } |
485 | |
486 | err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); |
487 | if (err) { |
488 | __idle_hwsp_free(vma->private, cacheline); |
489 | goto err_rollback; |
490 | } |
491 | |
492 | cl = cacheline_alloc(vma->private, cacheline); |
493 | if (IS_ERR(cl)) { |
494 | err = PTR_ERR(cl); |
495 | __idle_hwsp_free(vma->private, cacheline); |
496 | goto err_unpin; |
497 | } |
498 | GEM_BUG_ON(cl->hwsp->vma != vma); |
499 | |
500 | /* |
501 | * Attach the old cacheline to the current request, so that we only |
502 | * free it after the current request is retired, which ensures that |
503 | * all writes into the cacheline from previous requests are complete. |
504 | */ |
505 | err = i915_active_ref(&tl->hwsp_cacheline->active, |
506 | tl->fence_context, |
507 | &rq->fence); |
508 | if (err) |
509 | goto err_cacheline; |
510 | |
511 | cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ |
512 | cacheline_free(tl->hwsp_cacheline); |
513 | |
514 | i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ |
515 | i915_vma_put(tl->hwsp_ggtt); |
516 | |
517 | tl->hwsp_ggtt = i915_vma_get(vma); |
518 | |
519 | vaddr = page_mask_bits(cl->vaddr); |
520 | tl->hwsp_offset = cacheline * CACHELINE_BYTES; |
521 | tl->hwsp_seqno = |
522 | memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); |
523 | |
524 | tl->hwsp_offset += i915_ggtt_offset(vma); |
525 | GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", |
526 | tl->fence_context, tl->hwsp_offset); |
527 | |
528 | cacheline_acquire(cl, tl->hwsp_offset); |
529 | tl->hwsp_cacheline = cl; |
530 | |
531 | *seqno = timeline_advance(tl); |
532 | GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); |
533 | return 0; |
534 | |
535 | err_cacheline: |
536 | cacheline_free(cl); |
537 | err_unpin: |
538 | i915_vma_unpin(vma); |
539 | err_rollback: |
540 | timeline_rollback(tl); |
541 | return err; |
542 | } |
543 | |
544 | int intel_timeline_get_seqno(struct intel_timeline *tl, |
545 | struct i915_request *rq, |
546 | u32 *seqno) |
547 | { |
548 | *seqno = timeline_advance(tl); |
549 | |
550 | /* Replace the HWSP on wraparound for HW semaphores */ |
551 | if (unlikely(!*seqno && tl->hwsp_cacheline)) |
552 | return __intel_timeline_get_seqno(tl, rq, seqno); |
553 | |
554 | return 0; |
555 | } |
556 | |
557 | static int cacheline_ref(struct intel_timeline_cacheline *cl, |
558 | struct i915_request *rq) |
559 | { |
560 | return i915_active_add_request(&cl->active, rq); |
561 | } |
562 | |
563 | int intel_timeline_read_hwsp(struct i915_request *from, |
564 | struct i915_request *to, |
565 | u32 *hwsp) |
566 | { |
567 | struct intel_timeline_cacheline *cl; |
568 | int err; |
569 | |
570 | GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline)); |
571 | |
572 | rcu_read_lock(); |
573 | cl = rcu_dereference(from->hwsp_cacheline); |
574 | if (i915_request_completed(from)) /* confirm cacheline is valid */ |
575 | goto unlock; |
576 | if (unlikely(!i915_active_acquire_if_busy(&cl->active))) |
577 | goto unlock; /* seqno wrapped and completed! */ |
578 | if (unlikely(i915_request_completed(from))) |
579 | goto release; |
580 | rcu_read_unlock(); |
581 | |
582 | err = cacheline_ref(cl, to); |
583 | if (err) |
584 | goto out; |
585 | |
586 | *hwsp = cl->ggtt_offset; |
587 | out: |
588 | i915_active_release(&cl->active); |
589 | return err; |
590 | |
591 | release: |
592 | i915_active_release(&cl->active); |
593 | unlock: |
594 | rcu_read_unlock(); |
595 | return 1; |
596 | } |
597 | |
598 | void intel_timeline_unpin(struct intel_timeline *tl) |
599 | { |
600 | GEM_BUG_ON(!atomic_read(&tl->pin_count)); |
601 | if (!atomic_dec_and_test(&tl->pin_count)) |
602 | return; |
603 | |
604 | cacheline_release(tl->hwsp_cacheline); |
605 | |
606 | __i915_vma_unpin(tl->hwsp_ggtt); |
607 | } |
608 | |
609 | void __intel_timeline_free(struct kref *kref) |
610 | { |
611 | struct intel_timeline *timeline = |
612 | container_of(kref, typeof(*timeline), kref); |
613 | |
614 | intel_timeline_fini(timeline); |
615 | kfree_rcu(timeline, rcu); |
616 | } |
617 | |
618 | void intel_gt_fini_timelines(struct intel_gt *gt) |
619 | { |
620 | struct intel_gt_timelines *timelines = &gt->timelines; |
Value stored to 'timelines' during its initialization is never read | |
621 | |
622 | GEM_BUG_ON(!list_empty(&timelines->active_list)); |
623 | GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); |
624 | } |
625 | |
626 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
627 | #include "gt/selftests/mock_timeline.c" |
628 | #include "gt/selftest_timeline.c" |
629 | #endif |
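On the dead store flagged at line 620: with GEM_BUG_ON() compiled out, 'timelines' is written once and never read. One possible rework, offered only as a sketch and not as the driver's actual fix, is to drop the local so the (possibly compiled-out) assertions reference the expression directly:

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	/* both lists must already be empty by the time the GT is torn down */
	GEM_BUG_ON(!list_empty(&gt->timelines.active_list));
	GEM_BUG_ON(!list_empty(&gt->timelines.hwsp_free_list));
}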