Bug Summary

File:    dev/pci/drm/i915/gt/intel_timeline.c
Warning: line 405, column 29
         Value stored to 'timelines' during its initialization is never read
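
Analysis note: the checker fires because the only use of the local pointer, GEM_BUG_ON(!list_empty(&timelines->active_list)) at line 407, expands to ((void)0) in this kernel configuration (the expansion is visible in the annotated source below), so the value stored at line 405 is never read. Below is a minimal, self-contained sketch of the flagged pattern; the struct and helper definitions are illustrative stand-ins, not the kernel's own headers.

#define GEM_BUG_ON(expr) ((void)0)	/* expansion used in this build; the argument is discarded */

struct list_head { struct list_head *next, *prev; };
struct intel_gt_timelines { struct list_head active_list; };
struct intel_gt { struct intel_gt_timelines timelines; };

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;	/* never called here: GEM_BUG_ON drops its argument */
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;	/* dead store reported at line 405 */

	GEM_BUG_ON(!list_empty(&timelines->active_list));	/* compiles to ((void)0) in this build */
}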

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name intel_timeline.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/gt/intel_timeline.c
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2016-2018 Intel Corporation
4 */
5
6#include <drm/drm_cache.h>
7
8#include "gem/i915_gem_internal.h"
9
10#include "i915_active.h"
11#include "i915_drv.h"
12#include "i915_syncmap.h"
13#include "intel_gt.h"
14#include "intel_ring.h"
15#include "intel_timeline.h"
16
17#define TIMELINE_SEQNO_BYTES 8
18
19static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
20{
21 struct drm_i915_private *i915 = gt->i915;
22 struct drm_i915_gem_object *obj;
23 struct i915_vma *vma;
24
25 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
26 if (IS_ERR(obj))
27 return ERR_CAST(obj);
28
29 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
30
31 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
32 if (IS_ERR(vma))
33 i915_gem_object_put(obj);
34
35 return vma;
36}
37
38static void __timeline_retire(struct i915_active *active)
39{
40 struct intel_timeline *tl =
41 container_of(active, typeof(*tl), active);
42
43 i915_vma_unpin(tl->hwsp_ggtt);
44 intel_timeline_put(tl);
45}
46
47static int __timeline_active(struct i915_active *active)
48{
49 struct intel_timeline *tl =
50 container_of(active, typeof(*tl), active);
51
52 __i915_vma_pin(tl->hwsp_ggtt);
53 intel_timeline_get(tl);
54 return 0;
55}
56
57I915_SELFTEST_EXPORT int
58intel_timeline_pin_map(struct intel_timeline *timeline)
59{
60 struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
61 u32 ofs = offset_in_page(timeline->hwsp_offset);
62 void *vaddr;
63
64 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
65 if (IS_ERR(vaddr))
66 return PTR_ERR(vaddr);
67
68 timeline->hwsp_map = vaddr;
69 timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
70 drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
71
72 return 0;
73}
74
75static int intel_timeline_init(struct intel_timeline *timeline,
76 struct intel_gt *gt,
77 struct i915_vma *hwsp,
78 unsigned int offset)
79{
80 kref_init(&timeline->kref);
81 atomic_set(&timeline->pin_count, 0);
82
83 timeline->gt = gt;
84
85 if (hwsp) {
86 timeline->hwsp_offset = offset;
87 timeline->hwsp_ggtt = i915_vma_get(hwsp);
88 } else {
89 timeline->has_initial_breadcrumb = true;
90 hwsp = hwsp_alloc(gt);
91 if (IS_ERR(hwsp))
92 return PTR_ERR(hwsp);
93 timeline->hwsp_ggtt = hwsp;
94 }
95
96 timeline->hwsp_map = NULL;
97 timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;
98
99 GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
100
101 timeline->fence_context = dma_fence_context_alloc(1);
102
103 rw_init(&timeline->mutex, "itmln");
104
105 INIT_ACTIVE_FENCE(&timeline->last_request);
106 INIT_LIST_HEAD(&timeline->requests);
107
108 i915_syncmap_init(&timeline->sync);
109 i915_active_init(&timeline->active, __timeline_active,
110 __timeline_retire, 0);
111
112 return 0;
113}
114
115void intel_gt_init_timelines(struct intel_gt *gt)
116{
117 struct intel_gt_timelines *timelines = &gt->timelines;
118
119 mtx_init(&timelines->lock, IPL_NONE);
120 INIT_LIST_HEAD(&timelines->active_list);
121}
122
123static void intel_timeline_fini(struct rcu_head *rcu)
124{
125 struct intel_timeline *timeline =
126 container_of(rcu, struct intel_timeline, rcu);
127
128 if (timeline->hwsp_map)
129 i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
130
131 i915_vma_put(timeline->hwsp_ggtt);
132 i915_active_fini(&timeline->active);
133
134 /*
135 * A small race exists between intel_gt_retire_requests_timeout and
136 * intel_timeline_exit which could result in the syncmap not getting
137 * free'd. Rather than work to hard to seal this race, simply cleanup
138 * the syncmap on fini.
139 */
140 i915_syncmap_free(&timeline->sync);
141
142 kfree(timeline);
143}
144
145struct intel_timeline *
146__intel_timeline_create(struct intel_gt *gt,
147 struct i915_vma *global_hwsp,
148 unsigned int offset)
149{
150 struct intel_timeline *timeline;
151 int err;
152
153 timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
154 if (!timeline)
155 return ERR_PTR(-ENOMEM);
156
157 err = intel_timeline_init(timeline, gt, global_hwsp, offset);
158 if (err) {
159 kfree(timeline);
160 return ERR_PTR(err);
161 }
162
163 return timeline;
164}
165
166struct intel_timeline *
167intel_timeline_create_from_engine(struct intel_engine_cs *engine,
168 unsigned int offset)
169{
170 struct i915_vma *hwsp = engine->status_page.vma;
171 struct intel_timeline *tl;
172
173 tl = __intel_timeline_create(engine->gt, hwsp, offset);
174 if (IS_ERR(tl))
175 return tl;
176
177 /* Borrow a nearby lock; we only create these timelines during init */
178 mutex_lock(&hwsp->vm->mutex);
179 list_add_tail(&tl->engine_link, &engine->status_page.timelines);
180 mutex_unlock(&hwsp->vm->mutex);
181
182 return tl;
183}
184
185void __intel_timeline_pin(struct intel_timeline *tl)
186{
187 GEM_BUG_ON(!atomic_read(&tl->pin_count));
188 atomic_inc(&tl->pin_count);
189}
190
191int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
192{
193 int err;
194
195 if (atomic_add_unless(&tl->pin_count, 1, 0))
196 return 0;
197
198 if (!tl->hwsp_map) {
199 err = intel_timeline_pin_map(tl);
200 if (err)
201 return err;
202 }
203
204 err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
205 if (err)
206 return err;
207
208 tl->hwsp_offset =
209 i915_ggtt_offset(tl->hwsp_ggtt) +
210 offset_in_page(tl->hwsp_offset);
211 GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
212 tl->fence_context, tl->hwsp_offset);
213
214 i915_active_acquire(&tl->active);
215 if (atomic_fetch_inc(&tl->pin_count)) {
216 i915_active_release(&tl->active);
217 __i915_vma_unpin(tl->hwsp_ggtt);
218 }
219
220 return 0;
221}
222
223void intel_timeline_reset_seqno(const struct intel_timeline *tl)
224{
225 u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
226 /* Must be pinned to be writable, and no requests in flight. */
227 GEM_BUG_ON(!atomic_read(&tl->pin_count));
228
229 memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
230 WRITE_ONCE(*hwsp_seqno, tl->seqno);
231 drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
232}
233
234void intel_timeline_enter(struct intel_timeline *tl)
235{
236 struct intel_gt_timelines *timelines = &tl->gt->timelines;
237
238 /*
239 * Pretend we are serialised by the timeline->mutex.
240 *
241 * While generally true, there are a few exceptions to the rule
242 * for the engine->kernel_context being used to manage power
243 * transitions. As the engine_park may be called from under any
244 * timeline, it uses the power mutex as a global serialisation
245 * lock to prevent any other request entering its timeline.
246 *
247 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
248 *
249 * However, intel_gt_retire_request() does not know which engine
250 * it is retiring along and so cannot partake in the engine-pm
251 * barrier, and there we use the tl->active_count as a means to
252 * pin the timeline in the active_list while the locks are dropped.
253 * Ergo, as that is outside of the engine-pm barrier, we need to
254 * use atomic to manipulate tl->active_count.
255 */
256 lockdep_assert_held(&tl->mutex);
257
258 if (atomic_add_unless(&tl->active_count, 1, 0))
259 return;
260
261 spin_lock(&timelines->lock);
262 if (!atomic_fetch_inc(&tl->active_count)) {
263 /*
264 * The HWSP is volatile, and may have been lost while inactive,
265 * e.g. across suspend/resume. Be paranoid, and ensure that
266 * the HWSP value matches our seqno so we don't proclaim
267 * the next request as already complete.
268 */
269 intel_timeline_reset_seqno(tl);
270 list_add_tail(&tl->link, &timelines->active_list);
271 }
272 spin_unlock(&timelines->lock);
273}
274
275void intel_timeline_exit(struct intel_timeline *tl)
276{
277 struct intel_gt_timelines *timelines = &tl->gt->timelines;
278
279 /* See intel_timeline_enter() */
280 lockdep_assert_held(&tl->mutex);
281
282 GEM_BUG_ON(!atomic_read(&tl->active_count));
283 if (atomic_add_unless(&tl->active_count, -1, 1))
284 return;
285
286 spin_lock(&timelines->lock);
287 if (atomic_dec_and_test(&tl->active_count))
288 list_del(&tl->link);
289 spin_unlock(&timelines->lock);
290
291 /*
292 * Since this timeline is idle, all bariers upon which we were waiting
293 * must also be complete and so we can discard the last used barriers
294 * without loss of information.
295 */
296 i915_syncmap_free(&tl->sync);
297}
298
299static u32 timeline_advance(struct intel_timeline *tl)
300{
301 GEM_BUG_ON(!atomic_read(&tl->pin_count));
302 GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
303
304 return tl->seqno += 1 + tl->has_initial_breadcrumb;
305}
306
307 static noinline int
308__intel_timeline_get_seqno(struct intel_timeline *tl,
309 u32 *seqno)
310{
311 u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
312
313 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
314 if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
315 next_ofs = offset_in_page(next_ofs + BIT(5));
316
317 tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
318 tl->hwsp_seqno = tl->hwsp_map + next_ofs;
319 intel_timeline_reset_seqno(tl);
320
321 *seqno = timeline_advance(tl);
322 GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
323 return 0;
324}
325
326int intel_timeline_get_seqno(struct intel_timeline *tl,
327 struct i915_request *rq,
328 u32 *seqno)
329{
330 *seqno = timeline_advance(tl);
331
332 /* Replace the HWSP on wraparound for HW semaphores */
333 if (unlikely(!*seqno && tl->has_initial_breadcrumb))
334 return __intel_timeline_get_seqno(tl, seqno);
335
336 return 0;
337}
338
339int intel_timeline_read_hwsp(struct i915_request *from,
340 struct i915_request *to,
341 u32 *hwsp)
342{
343 struct intel_timeline *tl;
344 int err;
345
346 rcu_read_lock();
347 tl = rcu_dereference(from->timeline);
348 if (i915_request_signaled(from) ||
349 !i915_active_acquire_if_busy(&tl->active))
350 tl = NULL;
351
352 if (tl) {
353 /* hwsp_offset may wraparound, so use from->hwsp_seqno */
354 *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
355 offset_in_page(from->hwsp_seqno);
356 }
357
358 /* ensure we wait on the right request, if not, we completed */
359 if (tl && __i915_request_is_complete(from)) {
360 i915_active_release(&tl->active);
361 tl = NULL;
362 }
363 rcu_read_unlock();
364
365 if (!tl)
366 return 1;
367
368 /* Can't do semaphore waits on kernel context */
369 if (!tl->has_initial_breadcrumb) {
370 err = -EINVAL;
371 goto out;
372 }
373
374 err = i915_active_add_request(&tl->active, to);
375
376out:
377 i915_active_release(&tl->active);
378 return err;
379}
380
381void intel_timeline_unpin(struct intel_timeline *tl)
382{
383 GEM_BUG_ON(!atomic_read(&tl->pin_count));
384 if (!atomic_dec_and_test(&tl->pin_count))
385 return;
386
387 i915_active_release(&tl->active);
388 __i915_vma_unpin(tl->hwsp_ggtt);
389}
390
391void __intel_timeline_free(struct kref *kref)
392{
393 struct intel_timeline *timeline =
394 container_of(kref, typeof(*timeline), kref);
395
396 GEM_BUG_ON(atomic_read(&timeline->pin_count));
397 GEM_BUG_ON(!list_empty(&timeline->requests));
398 GEM_BUG_ON(timeline->retire);
399
400 call_rcu(&timeline->rcu, intel_timeline_fini);
401}
402
403void intel_gt_fini_timelines(struct intel_gt *gt)
404{
405 struct intel_gt_timelines *timelines = &gt->timelines;
Value stored to 'timelines' during its initialization is never read
406
407 GEM_BUG_ON(!list_empty(&timelines->active_list));
408}
409
410void intel_gt_show_timelines(struct intel_gt *gt,
411 struct drm_printer *m,
412 void (*show_request)(struct drm_printer *m,
413 const struct i915_request *rq,
414 const char *prefix,
415 int indent))
416{
417 struct intel_gt_timelines *timelines = &gt->timelines;
418 struct intel_timeline *tl, *tn;
419 DRM_LIST_HEAD(free);
420
421 spin_lock(&timelines->lock);
422 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
423 unsigned long count, ready, inflight;
424 struct i915_request *rq, *rn;
425 struct dma_fence *fence;
426
427 if (!mutex_trylock(&tl->mutex)) {
428 drm_printf(m, "Timeline %llx: busy; skipping\n",
429 tl->fence_context);
430 continue;
431 }
432
433 intel_timeline_get(tl);
434 GEM_BUG_ON(!atomic_read(&tl->active_count));
435 atomic_inc(&tl->active_count); /* pin the list element */
436 spin_unlock(&timelines->lock);
437
438 count = 0;
439 ready = 0;
440 inflight = 0;
441 list_for_each_entry_safe(rq, rn, &tl->requests, link) {
442 if (i915_request_completed(rq))
443 continue;
444
445 count++;
446 if (i915_request_is_ready(rq))
447 ready++;
448 if (i915_request_is_active(rq))
449 inflight++;
450 }
451
452 drm_printf(m, "Timeline %llx: { ", tl->fence_context);
453 drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
454 count, ready, inflight);
455 drm_printf(m, ", seqno: { current: %d, last: %d }",
456 *tl->hwsp_seqno, tl->seqno);
457 fence = i915_active_fence_get(&tl->last_request);
458 if (fence) {
459 drm_printf(m, ", engine: %s",
460 to_request(fence)->engine->name);
461 dma_fence_put(fence);
462 }
463 drm_printf(m, " }\n");
464
465 if (show_request) {
466 list_for_each_entry_safe(rq, rn, &tl->requests, link)
467 show_request(m, rq, "", 2);
468 }
469
470 mutex_unlock(&tl->mutex);
471 spin_lock(&timelines->lock);
472
473 /* Resume list iteration after reacquiring spinlock */
474 list_safe_reset_next(tl, tn, link);
475 if (atomic_dec_and_test(&tl->active_count))
476 list_del(&tl->link);
477
478 /* Defer the final release to after the spinlock */
479 if (refcount_dec_and_test(&tl->kref.refcount)) {
480 GEM_BUG_ON(atomic_read(&tl->active_count));
481 list_add(&tl->link, &free);
482 }
483 }
484 spin_unlock(&timelines->lock);
485
486 list_for_each_entry_safe(tl, tn, &free, link)
488}
489
490#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
491#include "gt/selftests/mock_timeline.c"
492#include "gt/selftest_timeline.c"
493#endif
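
If the report were to be addressed in the source rather than suppressed, one hypothetical option (not a change the i915 or OpenBSD maintainers have made, only a sketch) is to drop the intermediate local so nothing is left unread when GEM_BUG_ON() compiles away:

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	GEM_BUG_ON(!list_empty(&gt->timelines.active_list));
}

An alternative would be a disabled GEM_BUG_ON() form that still mentions its argument in an unevaluated context (for example inside sizeof), which typically keeps such locals counted as used and avoids this class of dead-store report.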