File: dev/pci/drm/include/linux/llist.h
Warning: line 58, column 18: Access to field 'next' results in a dereference of a null pointer (loaded from variable 'new_last')

Source listing: dev/pci/drm/i915/i915_active.c
1 | /* | |||
2 | * SPDX-License-Identifier: MIT | |||
3 | * | |||
4 | * Copyright © 2019 Intel Corporation | |||
5 | */ | |||
6 | ||||
7 | #include <linux/debugobjects.h> | |||
8 | ||||
9 | #include "gt/intel_context.h" | |||
10 | #include "gt/intel_engine_heartbeat.h" | |||
11 | #include "gt/intel_engine_pm.h" | |||
12 | #include "gt/intel_ring.h" | |||
13 | ||||
14 | #include "i915_drv.h" | |||
15 | #include "i915_active.h" | |||
16 | ||||
17 | /* | |||
18 | * Active refs memory management | |||
19 | * | |||
20 | * To be more economical with memory, we reap all the i915_active trees as | |||
21 | * they idle (when we know the active requests are inactive) and allocate the | |||
22 | * nodes from a local slab cache to hopefully reduce the fragmentation. | |||
23 | */ | |||
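/*
 * A quick orientation sketch of that lifecycle as implemented below:
 * active_instance() allocates an active_node from slab_cache and links it
 * into ref->tree keyed by timeline; __active_lookup() keeps the most
 * recently used node in ref->cache; on the final reference drop,
 * __active_retire() detaches the tree, keeps only the cached node for
 * reuse and returns every other node to slab_cache.
 */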
24 | static struct pool slab_cache; | |||
25 | ||||
26 | struct active_node { | |||
27 | struct rb_node node; | |||
28 | struct i915_active_fence base; | |||
29 | struct i915_active *ref; | |||
30 | u64 timeline; | |||
31 | }; | |||
32 | ||||
33 | #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
34 | ||||
35 | static inline struct active_node * | |||
36 | node_from_active(struct i915_active_fence *active) | |||
37 | { | |||
38 | return container_of(active, struct active_node, base)({ const __typeof( ((struct active_node *)0)->base ) *__mptr = (active); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, base) );}); | |||
39 | } | |||
40 | ||||
41 | #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
42 | ||||
43 | static inline bool_Bool is_barrier(const struct i915_active_fence *active) | |||
44 | { | |||
45 | return IS_ERR(rcu_access_pointer(active->fence)(active->fence)); | |||
46 | } | |||
47 | ||||
48 | static inline struct llist_node *barrier_to_ll(struct active_node *node) | |||
49 | { | |||
50 | GEM_BUG_ON(!is_barrier(&node->base))((void)0); | |||
51 | return (struct llist_node *)&node->base.cb.node; | |||
52 | } | |||
53 | ||||
54 | static inline struct intel_engine_cs * | |||
55 | __barrier_to_engine(struct active_node *node) | |||
56 | { | |||
57 | return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev)({ typeof(node->base.cb.node.prev) __tmp = *(volatile typeof (node->base.cb.node.prev) *)&(node->base.cb.node.prev ); membar_datadep_consumer(); __tmp; }); | |||
58 | } | |||
59 | ||||
60 | static inline struct intel_engine_cs * | |||
61 | barrier_to_engine(struct active_node *node) | |||
62 | { | |||
63 | GEM_BUG_ON(!is_barrier(&node->base))((void)0); | |||
64 | return __barrier_to_engine(node); | |||
65 | } | |||
66 | ||||
67 | static inline struct active_node *barrier_from_ll(struct llist_node *x) | |||
68 | { | |||
69 | return container_of((struct list_head *)x,({ const __typeof( ((struct active_node *)0)->base.cb.node ) *__mptr = ((struct list_head *)x); (struct active_node *)( (char *)__mptr - __builtin_offsetof(struct active_node, base .cb.node) );}) | |||
70 | struct active_node, base.cb.node)({ const __typeof( ((struct active_node *)0)->base.cb.node ) *__mptr = ((struct list_head *)x); (struct active_node *)( (char *)__mptr - __builtin_offsetof(struct active_node, base .cb.node) );}); | |||
71 | } | |||
72 | ||||
73 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)0 && IS_ENABLED(CONFIG_DEBUG_OBJECTS)0 | |||
74 | ||||
75 | static void *active_debug_hint(void *addr) | |||
76 | { | |||
77 | struct i915_active *ref = addr; | |||
78 | ||||
79 | return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; | |||
80 | } | |||
81 | ||||
82 | static const struct debug_obj_descr active_debug_desc = { | |||
83 | .name = "i915_active", | |||
84 | .debug_hint = active_debug_hint, | |||
85 | }; | |||
86 | ||||
87 | static void debug_active_init(struct i915_active *ref) | |||
88 | { | |||
89 | debug_object_init(ref, &active_debug_desc); | |||
90 | } | |||
91 | ||||
92 | static void debug_active_activate(struct i915_active *ref) | |||
93 | { | |||
94 | lockdep_assert_held(&ref->tree_lock)do { (void)(&ref->tree_lock); } while(0); | |||
95 | debug_object_activate(ref, &active_debug_desc); | |||
96 | } | |||
97 | ||||
98 | static void debug_active_deactivate(struct i915_active *ref) | |||
99 | { | |||
100 | lockdep_assert_held(&ref->tree_lock)do { (void)(&ref->tree_lock); } while(0); | |||
101 | if (!atomic_read(&ref->count)({ typeof(*(&ref->count)) __tmp = *(volatile typeof(*( &ref->count)) *)&(*(&ref->count)); membar_datadep_consumer (); __tmp; })) /* after the last dec */ | |||
102 | debug_object_deactivate(ref, &active_debug_desc); | |||
103 | } | |||
104 | ||||
105 | static void debug_active_fini(struct i915_active *ref) | |||
106 | { | |||
107 | debug_object_free(ref, &active_debug_desc); | |||
108 | } | |||
109 | ||||
110 | static void debug_active_assert(struct i915_active *ref) | |||
111 | { | |||
112 | debug_object_assert_init(ref, &active_debug_desc); | |||
113 | } | |||
114 | ||||
115 | #else | |||
116 | ||||
117 | static inline void debug_active_init(struct i915_active *ref) { } | |||
118 | static inline void debug_active_activate(struct i915_active *ref) { } | |||
119 | static inline void debug_active_deactivate(struct i915_active *ref) { } | |||
120 | static inline void debug_active_fini(struct i915_active *ref) { } | |||
121 | static inline void debug_active_assert(struct i915_active *ref) { } | |||
122 | ||||
123 | #endif | |||
124 | ||||
125 | static void | |||
126 | __active_retire(struct i915_active *ref) | |||
127 | { | |||
128 | struct rb_root root = RB_ROOT(struct rb_root) { ((void *)0) }; | |||
129 | struct active_node *it, *n; | |||
130 | unsigned long flags; | |||
131 | ||||
132 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
133 | ||||
134 | /* return the unused nodes to our slabcache -- flushing the allocator */ | |||
135 | if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)atomic_dec_and_lock(&ref->count, &ref->tree_lock )) | |||
136 | return; | |||
137 | ||||
138 | GEM_BUG_ON(rcu_access_pointer(ref->excl.fence))((void)0); | |||
139 | debug_active_deactivate(ref); | |||
140 | ||||
141 | /* Even if we have not used the cache, we may still have a barrier */ | |||
142 | if (!ref->cache) | |||
143 | ref->cache = fetch_node(ref->tree.rb_node);
144 | ||||
145 | /* Keep the MRU cached node for reuse */ | |||
146 | if (ref->cache) { | |||
147 | /* Discard all other nodes in the tree */ | |||
148 | rb_erase(&ref->cache->node, &ref->tree)linux_root_RB_REMOVE((struct linux_root *)(&ref->tree) , (&ref->cache->node)); | |||
149 | root = ref->tree; | |||
150 | ||||
151 | /* Rebuild the tree with only the cached node */ | |||
152 | rb_link_node(&ref->cache->node, NULL((void *)0), &ref->tree.rb_node); | |||
153 | rb_insert_color(&ref->cache->node, &ref->tree)linux_root_RB_INSERT_COLOR((struct linux_root *)(&ref-> tree), (&ref->cache->node)); | |||
154 | GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node)((void)0); | |||
155 | ||||
156 | /* Make the cached node available for reuse with any timeline */ | |||
157 | ref->cache->timeline = 0; /* needs cmpxchg(u64) */ | |||
158 | } | |||
159 | ||||
160 | spin_unlock_irqrestore(&ref->tree_lock, flags)do { (void)(flags); mtx_leave(&ref->tree_lock); } while (0); | |||
161 | ||||
162 | /* After the final retire, the entire struct may be freed */ | |||
163 | if (ref->retire) | |||
164 | ref->retire(ref); | |||
165 | ||||
166 | /* ... except if you wait on it, you must manage your own references! */ | |||
167 | wake_up_var(ref); | |||
168 | ||||
169 | /* Finally free the discarded timeline tree */ | |||
170 | rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
171 | GEM_BUG_ON(i915_active_fence_isset(&it->base))((void)0); | |||
172 | #ifdef __linux__ | |||
173 | kmem_cache_free(slab_cache, it); | |||
174 | #else | |||
175 | pool_put(&slab_cache, it); | |||
176 | #endif | |||
177 | } | |||
178 | } | |||
179 | ||||
180 | static void | |||
181 | active_work(struct work_struct *wrk) | |||
182 | { | |||
183 | struct i915_active *ref = container_of(wrk, typeof(*ref), work)({ const __typeof( ((typeof(*ref) *)0)->work ) *__mptr = ( wrk); (typeof(*ref) *)( (char *)__mptr - __builtin_offsetof(typeof (*ref), work) );}); | |||
184 | ||||
185 | GEM_BUG_ON(!atomic_read(&ref->count))((void)0); | |||
186 | if (atomic_add_unless(&ref->count, -1, 1)) | |||
187 | return; | |||
188 | ||||
189 | __active_retire(ref); | |||
190 | } | |||
191 | ||||
192 | static void | |||
193 | active_retire(struct i915_active *ref) | |||
194 | { | |||
195 | GEM_BUG_ON(!atomic_read(&ref->count))((void)0); | |||
196 | if (atomic_add_unless(&ref->count, -1, 1)) | |||
197 | return; | |||
198 | ||||
199 | if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS(1UL << (0))) { | |||
200 | queue_work(system_unbound_wq, &ref->work); | |||
201 | return; | |||
202 | } | |||
203 | ||||
204 | __active_retire(ref); | |||
205 | } | |||
206 | ||||
207 | static inline struct dma_fence ** | |||
208 | __active_fence_slot(struct i915_active_fence *active) | |||
209 | { | |||
210 | return (struct dma_fence ** __force)&active->fence; | |||
211 | } | |||
212 | ||||
213 | static inline bool_Bool | |||
214 | active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
215 | { | |||
216 | struct i915_active_fence *active = | |||
217 | container_of(cb, typeof(*active), cb)({ const __typeof( ((typeof(*active) *)0)->cb ) *__mptr = ( cb); (typeof(*active) *)( (char *)__mptr - __builtin_offsetof (typeof(*active), cb) );}); | |||
218 | ||||
219 | return cmpxchg(__active_fence_slot(active), fence, NULL)__sync_val_compare_and_swap(__active_fence_slot(active), fence , ((void *)0)) == fence; | |||
220 | } | |||
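/*
 * active_fence_cb() races against __i915_active_fence_set(): the cmpxchg
 * only clears the slot if it still holds the fence being signalled, so the
 * callback returns false (and the retire callbacks below do nothing) when
 * the slot has already moved on to a newer fence.
 */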
221 | ||||
222 | static void | |||
223 | node_retire(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
224 | { | |||
225 | if (active_fence_cb(fence, cb)) | |||
226 | active_retire(container_of(cb, struct active_node, base.cb)({ const __typeof( ((struct active_node *)0)->base.cb ) *__mptr = (cb); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, base.cb) );})->ref); | |||
227 | } | |||
228 | ||||
229 | static void | |||
230 | excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
231 | { | |||
232 | if (active_fence_cb(fence, cb)) | |||
233 | active_retire(container_of(cb, struct i915_active, excl.cb)({ const __typeof( ((struct i915_active *)0)->excl.cb ) *__mptr = (cb); (struct i915_active *)( (char *)__mptr - __builtin_offsetof (struct i915_active, excl.cb) );})); | |||
234 | } | |||
235 | ||||
236 | static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) | |||
237 | { | |||
238 | struct active_node *it; | |||
239 | ||||
240 | GEM_BUG_ON(idx == 0)((void)0); /* 0 is the unordered timeline, rsvd for cache */ | |||
241 | ||||
242 | /* | |||
243 | * We track the most recently used timeline to skip a rbtree search | |||
244 | * for the common case, under typical loads we never need the rbtree | |||
245 | * at all. We can reuse the last slot if it is empty, that is | |||
246 | * after the previous activity has been retired, or if it matches the | |||
247 | * current timeline. | |||
248 | */ | |||
249 | it = READ_ONCE(ref->cache)({ typeof(ref->cache) __tmp = *(volatile typeof(ref->cache ) *)&(ref->cache); membar_datadep_consumer(); __tmp; } ); | |||
250 | if (it) { | |||
251 | u64 cached = READ_ONCE(it->timeline)({ typeof(it->timeline) __tmp = *(volatile typeof(it->timeline ) *)&(it->timeline); membar_datadep_consumer(); __tmp; }); | |||
252 | ||||
253 | /* Once claimed, this slot will only belong to this idx */ | |||
254 | if (cached == idx) | |||
255 | return it; | |||
256 | ||||
257 | /* | |||
258 | * An unclaimed cache [.timeline=0] can only be claimed once. | |||
259 | * | |||
260 | * If the value is already non-zero, some other thread has | |||
261 | * claimed the cache and we know that it does not match our | |||
262 | * idx. If, and only if, the timeline is currently zero is it | |||
263 | * worth competing to claim it atomically for ourselves (for | |||
264 | * only the winner of that race will cmpxchg return the old | |||
265 | * value of 0). | |||
266 | */ | |||
267 | if (!cached && !cmpxchg64(&it->timeline, 0, idx)__sync_val_compare_and_swap(&it->timeline, 0, idx)) | |||
268 | return it; | |||
269 | } | |||
270 | ||||
271 | BUILD_BUG_ON(offsetof(typeof(*it), node))extern char _ctassert[(!(__builtin_offsetof(typeof(*it), node ))) ? 1 : -1 ] __attribute__((__unused__)); | |||
272 | ||||
273 | /* While active, the tree can only be built; not destroyed */ | |||
274 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
275 | ||||
276 | it = fetch_node(ref->tree.rb_node);
277 | while (it) { | |||
278 | if (it->timeline < idx) { | |||
279 | it = fetch_node(it->node.rb_right);
280 | } else if (it->timeline > idx) { | |||
281 | it = fetch_node(it->node.rb_left);
282 | } else { | |||
283 | WRITE_ONCE(ref->cache, it)({ typeof(ref->cache) __tmp = (it); *(volatile typeof(ref-> cache) *)&(ref->cache) = __tmp; __tmp; }); | |||
284 | break; | |||
285 | } | |||
286 | } | |||
287 | ||||
288 | /* NB: If the tree rotated beneath us, we may miss our target. */ | |||
289 | return it; | |||
290 | } | |||
291 | ||||
292 | static struct i915_active_fence * | |||
293 | active_instance(struct i915_active *ref, u64 idx) | |||
294 | { | |||
295 | struct active_node *node; | |||
296 | struct rb_node **p, *parent; | |||
297 | ||||
298 | node = __active_lookup(ref, idx); | |||
299 | if (likely(node)__builtin_expect(!!(node), 1)) | |||
300 | return &node->base; | |||
301 | ||||
302 | spin_lock_irq(&ref->tree_lock)mtx_enter(&ref->tree_lock); | |||
303 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
304 | ||||
305 | parent = NULL((void *)0); | |||
306 | p = &ref->tree.rb_node; | |||
307 | while (*p) { | |||
308 | parent = *p; | |||
309 | ||||
310 | node = rb_entry(parent, struct active_node, node)({ const __typeof( ((struct active_node *)0)->node ) *__mptr = (parent); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, node) );}); | |||
311 | if (node->timeline == idx) | |||
312 | goto out; | |||
313 | ||||
314 | if (node->timeline < idx) | |||
315 | p = &parent->rb_right__entry.rbe_right; | |||
316 | else | |||
317 | p = &parent->rb_left__entry.rbe_left; | |||
318 | } | |||
319 | ||||
320 | /* | |||
321 | * XXX: We should preallocate this before i915_active_ref() is ever | |||
322 | * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC. | |||
323 | */ | |||
324 | #ifdef __linux__ | |||
325 | node = kmem_cache_alloc(slab_cache, GFP_ATOMIC0x0002); | |||
326 | #else | |||
327 | node = pool_get(&slab_cache, PR_NOWAIT0x0002); | |||
328 | #endif | |||
329 | if (!node) | |||
330 | goto out; | |||
331 | ||||
332 | __i915_active_fence_init(&node->base, NULL((void *)0), node_retire); | |||
333 | node->ref = ref; | |||
334 | node->timeline = idx; | |||
335 | ||||
336 | rb_link_node(&node->node, parent, p); | |||
337 | rb_insert_color(&node->node, &ref->tree)linux_root_RB_INSERT_COLOR((struct linux_root *)(&ref-> tree), (&node->node)); | |||
338 | ||||
339 | out: | |||
340 | WRITE_ONCE(ref->cache, node)({ typeof(ref->cache) __tmp = (node); *(volatile typeof(ref ->cache) *)&(ref->cache) = __tmp; __tmp; }); | |||
341 | spin_unlock_irq(&ref->tree_lock)mtx_leave(&ref->tree_lock); | |||
342 | ||||
343 | return &node->base; | |||
344 | } | |||
345 | ||||
346 | void __i915_active_init(struct i915_active *ref, | |||
347 | int (*active)(struct i915_active *ref), | |||
348 | void (*retire)(struct i915_active *ref), | |||
349 | unsigned long flags, | |||
350 | struct lock_class_key *mkey, | |||
351 | struct lock_class_key *wkey) | |||
352 | { | |||
353 | debug_active_init(ref); | |||
354 | ||||
355 | ref->flags = flags; | |||
356 | ref->active = active; | |||
357 | ref->retire = retire; | |||
358 | ||||
359 | mtx_init(&ref->tree_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&ref-> tree_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9)))); } while (0); | |||
360 | ref->tree = RB_ROOT(struct rb_root) { ((void *)0) }; | |||
361 | ref->cache = NULL((void *)0); | |||
362 | ||||
363 | init_llist_head(&ref->preallocated_barriers); | |||
364 | atomic_set(&ref->count, 0)({ typeof(*(&ref->count)) __tmp = ((0)); *(volatile typeof (*(&ref->count)) *)&(*(&ref->count)) = __tmp ; __tmp; }); | |||
365 | #ifdef __linux__ | |||
366 | __mutex_init(&ref->mutex, "i915_active", mkey); | |||
367 | #else | |||
368 | rw_init(&ref->mutex, "i915_active")_rw_init_flags(&ref->mutex, "i915_active", 0, ((void * )0)); | |||
369 | #endif | |||
370 | __i915_active_fence_init(&ref->excl, NULL((void *)0), excl_retire); | |||
371 | INIT_WORK(&ref->work, active_work); | |||
372 | #if IS_ENABLED(CONFIG_LOCKDEP)0 | |||
373 | lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0); | |||
374 | #endif | |||
375 | } | |||
376 | ||||
377 | static bool_Bool ____active_del_barrier(struct i915_active *ref, | |||
378 | struct active_node *node, | |||
379 | struct intel_engine_cs *engine) | |||
380 | ||||
381 | { | |||
382 | struct llist_node *head = NULL((void *)0), *tail = NULL((void *)0); | |||
383 | struct llist_node *pos, *next; | |||
384 | ||||
385 | GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context)((void)0); | |||
386 | ||||
387 | /* | |||
388 | * Rebuild the llist excluding our node. We may perform this | |||
389 | * outside of the kernel_context timeline mutex and so someone | |||
390 | * else may be manipulating the engine->barrier_tasks, in | |||
391 | * which case either we or they will be upset :) | |||
392 | * | |||
393 | * A second __active_del_barrier() will report failure to claim | |||
394 | * the active_node and the caller will just shrug and know not to | |||
395 | * claim ownership of its node. | |||
396 | * | |||
397 | * A concurrent i915_request_add_active_barriers() will miss adding | |||
398 | * any of the tasks, but we will try again on the next -- and since | |||
399 | * we are actively using the barrier, we know that there will be | |||
400 | * at least another opportunity when we idle. | |||
401 | */ | |||
402 | llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
403 | if (node == barrier_from_ll(pos)) { | |||
404 | node = NULL((void *)0); | |||
405 | continue; | |||
406 | } | |||
407 | ||||
408 | pos->next = head; | |||
409 | head = pos; | |||
410 | if (!tail) | |||
411 | tail = pos; | |||
412 | } | |||
413 | if (head) | |||
414 | llist_add_batch(head, tail, &engine->barrier_tasks); | |||
415 | ||||
416 | return !node; | |||
417 | } | |||
418 | ||||
419 | static bool_Bool | |||
420 | __active_del_barrier(struct i915_active *ref, struct active_node *node) | |||
421 | { | |||
422 | return ____active_del_barrier(ref, node, barrier_to_engine(node)); | |||
423 | } | |||
424 | ||||
425 | static bool_Bool | |||
426 | replace_barrier(struct i915_active *ref, struct i915_active_fence *active) | |||
427 | { | |||
428 | if (!is_barrier(active)) /* proto-node used by our idle barrier? */ | |||
429 | return false0; | |||
430 | ||||
431 | /* | |||
432 | * This request is on the kernel_context timeline, and so | |||
433 | * we can use it to substitute for the pending idle-barrier | |||
434 | * request that we want to emit on the kernel_context. | |||
435 | */ | |||
436 | return __active_del_barrier(ref, node_from_active(active)); | |||
437 | } | |||
438 | ||||
439 | int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) | |||
440 | { | |||
441 | u64 idx = i915_request_timeline(rq)->fence_context; | |||
442 | struct dma_fence *fence = &rq->fence; | |||
443 | struct i915_active_fence *active; | |||
444 | int err; | |||
445 | ||||
446 | /* Prevent reaping in case we malloc/wait while building the tree */ | |||
447 | err = i915_active_acquire(ref); | |||
448 | if (err) | |||
449 | return err; | |||
450 | ||||
451 | do { | |||
452 | active = active_instance(ref, idx); | |||
453 | if (!active) { | |||
454 | err = -ENOMEM12; | |||
455 | goto out; | |||
456 | } | |||
457 | ||||
458 | if (replace_barrier(ref, active)) { | |||
459 | RCU_INIT_POINTER(active->fence, NULL)do { (active->fence) = (((void *)0)); } while(0); | |||
460 | atomic_dec(&ref->count)__sync_fetch_and_sub(&ref->count, 1); | |||
461 | } | |||
462 | } while (unlikely(is_barrier(active))__builtin_expect(!!(is_barrier(active)), 0)); | |||
463 | ||||
464 | fence = __i915_active_fence_set(active, fence); | |||
465 | if (!fence) | |||
466 | __i915_active_acquire(ref); | |||
467 | else | |||
468 | dma_fence_put(fence); | |||
469 | ||||
470 | out: | |||
471 | i915_active_release(ref); | |||
472 | return err; | |||
473 | } | |||
474 | ||||
475 | static struct dma_fence * | |||
476 | __i915_active_set_fence(struct i915_active *ref, | |||
477 | struct i915_active_fence *active, | |||
478 | struct dma_fence *fence) | |||
479 | { | |||
480 | struct dma_fence *prev; | |||
481 | ||||
482 | if (replace_barrier(ref, active)) { | |||
483 | RCU_INIT_POINTER(active->fence, fence)do { (active->fence) = (fence); } while(0); | |||
484 | return NULL((void *)0); | |||
485 | } | |||
486 | ||||
487 | prev = __i915_active_fence_set(active, fence); | |||
488 | if (!prev) | |||
489 | __i915_active_acquire(ref); | |||
490 | ||||
491 | return prev; | |||
492 | } | |||
493 | ||||
494 | struct dma_fence * | |||
495 | i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) | |||
496 | { | |||
497 | /* We expect the caller to manage the exclusive timeline ordering */ | |||
498 | return __i915_active_set_fence(ref, &ref->excl, f); | |||
499 | } | |||
500 | ||||
501 | bool_Bool i915_active_acquire_if_busy(struct i915_active *ref) | |||
502 | { | |||
503 | debug_active_assert(ref); | |||
504 | return atomic_add_unless(&ref->count, 1, 0); | |||
505 | } | |||
506 | ||||
507 | static void __i915_active_activate(struct i915_active *ref) | |||
508 | { | |||
509 | spin_lock_irq(&ref->tree_lock)mtx_enter(&ref->tree_lock); /* __active_retire() */ | |||
510 | if (!atomic_fetch_inc(&ref->count)__sync_fetch_and_add(&ref->count, 1)) | |||
511 | debug_active_activate(ref); | |||
512 | spin_unlock_irq(&ref->tree_lock)mtx_leave(&ref->tree_lock); | |||
513 | } | |||
514 | ||||
515 | int i915_active_acquire(struct i915_active *ref) | |||
516 | { | |||
517 | int err; | |||
518 | ||||
519 | if (i915_active_acquire_if_busy(ref)) | |||
520 | return 0; | |||
521 | ||||
522 | if (!ref->active) { | |||
523 | __i915_active_activate(ref); | |||
524 | return 0; | |||
525 | } | |||
526 | ||||
527 | err = mutex_lock_interruptible(&ref->mutex); | |||
528 | if (err) | |||
529 | return err; | |||
530 | ||||
531 | if (likely(!i915_active_acquire_if_busy(ref))__builtin_expect(!!(!i915_active_acquire_if_busy(ref)), 1)) { | |||
532 | err = ref->active(ref); | |||
533 | if (!err) | |||
534 | __i915_active_activate(ref); | |||
535 | } | |||
536 | ||||
537 | mutex_unlock(&ref->mutex)rw_exit_write(&ref->mutex); | |||
538 | ||||
539 | return err; | |||
540 | } | |||
541 | ||||
542 | int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) | |||
543 | { | |||
544 | struct i915_active_fence *active; | |||
545 | int err; | |||
546 | ||||
547 | err = i915_active_acquire(ref); | |||
548 | if (err) | |||
549 | return err; | |||
550 | ||||
551 | active = active_instance(ref, idx); | |||
552 | if (!active) { | |||
553 | i915_active_release(ref); | |||
554 | return -ENOMEM12; | |||
555 | } | |||
556 | ||||
557 | return 0; /* return with active ref */ | |||
558 | } | |||
559 | ||||
560 | void i915_active_release(struct i915_active *ref) | |||
561 | { | |||
562 | debug_active_assert(ref); | |||
563 | active_retire(ref); | |||
564 | } | |||
565 | ||||
566 | static void enable_signaling(struct i915_active_fence *active) | |||
567 | { | |||
568 | struct dma_fence *fence; | |||
569 | ||||
570 | if (unlikely(is_barrier(active))__builtin_expect(!!(is_barrier(active)), 0)) | |||
571 | return; | |||
572 | ||||
573 | fence = i915_active_fence_get(active); | |||
574 | if (!fence) | |||
575 | return; | |||
576 | ||||
577 | dma_fence_enable_sw_signaling(fence); | |||
578 | dma_fence_put(fence); | |||
579 | } | |||
580 | ||||
581 | static int flush_barrier(struct active_node *it) | |||
582 | { | |||
583 | struct intel_engine_cs *engine; | |||
584 | ||||
585 | if (likely(!is_barrier(&it->base))__builtin_expect(!!(!is_barrier(&it->base)), 1)) | |||
586 | return 0; | |||
587 | ||||
588 | engine = __barrier_to_engine(it); | |||
589 | smp_rmb()do { __asm volatile("" ::: "memory"); } while (0); /* serialise with add_active_barriers */ | |||
590 | if (!is_barrier(&it->base)) | |||
591 | return 0; | |||
592 | ||||
593 | return intel_engine_flush_barriers(engine); | |||
594 | } | |||
595 | ||||
596 | static int flush_lazy_signals(struct i915_active *ref) | |||
597 | { | |||
598 | struct active_node *it, *n; | |||
599 | int err = 0; | |||
600 | ||||
601 | enable_signaling(&ref->excl); | |||
602 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
603 | err = flush_barrier(it); /* unconnected idle barrier? */ | |||
604 | if (err) | |||
605 | break; | |||
606 | ||||
607 | enable_signaling(&it->base); | |||
608 | } | |||
609 | ||||
610 | return err; | |||
611 | } | |||
612 | ||||
613 | int __i915_active_wait(struct i915_active *ref, int state) | |||
614 | { | |||
615 | might_sleep()assertwaitok(); | |||
616 | ||||
617 | /* Any fence added after the wait begins will not be auto-signaled */ | |||
618 | if (i915_active_acquire_if_busy(ref)) { | |||
619 | int err; | |||
620 | ||||
621 | err = flush_lazy_signals(ref); | |||
622 | i915_active_release(ref); | |||
623 | if (err) | |||
624 | return err; | |||
625 | ||||
626 | if (___wait_var_event(ref, i915_active_is_idle(ref),
627 | state, 0, 0, schedule()))
628 | return -EINTR4; | |||
629 | } | |||
630 | ||||
631 | /* | |||
632 | * After the wait is complete, the caller may free the active. | |||
633 | * We have to flush any concurrent retirement before returning. | |||
634 | */ | |||
635 | flush_work(&ref->work); | |||
636 | return 0; | |||
637 | } | |||
638 | ||||
639 | static int __await_active(struct i915_active_fence *active, | |||
640 | int (*fn)(void *arg, struct dma_fence *fence), | |||
641 | void *arg) | |||
642 | { | |||
643 | struct dma_fence *fence; | |||
644 | ||||
645 | if (is_barrier(active)) /* XXX flush the barrier? */ | |||
646 | return 0; | |||
647 | ||||
648 | fence = i915_active_fence_get(active); | |||
649 | if (fence) { | |||
650 | int err; | |||
651 | ||||
652 | err = fn(arg, fence); | |||
653 | dma_fence_put(fence); | |||
654 | if (err < 0) | |||
655 | return err; | |||
656 | } | |||
657 | ||||
658 | return 0; | |||
659 | } | |||
660 | ||||
661 | struct wait_barrier { | |||
662 | struct wait_queue_entry base; | |||
663 | struct i915_active *ref; | |||
664 | }; | |||
665 | ||||
666 | static int | |||
667 | barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key) | |||
668 | { | |||
669 | struct wait_barrier *wb = container_of(wq, typeof(*wb), base)({ const __typeof( ((typeof(*wb) *)0)->base ) *__mptr = (wq ); (typeof(*wb) *)( (char *)__mptr - __builtin_offsetof(typeof (*wb), base) );}); | |||
670 | ||||
671 | if (i915_active_is_idle(wb->ref)) { | |||
672 | list_del(&wq->entry); | |||
673 | i915_sw_fence_complete(wq->private); | |||
674 | kfree(wq); | |||
675 | } | |||
676 | ||||
677 | return 0; | |||
678 | } | |||
679 | ||||
680 | static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) | |||
681 | { | |||
682 | struct wait_barrier *wb; | |||
683 | ||||
684 | wb = kmalloc(sizeof(*wb), GFP_KERNEL(0x0001 | 0x0004)); | |||
685 | if (unlikely(!wb)__builtin_expect(!!(!wb), 0)) | |||
686 | return -ENOMEM12; | |||
687 | ||||
688 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
689 | if (!i915_sw_fence_await(fence)) { | |||
690 | kfree(wb); | |||
691 | return -EINVAL22; | |||
692 | } | |||
693 | ||||
694 | wb->base.flags = 0; | |||
695 | wb->base.func = barrier_wake; | |||
696 | wb->base.private = fence; | |||
697 | wb->ref = ref; | |||
698 | ||||
699 | add_wait_queue(__var_waitqueue(ref), &wb->base); | |||
700 | return 0; | |||
701 | } | |||
702 | ||||
703 | static int await_active(struct i915_active *ref, | |||
704 | unsigned int flags, | |||
705 | int (*fn)(void *arg, struct dma_fence *fence), | |||
706 | void *arg, struct i915_sw_fence *barrier) | |||
707 | { | |||
708 | int err = 0; | |||
709 | ||||
710 | if (!i915_active_acquire_if_busy(ref)) | |||
711 | return 0; | |||
712 | ||||
713 | if (flags & I915_ACTIVE_AWAIT_EXCL(1UL << (0)) && | |||
714 | rcu_access_pointer(ref->excl.fence)(ref->excl.fence)) { | |||
715 | err = __await_active(&ref->excl, fn, arg); | |||
716 | if (err) | |||
717 | goto out; | |||
718 | } | |||
719 | ||||
720 | if (flags & I915_ACTIVE_AWAIT_ACTIVE(1UL << (1))) { | |||
721 | struct active_node *it, *n; | |||
722 | ||||
723 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
724 | err = __await_active(&it->base, fn, arg); | |||
725 | if (err) | |||
726 | goto out; | |||
727 | } | |||
728 | } | |||
729 | ||||
730 | if (flags & I915_ACTIVE_AWAIT_BARRIER(1UL << (2))) { | |||
731 | err = flush_lazy_signals(ref); | |||
732 | if (err) | |||
733 | goto out; | |||
734 | ||||
735 | err = __await_barrier(ref, barrier); | |||
736 | if (err) | |||
737 | goto out; | |||
738 | } | |||
739 | ||||
740 | out: | |||
741 | i915_active_release(ref); | |||
742 | return err; | |||
743 | } | |||
744 | ||||
745 | static int rq_await_fence(void *arg, struct dma_fence *fence) | |||
746 | { | |||
747 | return i915_request_await_dma_fence(arg, fence); | |||
748 | } | |||
749 | ||||
750 | int i915_request_await_active(struct i915_request *rq, | |||
751 | struct i915_active *ref, | |||
752 | unsigned int flags) | |||
753 | { | |||
754 | return await_active(ref, flags, rq_await_fence, rq, &rq->submit); | |||
755 | } | |||
756 | ||||
757 | static int sw_await_fence(void *arg, struct dma_fence *fence) | |||
758 | { | |||
759 | return i915_sw_fence_await_dma_fence(arg, fence, 0, | |||
760 | GFP_NOWAIT0x0002 | __GFP_NOWARN0); | |||
761 | } | |||
762 | ||||
763 | int i915_sw_fence_await_active(struct i915_sw_fence *fence, | |||
764 | struct i915_active *ref, | |||
765 | unsigned int flags) | |||
766 | { | |||
767 | return await_active(ref, flags, sw_await_fence, fence, fence); | |||
768 | } | |||
769 | ||||
770 | void i915_active_fini(struct i915_active *ref) | |||
771 | { | |||
772 | debug_active_fini(ref); | |||
773 | GEM_BUG_ON(atomic_read(&ref->count))((void)0); | |||
774 | GEM_BUG_ON(work_pending(&ref->work))((void)0); | |||
775 | mutex_destroy(&ref->mutex); | |||
776 | ||||
777 | if (ref->cache) | |||
778 | #ifdef __linux__ | |||
779 | kmem_cache_free(slab_cache, ref->cache); | |||
780 | #else | |||
781 | pool_put(&slab_cache, ref->cache); | |||
782 | #endif | |||
783 | } | |||
784 | ||||
785 | static inline bool_Bool is_idle_barrier(struct active_node *node, u64 idx) | |||
786 | { | |||
787 | return node->timeline == idx && !i915_active_fence_isset(&node->base); | |||
788 | } | |||
789 | ||||
790 | static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) | |||
791 | { | |||
792 | struct rb_node *prev, *p; | |||
793 | ||||
794 | if (RB_EMPTY_ROOT(&ref->tree)((&ref->tree)->rb_node == ((void *)0))) | |||
795 | return NULL((void *)0); | |||
796 | ||||
797 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
798 | ||||
799 | /* | |||
800 | * Try to reuse any existing barrier nodes already allocated for this | |||
801 | * i915_active, due to overlapping active phases there is likely a | |||
802 | * node kept alive (as we reuse before parking). We prefer to reuse | |||
803 | * completely idle barriers (less hassle in manipulating the llists), | |||
804 | * but otherwise any will do. | |||
805 | */ | |||
806 | if (ref->cache && is_idle_barrier(ref->cache, idx)) { | |||
807 | p = &ref->cache->node; | |||
808 | goto match; | |||
809 | } | |||
810 | ||||
811 | prev = NULL((void *)0); | |||
812 | p = ref->tree.rb_node; | |||
813 | while (p) { | |||
814 | struct active_node *node = | |||
815 | rb_entry(p, struct active_node, node)({ const __typeof( ((struct active_node *)0)->node ) *__mptr = (p); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, node) );}); | |||
816 | ||||
817 | if (is_idle_barrier(node, idx)) | |||
818 | goto match; | |||
819 | ||||
820 | prev = p; | |||
821 | if (node->timeline < idx) | |||
822 | p = READ_ONCE(p->rb_right)({ typeof(p->__entry.rbe_right) __tmp = *(volatile typeof( p->__entry.rbe_right) *)&(p->__entry.rbe_right); membar_datadep_consumer (); __tmp; }); | |||
823 | else | |||
824 | p = READ_ONCE(p->rb_left)({ typeof(p->__entry.rbe_left) __tmp = *(volatile typeof(p ->__entry.rbe_left) *)&(p->__entry.rbe_left); membar_datadep_consumer (); __tmp; }); | |||
825 | } | |||
826 | ||||
827 | /* | |||
828 | * No quick match, but we did find the leftmost rb_node for the | |||
829 | * kernel_context. Walk the rb_tree in-order to see if there were | |||
830 | * any idle-barriers on this timeline that we missed, or just use | |||
831 | * the first pending barrier. | |||
832 | */ | |||
833 | for (p = prev; p; p = rb_next(p)linux_root_RB_NEXT((p))) { | |||
834 | struct active_node *node = | |||
835 | rb_entry(p, struct active_node, node)({ const __typeof( ((struct active_node *)0)->node ) *__mptr = (p); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, node) );}); | |||
836 | struct intel_engine_cs *engine; | |||
837 | ||||
838 | if (node->timeline > idx) | |||
839 | break; | |||
840 | ||||
841 | if (node->timeline < idx) | |||
842 | continue; | |||
843 | ||||
844 | if (is_idle_barrier(node, idx)) | |||
845 | goto match; | |||
846 | ||||
847 | /* | |||
848 | * The list of pending barriers is protected by the | |||
849 | * kernel_context timeline, which notably we do not hold | |||
850 | * here. i915_request_add_active_barriers() may consume | |||
851 | * the barrier before we claim it, so we have to check | |||
852 | * for success. | |||
853 | */ | |||
854 | engine = __barrier_to_engine(node); | |||
855 | smp_rmb()do { __asm volatile("" ::: "memory"); } while (0); /* serialise with add_active_barriers */ | |||
856 | if (is_barrier(&node->base) && | |||
857 | ____active_del_barrier(ref, node, engine)) | |||
858 | goto match; | |||
859 | } | |||
860 | ||||
861 | return NULL((void *)0); | |||
862 | ||||
863 | match: | |||
864 | spin_lock_irq(&ref->tree_lock)mtx_enter(&ref->tree_lock); | |||
865 | rb_erase(p, &ref->tree)linux_root_RB_REMOVE((struct linux_root *)(&ref->tree) , (p)); /* Hide from waits and sibling allocations */ | |||
866 | if (p == &ref->cache->node) | |||
867 | WRITE_ONCE(ref->cache, NULL)({ typeof(ref->cache) __tmp = (((void *)0)); *(volatile typeof (ref->cache) *)&(ref->cache) = __tmp; __tmp; }); | |||
868 | spin_unlock_irq(&ref->tree_lock)mtx_leave(&ref->tree_lock); | |||
869 | ||||
870 | return rb_entry(p, struct active_node, node)({ const __typeof( ((struct active_node *)0)->node ) *__mptr = (p); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, node) );}); | |||
871 | } | |||
872 | ||||
873 | int i915_active_acquire_preallocate_barrier(struct i915_active *ref, | |||
874 | struct intel_engine_cs *engine) | |||
875 | { | |||
876 | intel_engine_mask_t tmp, mask = engine->mask; | |||
877 | struct llist_node *first = NULL((void *)0), *last = NULL((void *)0); | |||
| ||||
878 | struct intel_gt *gt = engine->gt; | |||
879 | ||||
880 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
881 | ||||
882 | /* Wait until the previous preallocation is completed */ | |||
883 | while (!llist_empty(&ref->preallocated_barriers)) | |||
884 | cond_resched();
885 | ||||
886 | /* | |||
887 | * Preallocate a node for each physical engine supporting the target | |||
888 | * engine (remember virtual engines have more than one sibling). | |||
889 | * We can then use the preallocated nodes in | |||
890 | * i915_active_acquire_barrier() | |||
891 | */ | |||
892 | GEM_BUG_ON(!mask)((void)0); | |||
893 | for_each_engine_masked(engine, gt, mask, tmp) {
894 | u64 idx = engine->kernel_context->timeline->fence_context; | |||
895 | struct llist_node *prev = first; | |||
896 | struct active_node *node; | |||
897 | ||||
898 | rcu_read_lock(); | |||
899 | node = reuse_idle_barrier(ref, idx); | |||
900 | rcu_read_unlock(); | |||
901 | if (!node) { | |||
902 | #ifdef __linux__ | |||
903 | node = kmem_cache_alloc(slab_cache, GFP_KERNEL(0x0001 | 0x0004)); | |||
904 | #else | |||
905 | node = pool_get(&slab_cache, PR_WAITOK0x0001); | |||
906 | #endif | |||
907 | if (!node) | |||
908 | goto unwind; | |||
909 | ||||
910 | RCU_INIT_POINTER(node->base.fence, NULL)do { (node->base.fence) = (((void *)0)); } while(0); | |||
911 | node->base.cb.func = node_retire; | |||
912 | node->timeline = idx; | |||
913 | node->ref = ref; | |||
914 | } | |||
915 | ||||
916 | if (!i915_active_fence_isset(&node->base)) { | |||
917 | /* | |||
918 | * Mark this as being *our* unconnected proto-node. | |||
919 | * | |||
920 | * Since this node is not in any list, and we have | |||
921 | * decoupled it from the rbtree, we can reuse the | |||
922 | * request to indicate this is an idle-barrier node | |||
923 | * and then we can use the rb_node and list pointers | |||
924 | * for our tracking of the pending barrier. | |||
925 | */ | |||
926 | RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN))do { (node->base.fence) = (ERR_PTR(-35)); } while(0); | |||
927 | node->base.cb.node.prev = (void *)engine; | |||
928 | __i915_active_acquire(ref); | |||
929 | } | |||
930 | GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN))((void)0); | |||
931 | ||||
932 | GEM_BUG_ON(barrier_to_engine(node) != engine)((void)0); | |||
933 | first = barrier_to_ll(node); | |||
934 | first->next = prev; | |||
935 | if (!last) | |||
936 | last = first; | |||
937 | intel_engine_pm_get(engine); | |||
938 | } | |||
939 | ||||
940 | GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers))((void)0); | |||
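/*
 * Note on the report above: the flagged dereference of 'new_last->next'
 * (llist.h line 58, presumably in llist_add_batch() given the parameter
 * name) is reached from the call below. With GEM_BUG_ON() compiled away,
 * the analyzer assumes the for_each_engine_masked() loop may run zero
 * times, leaving first == last == NULL; in practice engine->mask is
 * expected to be non-empty (GEM_BUG_ON(!mask) above), so this looks like a
 * false positive once that assertion is compiled out.
 */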
941 | llist_add_batch(first, last, &ref->preallocated_barriers); | |||
942 | ||||
943 | return 0; | |||
944 | ||||
945 | unwind: | |||
946 | while (first) { | |||
947 | struct active_node *node = barrier_from_ll(first); | |||
948 | ||||
949 | first = first->next; | |||
950 | ||||
951 | atomic_dec(&ref->count)__sync_fetch_and_sub(&ref->count, 1); | |||
952 | intel_engine_pm_put(barrier_to_engine(node)); | |||
953 | ||||
954 | #ifdef __linux__ | |||
955 | kmem_cache_free(slab_cache, node); | |||
956 | #else | |||
957 | pool_put(&slab_cache, node); | |||
958 | #endif | |||
959 | } | |||
960 | return -ENOMEM12; | |||
961 | } | |||
962 | ||||
963 | void i915_active_acquire_barrier(struct i915_active *ref) | |||
964 | { | |||
965 | struct llist_node *pos, *next; | |||
966 | unsigned long flags; | |||
967 | ||||
968 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
969 | ||||
970 | /* | |||
971 | * Transfer the list of preallocated barriers into the | |||
972 | * i915_active rbtree, but only as proto-nodes. They will be | |||
973 | * populated by i915_request_add_active_barriers() to point to the | |||
974 | * request that will eventually release them. | |||
975 | */ | |||
976 | llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
977 | struct active_node *node = barrier_from_ll(pos); | |||
978 | struct intel_engine_cs *engine = barrier_to_engine(node); | |||
979 | struct rb_node **p, *parent; | |||
980 | ||||
981 | spin_lock_irqsave_nested(&ref->tree_lock, flags,
982 | SINGLE_DEPTH_NESTING);
983 | parent = NULL((void *)0); | |||
984 | p = &ref->tree.rb_node; | |||
985 | while (*p) { | |||
986 | struct active_node *it; | |||
987 | ||||
988 | parent = *p; | |||
989 | ||||
990 | it = rb_entry(parent, struct active_node, node)({ const __typeof( ((struct active_node *)0)->node ) *__mptr = (parent); (struct active_node *)( (char *)__mptr - __builtin_offsetof (struct active_node, node) );}); | |||
991 | if (it->timeline < node->timeline) | |||
992 | p = &parent->rb_right__entry.rbe_right; | |||
993 | else | |||
994 | p = &parent->rb_left__entry.rbe_left; | |||
995 | } | |||
996 | rb_link_node(&node->node, parent, p); | |||
997 | rb_insert_color(&node->node, &ref->tree)linux_root_RB_INSERT_COLOR((struct linux_root *)(&ref-> tree), (&node->node)); | |||
998 | spin_unlock_irqrestore(&ref->tree_lock, flags)do { (void)(flags); mtx_leave(&ref->tree_lock); } while (0); | |||
999 | ||||
1000 | GEM_BUG_ON(!intel_engine_pm_is_awake(engine))((void)0); | |||
1001 | llist_add(barrier_to_ll(node), &engine->barrier_tasks); | |||
1002 | intel_engine_pm_put_delay(engine, 2); | |||
1003 | } | |||
1004 | } | |||
1005 | ||||
1006 | static struct dma_fence **ll_to_fence_slot(struct llist_node *node) | |||
1007 | { | |||
1008 | return __active_fence_slot(&barrier_from_ll(node)->base); | |||
1009 | } | |||
1010 | ||||
1011 | void i915_request_add_active_barriers(struct i915_request *rq) | |||
1012 | { | |||
1013 | struct intel_engine_cs *engine = rq->engine; | |||
1014 | struct llist_node *node, *next; | |||
1015 | unsigned long flags; | |||
1016 | ||||
1017 | GEM_BUG_ON(!intel_context_is_barrier(rq->context))((void)0); | |||
1018 | GEM_BUG_ON(intel_engine_is_virtual(engine))((void)0); | |||
1019 | GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline)((void)0); | |||
1020 | ||||
1021 | node = llist_del_all(&engine->barrier_tasks); | |||
1022 | if (!node) | |||
1023 | return; | |||
1024 | /* | |||
1025 | * Attach the list of proto-fences to the in-flight request such | |||
1026 | * that the parent i915_active will be released when this request | |||
1027 | * is retired. | |||
1028 | */ | |||
1029 | spin_lock_irqsave(&rq->lock, flags)do { flags = 0; mtx_enter(&rq->lock); } while (0); | |||
1030 | llist_for_each_safe(node, next, node) {
1031 | /* serialise with reuse_idle_barrier */ | |||
1032 | smp_store_mb(*ll_to_fence_slot(node), &rq->fence)do { *ll_to_fence_slot(node) = &rq->fence; do { __asm volatile ("mfence" ::: "memory"); } while (0); } while (0); | |||
1033 | list_add_tail((struct list_head *)node, &rq->fence.cb_list); | |||
1034 | } | |||
1035 | spin_unlock_irqrestore(&rq->lock, flags)do { (void)(flags); mtx_leave(&rq->lock); } while (0); | |||
1036 | } | |||
1037 | ||||
1038 | /* | |||
1039 | * __i915_active_fence_set: Update the last active fence along its timeline | |||
1040 | * @active: the active tracker | |||
1041 | * @fence: the new fence (under construction) | |||
1042 | * | |||
1043 | * Records the new @fence as the last active fence along its timeline in | |||
1044 | * this active tracker, moving the tracking callbacks from the previous | |||
1045 | * fence onto this one. Gets and returns a reference to the previous fence | |||
1046 | * (if not already completed), which the caller must put after making sure | |||
1047 | * that it is executed before the new fence. To ensure that the order of | |||
1048 | * fences within the timeline of the i915_active_fence is understood, it | |||
1049 | * should be locked by the caller. | |||
1050 | */ | |||
1051 | struct dma_fence * | |||
1052 | __i915_active_fence_set(struct i915_active_fence *active, | |||
1053 | struct dma_fence *fence) | |||
1054 | { | |||
1055 | struct dma_fence *prev; | |||
1056 | unsigned long flags; | |||
1057 | ||||
1058 | /* | |||
1059 | * In case of fences embedded in i915_requests, their memory is | |||
1060 | * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release | |||
1061 | * by new requests. Then, there is a risk of passing back a pointer | |||
1062 | * to a new, completely unrelated fence that reuses the same memory | |||
1063 | * while tracked under a different active tracker. Combined with i915 | |||
1064 | * perf open/close operations that build await dependencies between | |||
1065 | * engine kernel context requests and user requests from different | |||
1066 | * timelines, this can lead to dependency loops and infinite waits. | |||
1067 | * | |||
1068 | * As a countermeasure, we try to get a reference to the active->fence | |||
1069 | * first, so if we succeed and pass it back to our user then it is not | |||
1070 | * released and potentially reused by an unrelated request before the | |||
1071 | * user has a chance to set up an await dependency on it. | |||
1072 | */ | |||
1073 | prev = i915_active_fence_get(active); | |||
1074 | if (fence == prev) | |||
1075 | return fence; | |||
1076 | ||||
1077 | GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))((void)0); | |||
1078 | ||||
1079 | /* | |||
1080 | * Consider that we have two threads arriving (A and B), with | |||
1081 | * C already resident as the active->fence. | |||
1082 | * | |||
1083 | * Both A and B have got a reference to C or NULL, depending on the | |||
1084 | * timing of the interrupt handler. Let's assume that if A has got C | |||
1085 | * then it has locked C first (before B). | |||
1086 | * | |||
1087 | * Note the strong ordering of the timeline also provides consistent | |||
1088 | * nesting rules for the fence->lock; the inner lock is always the | |||
1089 | * older lock. | |||
1090 | */ | |||
1091 | spin_lock_irqsave(fence->lock, flags)do { flags = 0; mtx_enter(fence->lock); } while (0); | |||
1092 | if (prev) | |||
1093 | spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING)mtx_enter(prev->lock); | |||
1094 | ||||
1095 | /* | |||
1096 | * A does the cmpxchg first, and so it sees C or NULL, as before, or | |||
1097 | * something else, depending on the timing of other threads and/or | |||
1098 | * interrupt handler. If not the same as before then A unlocks C if | |||
1099 | * applicable and retries, starting from an attempt to get a new | |||
1100 | * active->fence. Meanwhile, B follows the same path as A. | |||
1101 | * Once A succeeds with cmpxchg, B fails again, retries, gets A from | |||
1102 | * active->fence, locks it as soon as A completes, and possibly | |||
1103 | * succeeds with cmpxchg. | |||
1104 | */ | |||
1105 | while (cmpxchg(__active_fence_slot(active), prev, fence)__sync_val_compare_and_swap(__active_fence_slot(active), prev , fence) != prev) { | |||
1106 | if (prev) { | |||
1107 | spin_unlock(prev->lock)mtx_leave(prev->lock); | |||
1108 | dma_fence_put(prev); | |||
1109 | } | |||
1110 | spin_unlock_irqrestore(fence->lock, flags)do { (void)(flags); mtx_leave(fence->lock); } while (0); | |||
1111 | ||||
1112 | prev = i915_active_fence_get(active); | |||
1113 | GEM_BUG_ON(prev == fence)((void)0); | |||
1114 | ||||
1115 | spin_lock_irqsave(fence->lock, flags)do { flags = 0; mtx_enter(fence->lock); } while (0); | |||
1116 | if (prev) | |||
1117 | spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING)mtx_enter(prev->lock); | |||
1118 | } | |||
1119 | ||||
1120 | /* | |||
1121 | * If prev is NULL then the previous fence must have been signaled | |||
1122 | * and we know that we are first on the timeline. If it is still | |||
1123 | * present then, having the lock on that fence already acquired, we | |||
1124 | * serialise with the interrupt handler, in the process of removing it | |||
1125 | * from any future interrupt callback. A will then wait on C before | |||
1126 | * executing (if present). | |||
1127 | * | |||
1128 | * As B is second, it sees A as the previous fence and so waits for | |||
1129 | * it to complete its transition and takes over the occupancy for | |||
1130 | * itself -- remembering that it needs to wait on A before executing. | |||
1131 | */ | |||
1132 | if (prev) { | |||
1133 | __list_del_entry(&active->cb.node)list_del(&active->cb.node); | |||
1134 | spin_unlock(prev->lock)mtx_leave(prev->lock); /* serialise with prev->cb_list */ | |||
1135 | } | |||
1136 | list_add_tail(&active->cb.node, &fence->cb_list); | |||
1137 | spin_unlock_irqrestore(fence->lock, flags)do { (void)(flags); mtx_leave(fence->lock); } while (0); | |||
1138 | ||||
1139 | return prev; | |||
1140 | } | |||
1141 | ||||
1142 | int i915_active_fence_set(struct i915_active_fence *active, | |||
1143 | struct i915_request *rq) | |||
1144 | { | |||
1145 | struct dma_fence *fence; | |||
1146 | int err = 0; | |||
1147 | ||||
1148 | /* Must maintain timeline ordering wrt previous active requests */ | |||
1149 | fence = __i915_active_fence_set(active, &rq->fence); | |||
1150 | if (fence) { | |||
1151 | err = i915_request_await_dma_fence(rq, fence); | |||
1152 | dma_fence_put(fence); | |||
1153 | } | |||
1154 | ||||
1155 | return err; | |||
1156 | } | |||
1157 | ||||
1158 | void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
1159 | { | |||
1160 | active_fence_cb(fence, cb); | |||
1161 | } | |||
1162 | ||||
1163 | struct auto_active { | |||
1164 | struct i915_active base; | |||
1165 | struct kref ref; | |||
1166 | }; | |||
1167 | ||||
1168 | struct i915_active *i915_active_get(struct i915_active *ref) | |||
1169 | { | |||
1170 | struct auto_active *aa = container_of(ref, typeof(*aa), base)({ const __typeof( ((typeof(*aa) *)0)->base ) *__mptr = (ref ); (typeof(*aa) *)( (char *)__mptr - __builtin_offsetof(typeof (*aa), base) );}); | |||
1171 | ||||
1172 | kref_get(&aa->ref); | |||
1173 | return &aa->base; | |||
1174 | } | |||
1175 | ||||
1176 | static void auto_release(struct kref *ref) | |||
1177 | { | |||
1178 | struct auto_active *aa = container_of(ref, typeof(*aa), ref)({ const __typeof( ((typeof(*aa) *)0)->ref ) *__mptr = (ref ); (typeof(*aa) *)( (char *)__mptr - __builtin_offsetof(typeof (*aa), ref) );}); | |||
1179 | ||||
1180 | i915_active_fini(&aa->base); | |||
1181 | kfree(aa); | |||
1182 | } | |||
1183 | ||||
1184 | void i915_active_put(struct i915_active *ref) | |||
1185 | { | |||
1186 | struct auto_active *aa = container_of(ref, typeof(*aa), base)({ const __typeof( ((typeof(*aa) *)0)->base ) *__mptr = (ref ); (typeof(*aa) *)( (char *)__mptr - __builtin_offsetof(typeof (*aa), base) );}); | |||
1187 | ||||
1188 | kref_put(&aa->ref, auto_release); | |||
1189 | } | |||
1190 | ||||
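/*
 * While the tracker is busy it holds an extra self-reference: auto_active()
 * takes it on the first acquire and auto_retire() drops it once the tracker
 * idles, so the allocation cannot vanish while requests are still being
 * tracked.
 */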
1191 | static int auto_active(struct i915_active *ref) | |||
1192 | { | |||
1193 | i915_active_get(ref); | |||
1194 | return 0; | |||
1195 | } | |||
1196 | ||||
1197 | static void auto_retire(struct i915_active *ref) | |||
1198 | { | |||
1199 | i915_active_put(ref); | |||
1200 | } | |||
1201 | ||||
1202 | struct i915_active *i915_active_create(void) | |||
1203 | { | |||
1204 | struct auto_active *aa; | |||
1205 | ||||
1206 | aa = kmalloc(sizeof(*aa), GFP_KERNEL(0x0001 | 0x0004)); | |||
1207 | if (!aa) | |||
1208 | return NULL((void *)0); | |||
1209 | ||||
1210 | kref_init(&aa->ref); | |||
1211 | i915_active_init(&aa->base, auto_active, auto_retire, 0)do { static struct lock_class_key __mkey; static struct lock_class_key __wkey; __i915_active_init(&aa->base, auto_active, auto_retire , 0, &__mkey, &__wkey); } while (0); | |||
1212 | ||||
1213 | return &aa->base; | |||
1214 | } | |||
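
/*
 * Lifetime sketch (illustrative only; the function name is an assumption
 * and error handling is trimmed): create an auto tracker, record a request
 * on it, then return it to the caller.  The kref above keeps the tracker
 * alive until both this reference and the implicit busy reference taken by
 * auto_active() have been dropped.
 */
static struct i915_active *example_create_and_track(struct i915_request *rq)
{
	struct i915_active *ref;
	int err;

	ref = i915_active_create();
	if (!ref)
		return NULL;

	err = i915_active_acquire(ref);
	if (!err) {
		err = i915_active_add_request(ref, rq);
		i915_active_release(ref);
	}
	if (err) {
		i915_active_put(ref);
		return NULL;
	}

	return ref; /* caller eventually drops this with i915_active_put() */
}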
1215 | ||||
1216 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0 | |||
1217 | #include "selftests/i915_active.c" | |||
1218 | #endif | |||
1219 | ||||
1220 | void i915_active_module_exit(void) | |||
1221 | { | |||
1222 | #ifdef __linux__ | |||
1223 | kmem_cache_destroy(slab_cache); | |||
1224 | #else | |||
1225 | pool_destroy(&slab_cache); | |||
1226 | #endif | |||
1227 | } | |||
1228 | ||||
1229 | int __init i915_active_module_init(void) | |||
1230 | { | |||
1231 | #ifdef __linux__ | |||
1232 | slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); | |||
1233 | if (!slab_cache) | |||
1234 | return -ENOMEM12; | |||
1235 | #else | |||
1236 | pool_init(&slab_cache, sizeof(struct active_node), | |||
1237 | CACHELINESIZE64, IPL_TTY0x9, 0, "drmsc", NULL((void *)0)); | |||
1238 | #endif | |||
1239 | ||||
1240 | return 0; | |||
1241 | } |
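
/*
 * Porting note: on OpenBSD the Linux kmem_cache above is replaced by a
 * pool(9) of struct active_node, initialised at module load and destroyed
 * at module unload.
 */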
1 | /* Public domain. */ | |||
2 | ||||
3 | #ifndef _LINUX_LLIST_H | |||
4 | #define _LINUX_LLIST_H | |||
5 | ||||
6 | #include <sys/atomic.h> | |||
7 | ||||
8 | struct llist_node { | |||
9 | struct llist_node *next; | |||
10 | }; | |||
11 | ||||
12 | struct llist_head { | |||
13 | struct llist_node *first; | |||
14 | }; | |||
15 | ||||
16 | #define llist_entry(ptr, type, member)({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); ( type *)( (char *)__mptr - __builtin_offsetof(type, member) ); }) container_of(ptr, type, member)({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); ( type *)( (char *)__mptr - __builtin_offsetof(type, member) ); }) | |||
17 | ||||
18 | static inline struct llist_node * | |||
19 | llist_del_all(struct llist_head *head) | |||
20 | { | |||
21 | return atomic_swap_ptr(&head->first, NULL)_atomic_swap_ptr((&head->first), (((void *)0))); | |||
22 | } | |||
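
/*
 * llist_del_all() detaches the entire chain in one atomic swap.  Because
 * llist_add() pushes at the head, the returned nodes are in reverse
 * (most-recently-added-first) order.
 */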
23 | ||||
24 | static inline struct llist_node * | |||
25 | llist_del_first(struct llist_head *head) | |||
26 | { | |||
27 | struct llist_node *first, *next; | |||
28 | ||||
29 | do { | |||
30 | first = head->first; | |||
31 | if (first == NULL((void *)0)) | |||
32 | return NULL((void *)0); | |||
33 | next = first->next; | |||
34 | } while (atomic_cas_ptr(&head->first, first, next)_atomic_cas_ptr((&head->first), (first), (next)) != first); | |||
35 | ||||
36 | return first; | |||
37 | } | |||
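
/*
 * As in the Linux original, llist_del_first() must be serialised against
 * other consumers (single consumer, or an external lock): the unlocked read
 * of first->next leaves the compare-and-swap open to ABA races when nodes
 * are popped and re-added concurrently.
 */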
38 | ||||
39 | static inline bool_Bool | |||
40 | llist_add(struct llist_node *new, struct llist_head *head) | |||
41 | { | |||
42 | struct llist_node *first; | |||
43 | ||||
44 | do { | |||
45 | new->next = first = head->first; | |||
46 | } while (atomic_cas_ptr(&head->first, first, new)_atomic_cas_ptr((&head->first), (first), (new)) != first); | |||
47 | ||||
48 | return (first == NULL((void *)0)); | |||
49 | } | |||
50 | ||||
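/*
 * Splice a pre-linked chain [new_first, new_last] onto the head of the
 * list.  Callers must pass a non-empty chain: new_last->next is written
 * unconditionally, so a NULL new_last dereferences a null pointer here.
 */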
51 | static inline bool_Bool | |||
52 | llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | |||
53 | struct llist_head *head) | |||
54 | { | |||
55 | struct llist_node *first; | |||
56 | ||||
57 | do { | |||
58 | new_last->next = first = head->first; | |||
59 | } while (atomic_cas_ptr(&head->first, first, new_first)_atomic_cas_ptr((&head->first), (first), (new_first)) != first); | |||
60 | ||||
61 | return (first == NULL((void *)0)); | |||
62 | } | |||
63 | ||||
64 | static inline void | |||
65 | init_llist_head(struct llist_head *head) | |||
66 | { | |||
67 | head->first = NULL((void *)0); | |||
68 | } | |||
69 | ||||
70 | static inline bool_Bool | |||
71 | llist_empty(struct llist_head *head) | |||
72 | { | |||
73 | return (head->first == NULL((void *)0)); | |||
74 | } | |||
75 | ||||
76 | #define llist_for_each_safe(pos, n, node)for ((pos) = (node); (pos) != ((void *)0) && ((n) = ( pos)->next, pos); (pos) = (n)) \ | |||
77 | for ((pos) = (node); \ | |||
78 | (pos) != NULL((void *)0) && \ | |||
79 | ((n) = (pos)->next, pos); \ | |||
80 | (pos) = (n)) | |||
81 | ||||
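/*
 * The entry iterators below terminate by converting the container pointer
 * back to its embedded node: adding offsetof() to pos reconstructs the
 * original llist_node pointer, so the comparison against NULL stops the
 * walk once llist_entry() has translated the chain's terminating NULL.
 */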
82 | #define llist_for_each_entry_safe(pos, n, node, member)for (pos = ({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = ((node)); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof (__typeof(*pos), member) );}); ((char *)(pos) + __builtin_offsetof (typeof(*(pos)), member)) != ((void *)0) && (n = ({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = (pos-> member.next); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof (__typeof(*pos), member) );}), pos); pos = n) \ | |||
83 | for (pos = llist_entry((node), __typeof(*pos), member)({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = ((node)); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof (__typeof(*pos), member) );}); \ | |||
84 | ((char *)(pos) + offsetof(typeof(*(pos)), member)__builtin_offsetof(typeof(*(pos)), member)) != NULL((void *)0) && \ | |||
85 | (n = llist_entry(pos->member.next, __typeof(*pos), member)({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = (pos->member.next); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof(__typeof(*pos), member) );}), pos); \ | |||
86 | pos = n) | |||
87 | ||||
88 | #define llist_for_each_entry(pos, node, member)for ((pos) = ({ const __typeof( ((__typeof(*(pos)) *)0)->member ) *__mptr = ((node)); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member) );}); ((char *) (pos) + __builtin_offsetof(typeof(*(pos)), member)) != ((void *)0); (pos) = ({ const __typeof( ((__typeof(*(pos)) *)0)-> member ) *__mptr = ((pos)->member.next); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member ) );})) \ | |||
89 | for ((pos) = llist_entry((node), __typeof(*(pos)), member)({ const __typeof( ((__typeof(*(pos)) *)0)->member ) *__mptr = ((node)); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof (__typeof(*(pos)), member) );}); \ | |||
90 | ((char *)(pos) + offsetof(typeof(*(pos)), member)__builtin_offsetof(typeof(*(pos)), member)) != NULL((void *)0); \ | |||
91 | (pos) = llist_entry((pos)->member.next, __typeof(*(pos)), member)({ const __typeof( ((__typeof(*(pos)) *)0)->member ) *__mptr = ((pos)->member.next); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member) );})) | |||
92 | ||||
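
/*
 * Minimal producer/consumer sketch (illustrative; struct work and the
 * example_* helpers are assumptions, not part of this header).  Producers
 * may push from any context; a consumer takes the whole list in one atomic
 * swap and walks it without further synchronisation.
 */
struct work {
	struct llist_node entry;
	int payload;
};

static inline void example_produce(struct llist_head *q, struct work *w)
{
	if (llist_add(&w->entry, q)) {
		/* the list was empty: a real user might wake a consumer here */
	}
}

static inline void example_consume(struct llist_head *q)
{
	struct llist_node *batch = llist_del_all(q);
	struct work *w, *next;

	/* nodes are delivered most-recently-added first */
	llist_for_each_entry_safe(w, next, batch, entry)
		w->payload = 0;	/* process (and possibly free) each item */
}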
93 | #endif |