File: dev/pci/drm/include/linux/llist.h
Warning: line 59, column 18: Access to field 'next' results in a dereference of a null pointer (loaded from variable 'new_last')
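Context for the warning, reconstructed from the listings in this report: the two callers of llist_add_batch() in the i915_active.c listing below are ____active_del_barrier(), which only calls it under "if (head)" (line 432), and i915_active_acquire_preallocate_barrier(), where "first" and "last" start out NULL (line 916) and are only assigned inside the for_each_engine_masked() loop (line 932). GEM_BUG_ON() expands to ((void)0) in this configuration, so the analyzer is free to follow the mask == 0 path and reach llist_add_batch(NULL, NULL, &ref->preallocated_barriers) at line 980. The sketch below shows the kind of body the warning points at; the actual code at llist.h line 59 is not included in the truncated llist.h listing at the end of this report, so this is an assumption based on the usual compare-and-swap implementation, not a quote of the header.

/*
 * Sketch only: an assumed llist_add_batch() in the style of the compat
 * header below.  The real body at llist.h:59 is not shown in this report.
 */
static inline bool
llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
    struct llist_head *head)
{
	struct llist_node *first;

	do {
		first = head->first;
		new_last->next = first;	/* ~line 59: NULL deref if new_last == NULL */
	} while (atomic_cas_ptr(&head->first, first, new_first) != first);

	return first == NULL;
}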
1 | /* | |||
2 | * SPDX-License-Identifier: MIT | |||
3 | * | |||
4 | * Copyright © 2019 Intel Corporation | |||
5 | */ | |||
6 | ||||
7 | #include <linux/debugobjects.h> | |||
8 | ||||
9 | #include "gt/intel_context.h" | |||
10 | #include "gt/intel_engine_heartbeat.h" | |||
11 | #include "gt/intel_engine_pm.h" | |||
12 | #include "gt/intel_ring.h" | |||
13 | ||||
14 | #include "i915_drv.h" | |||
15 | #include "i915_active.h" | |||
16 | #include "i915_globals.h" | |||
17 | ||||
18 | /* | |||
19 | * Active refs memory management | |||
20 | * | |||
21 | * To be more economical with memory, we reap all the i915_active trees as | |||
22 | * they idle (when we know the active requests are inactive) and allocate the | |||
23 | * nodes from a local slab cache to hopefully reduce the fragmentation. | |||
24 | */ | |||
25 | static struct i915_global_active { | |||
26 | struct i915_global base; | |||
27 | #ifdef __linux__ | |||
28 | struct kmem_cache *slab_cache; | |||
29 | #else | |||
30 | struct pool slab_cache; | |||
31 | #endif | |||
32 | } global; | |||
33 | ||||
34 | struct active_node { | |||
35 | struct rb_node node; | |||
36 | struct i915_active_fence base; | |||
37 | struct i915_active *ref; | |||
38 | u64 timeline; | |||
39 | }; | |||
40 | ||||
41 | #define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node) | |||
42 | ||||
43 | static inline struct active_node * | |||
44 | node_from_active(struct i915_active_fence *active) | |||
45 | { | |||
46 | return container_of(active, struct active_node, base); | |||
47 | } | |||
48 | ||||
49 | #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers) | |||
50 | ||||
51 | static inline bool is_barrier(const struct i915_active_fence *active) | |||
52 | { | |||
53 | return IS_ERR(rcu_access_pointer(active->fence)); | |||
54 | } | |||
55 | ||||
56 | static inline struct llist_node *barrier_to_ll(struct active_node *node) | |||
57 | { | |||
58 | GEM_BUG_ON(!is_barrier(&node->base)); | |||
59 | return (struct llist_node *)&node->base.cb.node; | |||
60 | } | |||
61 | ||||
62 | static inline struct intel_engine_cs * | |||
63 | __barrier_to_engine(struct active_node *node) | |||
64 | { | |||
65 | return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev); | |||
66 | } | |||
67 | ||||
68 | static inline struct intel_engine_cs * | |||
69 | barrier_to_engine(struct active_node *node) | |||
70 | { | |||
71 | GEM_BUG_ON(!is_barrier(&node->base)); | |||
72 | return __barrier_to_engine(node); | |||
73 | } | |||
74 | ||||
75 | static inline struct active_node *barrier_from_ll(struct llist_node *x) | |||
76 | { | |||
77 | return container_of((struct list_head *)x, | |||
78 | struct active_node, base.cb.node); | |||
79 | } | |||
80 | ||||
81 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) | |||
82 | ||||
83 | static void *active_debug_hint(void *addr) | |||
84 | { | |||
85 | struct i915_active *ref = addr; | |||
86 | ||||
87 | return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; | |||
88 | } | |||
89 | ||||
90 | static const struct debug_obj_descr active_debug_desc = { | |||
91 | .name = "i915_active", | |||
92 | .debug_hint = active_debug_hint, | |||
93 | }; | |||
94 | ||||
95 | static void debug_active_init(struct i915_active *ref) | |||
96 | { | |||
97 | debug_object_init(ref, &active_debug_desc); | |||
98 | } | |||
99 | ||||
100 | static void debug_active_activate(struct i915_active *ref) | |||
101 | { | |||
102 | lockdep_assert_held(&ref->tree_lock); | |||
103 | if (!atomic_read(&ref->count)) /* before the first inc */ | |||
104 | debug_object_activate(ref, &active_debug_desc); | |||
105 | } | |||
106 | ||||
107 | static void debug_active_deactivate(struct i915_active *ref) | |||
108 | { | |||
109 | lockdep_assert_held(&ref->tree_lock); | |||
110 | if (!atomic_read(&ref->count)) /* after the last dec */ | |||
111 | debug_object_deactivate(ref, &active_debug_desc); | |||
112 | } | |||
113 | ||||
114 | static void debug_active_fini(struct i915_active *ref) | |||
115 | { | |||
116 | debug_object_free(ref, &active_debug_desc); | |||
117 | } | |||
118 | ||||
119 | static void debug_active_assert(struct i915_active *ref) | |||
120 | { | |||
121 | debug_object_assert_init(ref, &active_debug_desc); | |||
122 | } | |||
123 | ||||
124 | #else | |||
125 | ||||
126 | static inline void debug_active_init(struct i915_active *ref) { } | |||
127 | static inline void debug_active_activate(struct i915_active *ref) { } | |||
128 | static inline void debug_active_deactivate(struct i915_active *ref) { } | |||
129 | static inline void debug_active_fini(struct i915_active *ref) { } | |||
130 | static inline void debug_active_assert(struct i915_active *ref) { } | |||
131 | ||||
132 | #endif | |||
133 | ||||
134 | static void | |||
135 | __active_retire(struct i915_active *ref) | |||
136 | { | |||
137 | struct rb_root root = RB_ROOT; | |||
138 | struct active_node *it, *n; | |||
139 | unsigned long flags; | |||
140 | ||||
141 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
142 | ||||
143 | /* return the unused nodes to our slabcache -- flushing the allocator */ | |||
144 | if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) | |||
145 | return; | |||
146 | ||||
147 | GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); | |||
148 | debug_active_deactivate(ref); | |||
149 | ||||
150 | /* Even if we have not used the cache, we may still have a barrier */ | |||
151 | if (!ref->cache) | |||
152 | ref->cache = fetch_node(ref->tree.rb_node); | |||
153 | ||||
154 | /* Keep the MRU cached node for reuse */ | |||
155 | if (ref->cache) { | |||
156 | /* Discard all other nodes in the tree */ | |||
157 | rb_erase(&ref->cache->node, &ref->tree); | |||
158 | root = ref->tree; | |||
159 | ||||
160 | /* Rebuild the tree with only the cached node */ | |||
161 | rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node); | |||
162 | rb_insert_color(&ref->cache->node, &ref->tree); | |||
163 | GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); | |||
164 | ||||
165 | /* Make the cached node available for reuse with any timeline */ | |||
166 | if (IS_ENABLED(CONFIG_64BIT)) | |||
167 | ref->cache->timeline = 0; /* needs cmpxchg(u64) */ | |||
168 | } | |||
169 | ||||
170 | spin_unlock_irqrestore(&ref->tree_lock, flags); | |||
171 | ||||
172 | /* After the final retire, the entire struct may be freed */ | |||
173 | if (ref->retire) | |||
174 | ref->retire(ref); | |||
175 | ||||
176 | /* ... except if you wait on it, you must manage your own references! */ | |||
177 | wake_up_var(ref); | |||
178 | ||||
179 | /* Finally free the discarded timeline tree */ | |||
180 | rbtree_postorder_for_each_entry_safe(it, n, &root, node) { | |||
181 | GEM_BUG_ON(i915_active_fence_isset(&it->base)); | |||
182 | #ifdef __linux__ | |||
183 | kmem_cache_free(global.slab_cache, it); | |||
184 | #else | |||
185 | pool_put(&global.slab_cache, it); | |||
186 | #endif | |||
187 | } | |||
188 | } | |||
189 | ||||
190 | static void | |||
191 | active_work(struct work_struct *wrk) | |||
192 | { | |||
193 | struct i915_active *ref = container_of(wrk, typeof(*ref), work); | |||
194 | ||||
195 | GEM_BUG_ON(!atomic_read(&ref->count))((void)0); | |||
196 | if (atomic_add_unless(&ref->count, -1, 1)) | |||
197 | return; | |||
198 | ||||
199 | __active_retire(ref); | |||
200 | } | |||
201 | ||||
202 | static void | |||
203 | active_retire(struct i915_active *ref) | |||
204 | { | |||
205 | GEM_BUG_ON(!atomic_read(&ref->count))((void)0); | |||
206 | if (atomic_add_unless(&ref->count, -1, 1)) | |||
207 | return; | |||
208 | ||||
209 | if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { | |||
210 | queue_work(system_unbound_wq, &ref->work); | |||
211 | return; | |||
212 | } | |||
213 | ||||
214 | __active_retire(ref); | |||
215 | } | |||
216 | ||||
217 | static inline struct dma_fence ** | |||
218 | __active_fence_slot(struct i915_active_fence *active) | |||
219 | { | |||
220 | return (struct dma_fence ** __force)&active->fence; | |||
221 | } | |||
222 | ||||
223 | static inline bool_Bool | |||
224 | active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
225 | { | |||
226 | struct i915_active_fence *active = | |||
227 | container_of(cb, typeof(*active), cb); | |||
228 | ||||
229 | return cmpxchg(__active_fence_slot(active), fence, NULL) == fence; | |||
230 | } | |||
231 | ||||
232 | static void | |||
233 | node_retire(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
234 | { | |||
235 | if (active_fence_cb(fence, cb)) | |||
236 | active_retire(container_of(cb, struct active_node, base.cb)->ref); | |||
237 | } | |||
238 | ||||
239 | static void | |||
240 | excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
241 | { | |||
242 | if (active_fence_cb(fence, cb)) | |||
243 | active_retire(container_of(cb, struct i915_active, excl.cb)); | |||
244 | } | |||
245 | ||||
246 | static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) | |||
247 | { | |||
248 | struct active_node *it; | |||
249 | ||||
250 | GEM_BUG_ON(idx == 0)((void)0); /* 0 is the unordered timeline, rsvd for cache */ | |||
251 | ||||
252 | /* | |||
253 | * We track the most recently used timeline to skip a rbtree search | |||
254 | * for the common case, under typical loads we never need the rbtree | |||
255 | * at all. We can reuse the last slot if it is empty, that is | |||
256 | * after the previous activity has been retired, or if it matches the | |||
257 | * current timeline. | |||
258 | */ | |||
259 | it = READ_ONCE(ref->cache); | |||
260 | if (it) { | |||
261 | u64 cached = READ_ONCE(it->timeline); | |||
262 | ||||
263 | /* Once claimed, this slot will only belong to this idx */ | |||
264 | if (cached == idx) | |||
265 | return it; | |||
266 | ||||
267 | #ifdef CONFIG_64BIT /* for cmpxchg(u64) */ | |||
268 | /* | |||
269 | * An unclaimed cache [.timeline=0] can only be claimed once. | |||
270 | * | |||
271 | * If the value is already non-zero, some other thread has | |||
272 | * claimed the cache and we know that it does not match our | |||
273 | * idx. If, and only if, the timeline is currently zero is it | |||
274 | * worth competing to claim it atomically for ourselves (for | |||
275 | * only the winner of that race will cmpxchg return the old | |||
276 | * value of 0). | |||
277 | */ | |||
278 | if (!cached && !cmpxchg(&it->timeline, 0, idx)) | |||
279 | return it; | |||
280 | #endif | |||
281 | } | |||
282 | ||||
283 | BUILD_BUG_ON(offsetof(typeof(*it), node)); | |||
284 | ||||
285 | /* While active, the tree can only be built; not destroyed */ | |||
286 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
287 | ||||
288 | it = fetch_node(ref->tree.rb_node); | |||
289 | while (it) { | |||
290 | if (it->timeline < idx) { | |||
291 | it = fetch_node(it->node.rb_right); | |||
292 | } else if (it->timeline > idx) { | |||
293 | it = fetch_node(it->node.rb_left); | |||
294 | } else { | |||
295 | WRITE_ONCE(ref->cache, it); | |||
296 | break; | |||
297 | } | |||
298 | } | |||
299 | ||||
300 | /* NB: If the tree rotated beneath us, we may miss our target. */ | |||
301 | return it; | |||
302 | } | |||
303 | ||||
304 | static struct i915_active_fence * | |||
305 | active_instance(struct i915_active *ref, u64 idx) | |||
306 | { | |||
307 | struct active_node *node, *prealloc; | |||
308 | struct rb_node **p, *parent; | |||
309 | ||||
310 | node = __active_lookup(ref, idx); | |||
311 | if (likely(node)) | |||
312 | return &node->base; | |||
313 | ||||
314 | /* Preallocate a replacement, just in case */ | |||
315 | #ifdef __linux__ | |||
316 | prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); | |||
317 | #else | |||
318 | prealloc = pool_get(&global.slab_cache, PR_WAITOK); | |||
319 | #endif | |||
320 | if (!prealloc) | |||
321 | return NULL((void *)0); | |||
322 | ||||
323 | spin_lock_irq(&ref->tree_lock); | |||
324 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
325 | ||||
326 | parent = NULL((void *)0); | |||
327 | p = &ref->tree.rb_node; | |||
328 | while (*p) { | |||
329 | parent = *p; | |||
330 | ||||
331 | node = rb_entry(parent, struct active_node, node); | |||
332 | if (node->timeline == idx) { | |||
333 | #ifdef __linux__ | |||
334 | kmem_cache_free(global.slab_cache, prealloc); | |||
335 | #else | |||
336 | pool_put(&global.slab_cache, prealloc); | |||
337 | #endif | |||
338 | goto out; | |||
339 | } | |||
340 | ||||
341 | if (node->timeline < idx) | |||
342 | p = &parent->rb_right; | |||
343 | else | |||
344 | p = &parent->rb_left; | |||
345 | } | |||
346 | ||||
347 | node = prealloc; | |||
348 | __i915_active_fence_init(&node->base, NULL((void *)0), node_retire); | |||
349 | node->ref = ref; | |||
350 | node->timeline = idx; | |||
351 | ||||
352 | rb_link_node(&node->node, parent, p); | |||
353 | rb_insert_color(&node->node, &ref->tree); | |||
354 | ||||
355 | out: | |||
356 | WRITE_ONCE(ref->cache, node); | |||
357 | spin_unlock_irq(&ref->tree_lock); | |||
358 | ||||
359 | return &node->base; | |||
360 | } | |||
361 | ||||
362 | void __i915_active_init(struct i915_active *ref, | |||
363 | int (*active)(struct i915_active *ref), | |||
364 | void (*retire)(struct i915_active *ref), | |||
365 | struct lock_class_key *mkey, | |||
366 | struct lock_class_key *wkey) | |||
367 | { | |||
368 | unsigned long bits; | |||
369 | ||||
370 | debug_active_init(ref); | |||
371 | ||||
372 | ref->flags = 0; | |||
373 | ref->active = active; | |||
374 | ref->retire = ptr_unpack_bits(retire, &bits, 2); | |||
375 | if (bits & I915_ACTIVE_MAY_SLEEP) | |||
376 | ref->flags |= I915_ACTIVE_RETIRE_SLEEPS; | |||
377 | ||||
378 | mtx_init(&ref->tree_lock, IPL_TTY); | |||
379 | ref->tree = RB_ROOT; | |||
380 | ref->cache = NULL((void *)0); | |||
381 | ||||
382 | init_llist_head(&ref->preallocated_barriers); | |||
383 | atomic_set(&ref->count, 0); | |||
384 | #ifdef __linux__ | |||
385 | __mutex_init(&ref->mutex, "i915_active", mkey); | |||
386 | #else | |||
387 | rw_init(&ref->mutex, "i915_active"); | |||
388 | #endif | |||
389 | __i915_active_fence_init(&ref->excl, NULL((void *)0), excl_retire); | |||
390 | INIT_WORK(&ref->work, active_work); | |||
391 | #if IS_ENABLED(CONFIG_LOCKDEP) | |||
392 | lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0); | |||
393 | #endif | |||
394 | } | |||
395 | ||||
396 | static bool ____active_del_barrier(struct i915_active *ref, | |||
397 | struct active_node *node, | |||
398 | struct intel_engine_cs *engine) | |||
399 | ||||
400 | { | |||
401 | struct llist_node *head = NULL((void *)0), *tail = NULL((void *)0); | |||
402 | struct llist_node *pos, *next; | |||
403 | ||||
404 | GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context)((void)0); | |||
405 | ||||
406 | /* | |||
407 | * Rebuild the llist excluding our node. We may perform this | |||
408 | * outside of the kernel_context timeline mutex and so someone | |||
409 | * else may be manipulating the engine->barrier_tasks, in | |||
410 | * which case either we or they will be upset :) | |||
411 | * | |||
412 | * A second __active_del_barrier() will report failure to claim | |||
413 | * the active_node and the caller will just shrug and know not to | |||
414 | * claim ownership of its node. | |||
415 | * | |||
416 | * A concurrent i915_request_add_active_barriers() will miss adding | |||
417 | * any of the tasks, but we will try again on the next -- and since | |||
418 | * we are actively using the barrier, we know that there will be | |||
419 | * at least another opportunity when we idle. | |||
420 | */ | |||
421 | llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) { | |||
422 | if (node == barrier_from_ll(pos)) { | |||
423 | node = NULL((void *)0); | |||
424 | continue; | |||
425 | } | |||
426 | ||||
427 | pos->next = head; | |||
428 | head = pos; | |||
429 | if (!tail) | |||
430 | tail = pos; | |||
431 | } | |||
432 | if (head) | |||
433 | llist_add_batch(head, tail, &engine->barrier_tasks); | |||
434 | ||||
435 | return !node; | |||
436 | } | |||
437 | ||||
438 | static bool_Bool | |||
439 | __active_del_barrier(struct i915_active *ref, struct active_node *node) | |||
440 | { | |||
441 | return ____active_del_barrier(ref, node, barrier_to_engine(node)); | |||
442 | } | |||
443 | ||||
444 | static bool_Bool | |||
445 | replace_barrier(struct i915_active *ref, struct i915_active_fence *active) | |||
446 | { | |||
447 | if (!is_barrier(active)) /* proto-node used by our idle barrier? */ | |||
448 | return false0; | |||
449 | ||||
450 | /* | |||
451 | * This request is on the kernel_context timeline, and so | |||
452 | * we can use it to substitute for the pending idle-barrier | |||
453 | * request that we want to emit on the kernel_context. | |||
454 | */ | |||
455 | __active_del_barrier(ref, node_from_active(active)); | |||
456 | return true1; | |||
457 | } | |||
458 | ||||
459 | int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) | |||
460 | { | |||
461 | struct i915_active_fence *active; | |||
462 | int err; | |||
463 | ||||
464 | /* Prevent reaping in case we malloc/wait while building the tree */ | |||
465 | err = i915_active_acquire(ref); | |||
466 | if (err) | |||
467 | return err; | |||
468 | ||||
469 | active = active_instance(ref, idx); | |||
470 | if (!active) { | |||
471 | err = -ENOMEM12; | |||
472 | goto out; | |||
473 | } | |||
474 | ||||
475 | if (replace_barrier(ref, active)) { | |||
476 | RCU_INIT_POINTER(active->fence, NULL); | |||
477 | atomic_dec(&ref->count); | |||
478 | } | |||
479 | if (!__i915_active_fence_set(active, fence)) | |||
480 | __i915_active_acquire(ref); | |||
481 | ||||
482 | out: | |||
483 | i915_active_release(ref); | |||
484 | return err; | |||
485 | } | |||
486 | ||||
487 | static struct dma_fence * | |||
488 | __i915_active_set_fence(struct i915_active *ref, | |||
489 | struct i915_active_fence *active, | |||
490 | struct dma_fence *fence) | |||
491 | { | |||
492 | struct dma_fence *prev; | |||
493 | ||||
494 | if (replace_barrier(ref, active)) { | |||
495 | RCU_INIT_POINTER(active->fence, fence); | |||
496 | return NULL((void *)0); | |||
497 | } | |||
498 | ||||
499 | rcu_read_lock(); | |||
500 | prev = __i915_active_fence_set(active, fence); | |||
501 | if (prev) | |||
502 | prev = dma_fence_get_rcu(prev); | |||
503 | else | |||
504 | __i915_active_acquire(ref); | |||
505 | rcu_read_unlock(); | |||
506 | ||||
507 | return prev; | |||
508 | } | |||
509 | ||||
510 | static struct i915_active_fence * | |||
511 | __active_fence(struct i915_active *ref, u64 idx) | |||
512 | { | |||
513 | struct active_node *it; | |||
514 | ||||
515 | it = __active_lookup(ref, idx); | |||
516 | if (unlikely(!it)) { /* Contention with parallel tree builders! */ | |||
517 | spin_lock_irq(&ref->tree_lock); | |||
518 | it = __active_lookup(ref, idx); | |||
519 | spin_unlock_irq(&ref->tree_lock); | |||
520 | } | |||
521 | GEM_BUG_ON(!it)((void)0); /* slot must be preallocated */ | |||
522 | ||||
523 | return &it->base; | |||
524 | } | |||
525 | ||||
526 | struct dma_fence * | |||
527 | __i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) | |||
528 | { | |||
529 | /* Only valid while active, see i915_active_acquire_for_context() */ | |||
530 | return __i915_active_set_fence(ref, __active_fence(ref, idx), fence); | |||
531 | } | |||
532 | ||||
533 | struct dma_fence * | |||
534 | i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) | |||
535 | { | |||
536 | /* We expect the caller to manage the exclusive timeline ordering */ | |||
537 | return __i915_active_set_fence(ref, &ref->excl, f); | |||
538 | } | |||
539 | ||||
540 | bool_Bool i915_active_acquire_if_busy(struct i915_active *ref) | |||
541 | { | |||
542 | debug_active_assert(ref); | |||
543 | return atomic_add_unless(&ref->count, 1, 0); | |||
544 | } | |||
545 | ||||
546 | static void __i915_active_activate(struct i915_active *ref) | |||
547 | { | |||
548 | spin_lock_irq(&ref->tree_lock); /* __active_retire() */ | |||
549 | if (!atomic_fetch_inc(&ref->count)) | |||
550 | debug_active_activate(ref); | |||
551 | spin_unlock_irq(&ref->tree_lock); | |||
552 | } | |||
553 | ||||
554 | int i915_active_acquire(struct i915_active *ref) | |||
555 | { | |||
556 | int err; | |||
557 | ||||
558 | if (i915_active_acquire_if_busy(ref)) | |||
559 | return 0; | |||
560 | ||||
561 | if (!ref->active) { | |||
562 | __i915_active_activate(ref); | |||
563 | return 0; | |||
564 | } | |||
565 | ||||
566 | err = mutex_lock_interruptible(&ref->mutex); | |||
567 | if (err) | |||
568 | return err; | |||
569 | ||||
570 | if (likely(!i915_active_acquire_if_busy(ref))) { | |||
571 | err = ref->active(ref); | |||
572 | if (!err) | |||
573 | __i915_active_activate(ref); | |||
574 | } | |||
575 | ||||
576 | mutex_unlock(&ref->mutex); | |||
577 | ||||
578 | return err; | |||
579 | } | |||
580 | ||||
581 | int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) | |||
582 | { | |||
583 | struct i915_active_fence *active; | |||
584 | int err; | |||
585 | ||||
586 | err = i915_active_acquire(ref); | |||
587 | if (err) | |||
588 | return err; | |||
589 | ||||
590 | active = active_instance(ref, idx); | |||
591 | if (!active) { | |||
592 | i915_active_release(ref); | |||
593 | return -ENOMEM12; | |||
594 | } | |||
595 | ||||
596 | return 0; /* return with active ref */ | |||
597 | } | |||
598 | ||||
599 | void i915_active_release(struct i915_active *ref) | |||
600 | { | |||
601 | debug_active_assert(ref); | |||
602 | active_retire(ref); | |||
603 | } | |||
604 | ||||
605 | static void enable_signaling(struct i915_active_fence *active) | |||
606 | { | |||
607 | struct dma_fence *fence; | |||
608 | ||||
609 | if (unlikely(is_barrier(active))) | |||
610 | return; | |||
611 | ||||
612 | fence = i915_active_fence_get(active); | |||
613 | if (!fence) | |||
614 | return; | |||
615 | ||||
616 | dma_fence_enable_sw_signaling(fence); | |||
617 | dma_fence_put(fence); | |||
618 | } | |||
619 | ||||
620 | static int flush_barrier(struct active_node *it) | |||
621 | { | |||
622 | struct intel_engine_cs *engine; | |||
623 | ||||
624 | if (likely(!is_barrier(&it->base))) | |||
625 | return 0; | |||
626 | ||||
627 | engine = __barrier_to_engine(it); | |||
628 | smp_rmb(); /* serialise with add_active_barriers */ | |||
629 | if (!is_barrier(&it->base)) | |||
630 | return 0; | |||
631 | ||||
632 | return intel_engine_flush_barriers(engine); | |||
633 | } | |||
634 | ||||
635 | static int flush_lazy_signals(struct i915_active *ref) | |||
636 | { | |||
637 | struct active_node *it, *n; | |||
638 | int err = 0; | |||
639 | ||||
640 | enable_signaling(&ref->excl); | |||
641 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { | |||
642 | err = flush_barrier(it); /* unconnected idle barrier? */ | |||
643 | if (err) | |||
644 | break; | |||
645 | ||||
646 | enable_signaling(&it->base); | |||
647 | } | |||
648 | ||||
649 | return err; | |||
650 | } | |||
651 | ||||
652 | int __i915_active_wait(struct i915_active *ref, int state) | |||
653 | { | |||
654 | might_sleep(); | |||
655 | ||||
656 | /* Any fence added after the wait begins will not be auto-signaled */ | |||
657 | if (i915_active_acquire_if_busy(ref)) { | |||
658 | int err; | |||
659 | ||||
660 | err = flush_lazy_signals(ref); | |||
661 | i915_active_release(ref); | |||
662 | if (err) | |||
663 | return err; | |||
664 | ||||
665 | if (___wait_var_event(ref, i915_active_is_idle(ref), | |||
666 | state, 0, 0, schedule())) | |||
667 | return -EINTR; | |||
668 | } | |||
669 | ||||
670 | /* | |||
671 | * After the wait is complete, the caller may free the active. | |||
672 | * We have to flush any concurrent retirement before returning. | |||
673 | */ | |||
674 | flush_work(&ref->work); | |||
675 | return 0; | |||
676 | } | |||
677 | ||||
678 | static int __await_active(struct i915_active_fence *active, | |||
679 | int (*fn)(void *arg, struct dma_fence *fence), | |||
680 | void *arg) | |||
681 | { | |||
682 | struct dma_fence *fence; | |||
683 | ||||
684 | if (is_barrier(active)) /* XXX flush the barrier? */ | |||
685 | return 0; | |||
686 | ||||
687 | fence = i915_active_fence_get(active); | |||
688 | if (fence) { | |||
689 | int err; | |||
690 | ||||
691 | err = fn(arg, fence); | |||
692 | dma_fence_put(fence); | |||
693 | if (err < 0) | |||
694 | return err; | |||
695 | } | |||
696 | ||||
697 | return 0; | |||
698 | } | |||
699 | ||||
700 | struct wait_barrier { | |||
701 | struct wait_queue_entry base; | |||
702 | struct i915_active *ref; | |||
703 | }; | |||
704 | ||||
705 | static int | |||
706 | barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key) | |||
707 | { | |||
708 | struct wait_barrier *wb = container_of(wq, typeof(*wb), base); | |||
709 | ||||
710 | if (i915_active_is_idle(wb->ref)) { | |||
711 | list_del(&wq->entry); | |||
712 | i915_sw_fence_complete(wq->private); | |||
713 | kfree(wq); | |||
714 | } | |||
715 | ||||
716 | return 0; | |||
717 | } | |||
718 | ||||
719 | static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence) | |||
720 | { | |||
721 | struct wait_barrier *wb; | |||
722 | ||||
723 | wb = kmalloc(sizeof(*wb), GFP_KERNEL); | |||
724 | if (unlikely(!wb)) | |||
725 | return -ENOMEM; | |||
726 | ||||
727 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
728 | if (!i915_sw_fence_await(fence)) { | |||
729 | kfree(wb); | |||
730 | return -EINVAL22; | |||
731 | } | |||
732 | ||||
733 | wb->base.flags = 0; | |||
734 | wb->base.func = barrier_wake; | |||
735 | wb->base.private = fence; | |||
736 | wb->ref = ref; | |||
737 | ||||
738 | add_wait_queue(__var_waitqueue(ref), &wb->base); | |||
739 | return 0; | |||
740 | } | |||
741 | ||||
742 | static int await_active(struct i915_active *ref, | |||
743 | unsigned int flags, | |||
744 | int (*fn)(void *arg, struct dma_fence *fence), | |||
745 | void *arg, struct i915_sw_fence *barrier) | |||
746 | { | |||
747 | int err = 0; | |||
748 | ||||
749 | if (!i915_active_acquire_if_busy(ref)) | |||
750 | return 0; | |||
751 | ||||
752 | if (flags & I915_ACTIVE_AWAIT_EXCL && | |||
753 | rcu_access_pointer(ref->excl.fence)) { | |||
754 | err = __await_active(&ref->excl, fn, arg); | |||
755 | if (err) | |||
756 | goto out; | |||
757 | } | |||
758 | ||||
759 | if (flags & I915_ACTIVE_AWAIT_ACTIVE) { | |||
760 | struct active_node *it, *n; | |||
761 | ||||
762 | rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { | |||
763 | err = __await_active(&it->base, fn, arg); | |||
764 | if (err) | |||
765 | goto out; | |||
766 | } | |||
767 | } | |||
768 | ||||
769 | if (flags & I915_ACTIVE_AWAIT_BARRIER) { | |||
770 | err = flush_lazy_signals(ref); | |||
771 | if (err) | |||
772 | goto out; | |||
773 | ||||
774 | err = __await_barrier(ref, barrier); | |||
775 | if (err) | |||
776 | goto out; | |||
777 | } | |||
778 | ||||
779 | out: | |||
780 | i915_active_release(ref); | |||
781 | return err; | |||
782 | } | |||
783 | ||||
784 | static int rq_await_fence(void *arg, struct dma_fence *fence) | |||
785 | { | |||
786 | return i915_request_await_dma_fence(arg, fence); | |||
787 | } | |||
788 | ||||
789 | int i915_request_await_active(struct i915_request *rq, | |||
790 | struct i915_active *ref, | |||
791 | unsigned int flags) | |||
792 | { | |||
793 | return await_active(ref, flags, rq_await_fence, rq, &rq->submit); | |||
794 | } | |||
795 | ||||
796 | static int sw_await_fence(void *arg, struct dma_fence *fence) | |||
797 | { | |||
798 | return i915_sw_fence_await_dma_fence(arg, fence, 0, | |||
799 | GFP_NOWAIT | __GFP_NOWARN); | |||
800 | } | |||
801 | ||||
802 | int i915_sw_fence_await_active(struct i915_sw_fence *fence, | |||
803 | struct i915_active *ref, | |||
804 | unsigned int flags) | |||
805 | { | |||
806 | return await_active(ref, flags, sw_await_fence, fence, fence); | |||
807 | } | |||
808 | ||||
809 | void i915_active_fini(struct i915_active *ref) | |||
810 | { | |||
811 | debug_active_fini(ref); | |||
812 | GEM_BUG_ON(atomic_read(&ref->count))((void)0); | |||
813 | GEM_BUG_ON(work_pending(&ref->work))((void)0); | |||
814 | mutex_destroy(&ref->mutex); | |||
815 | ||||
816 | if (ref->cache) | |||
817 | #ifdef __linux__ | |||
818 | kmem_cache_free(global.slab_cache, ref->cache); | |||
819 | #else | |||
820 | pool_put(&global.slab_cache, ref->cache); | |||
821 | #endif | |||
822 | } | |||
823 | ||||
824 | static inline bool_Bool is_idle_barrier(struct active_node *node, u64 idx) | |||
825 | { | |||
826 | return node->timeline == idx && !i915_active_fence_isset(&node->base); | |||
827 | } | |||
828 | ||||
829 | static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) | |||
830 | { | |||
831 | struct rb_node *prev, *p; | |||
832 | ||||
833 | if (RB_EMPTY_ROOT(&ref->tree)) | |||
834 | return NULL((void *)0); | |||
835 | ||||
836 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
837 | ||||
838 | /* | |||
839 | * Try to reuse any existing barrier nodes already allocated for this | |||
840 | * i915_active, due to overlapping active phases there is likely a | |||
841 | * node kept alive (as we reuse before parking). We prefer to reuse | |||
842 | * completely idle barriers (less hassle in manipulating the llists), | |||
843 | * but otherwise any will do. | |||
844 | */ | |||
845 | if (ref->cache && is_idle_barrier(ref->cache, idx)) { | |||
846 | p = &ref->cache->node; | |||
847 | goto match; | |||
848 | } | |||
849 | ||||
850 | prev = NULL((void *)0); | |||
851 | p = ref->tree.rb_node; | |||
852 | while (p) { | |||
853 | struct active_node *node = | |||
854 | rb_entry(p, struct active_node, node); | |||
855 | ||||
856 | if (is_idle_barrier(node, idx)) | |||
857 | goto match; | |||
858 | ||||
859 | prev = p; | |||
860 | if (node->timeline < idx) | |||
861 | p = READ_ONCE(p->rb_right); | |||
862 | else | |||
863 | p = READ_ONCE(p->rb_left); | |||
864 | } | |||
865 | ||||
866 | /* | |||
867 | * No quick match, but we did find the leftmost rb_node for the | |||
868 | * kernel_context. Walk the rb_tree in-order to see if there were | |||
869 | * any idle-barriers on this timeline that we missed, or just use | |||
870 | * the first pending barrier. | |||
871 | */ | |||
872 | for (p = prev; p; p = rb_next(p)) { | |||
873 | struct active_node *node = | |||
874 | rb_entry(p, struct active_node, node); | |||
875 | struct intel_engine_cs *engine; | |||
876 | ||||
877 | if (node->timeline > idx) | |||
878 | break; | |||
879 | ||||
880 | if (node->timeline < idx) | |||
881 | continue; | |||
882 | ||||
883 | if (is_idle_barrier(node, idx)) | |||
884 | goto match; | |||
885 | ||||
886 | /* | |||
887 | * The list of pending barriers is protected by the | |||
888 | * kernel_context timeline, which notably we do not hold | |||
889 | * here. i915_request_add_active_barriers() may consume | |||
890 | * the barrier before we claim it, so we have to check | |||
891 | * for success. | |||
892 | */ | |||
893 | engine = __barrier_to_engine(node); | |||
894 | smp_rmb(); /* serialise with add_active_barriers */ | |||
895 | if (is_barrier(&node->base) && | |||
896 | ____active_del_barrier(ref, node, engine)) | |||
897 | goto match; | |||
898 | } | |||
899 | ||||
900 | return NULL((void *)0); | |||
901 | ||||
902 | match: | |||
903 | spin_lock_irq(&ref->tree_lock); | |||
904 | rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ | |||
905 | if (p == &ref->cache->node) | |||
906 | WRITE_ONCE(ref->cache, NULL); | |||
907 | spin_unlock_irq(&ref->tree_lock); | |||
908 | ||||
909 | return rb_entry(p, struct active_node, node); | |||
910 | } | |||
911 | ||||
912 | int i915_active_acquire_preallocate_barrier(struct i915_active *ref, | |||
913 | struct intel_engine_cs *engine) | |||
914 | { | |||
915 | intel_engine_mask_t tmp, mask = engine->mask; | |||
916 | struct llist_node *first = NULL, *last = NULL; | |||
917 | struct intel_gt *gt = engine->gt; | |||
918 | ||||
919 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
920 | ||||
921 | /* Wait until the previous preallocation is completed */ | |||
922 | while (!llist_empty(&ref->preallocated_barriers)) | |||
923 | cond_resched(); | |||
924 | ||||
925 | /* | |||
926 | * Preallocate a node for each physical engine supporting the target | |||
927 | * engine (remember virtual engines have more than one sibling). | |||
928 | * We can then use the preallocated nodes in | |||
929 | * i915_active_acquire_barrier() | |||
930 | */ | |||
931 | GEM_BUG_ON(!mask); | |||
932 | for_each_engine_masked(engine, gt, mask, tmp) { | |||
933 | u64 idx = engine->kernel_context->timeline->fence_context; | |||
934 | struct llist_node *prev = first; | |||
935 | struct active_node *node; | |||
936 | ||||
937 | rcu_read_lock(); | |||
938 | node = reuse_idle_barrier(ref, idx); | |||
939 | rcu_read_unlock(); | |||
940 | if (!node) { | |||
941 | #ifdef __linux__ | |||
942 | node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); | |||
943 | #else | |||
944 | node = pool_get(&global.slab_cache, PR_WAITOK); | |||
945 | #endif | |||
946 | if (!node) | |||
947 | goto unwind; | |||
948 | ||||
949 | RCU_INIT_POINTER(node->base.fence, NULL); | |||
950 | node->base.cb.func = node_retire; | |||
951 | node->timeline = idx; | |||
952 | node->ref = ref; | |||
953 | } | |||
954 | ||||
955 | if (!i915_active_fence_isset(&node->base)) { | |||
956 | /* | |||
957 | * Mark this as being *our* unconnected proto-node. | |||
958 | * | |||
959 | * Since this node is not in any list, and we have | |||
960 | * decoupled it from the rbtree, we can reuse the | |||
961 | * request to indicate this is an idle-barrier node | |||
962 | * and then we can use the rb_node and list pointers | |||
963 | * for our tracking of the pending barrier. | |||
964 | */ | |||
965 | RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN)); | |||
966 | node->base.cb.node.prev = (void *)engine; | |||
967 | __i915_active_acquire(ref); | |||
968 | } | |||
969 | GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); | |||
970 | ||||
971 | GEM_BUG_ON(barrier_to_engine(node) != engine)((void)0); | |||
972 | first = barrier_to_ll(node); | |||
973 | first->next = prev; | |||
974 | if (!last) | |||
975 | last = first; | |||
976 | intel_engine_pm_get(engine); | |||
977 | } | |||
978 | ||||
979 | GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers))((void)0); | |||
980 | llist_add_batch(first, last, &ref->preallocated_barriers); | |||
981 | ||||
982 | return 0; | |||
983 | ||||
984 | unwind: | |||
985 | while (first) { | |||
986 | struct active_node *node = barrier_from_ll(first); | |||
987 | ||||
988 | first = first->next; | |||
989 | ||||
990 | atomic_dec(&ref->count); | |||
991 | intel_engine_pm_put(barrier_to_engine(node)); | |||
992 | ||||
993 | #ifdef __linux__ | |||
994 | kmem_cache_free(global.slab_cache, node); | |||
995 | #else | |||
996 | pool_put(&global.slab_cache, node); | |||
997 | #endif | |||
998 | } | |||
999 | return -ENOMEM12; | |||
1000 | } | |||
1001 | ||||
1002 | void i915_active_acquire_barrier(struct i915_active *ref) | |||
1003 | { | |||
1004 | struct llist_node *pos, *next; | |||
1005 | unsigned long flags; | |||
1006 | ||||
1007 | GEM_BUG_ON(i915_active_is_idle(ref))((void)0); | |||
1008 | ||||
1009 | /* | |||
1010 | * Transfer the list of preallocated barriers into the | |||
1011 | * i915_active rbtree, but only as proto-nodes. They will be | |||
1012 | * populated by i915_request_add_active_barriers() to point to the | |||
1013 | * request that will eventually release them. | |||
1014 | */ | |||
1015 | llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { | |||
1016 | struct active_node *node = barrier_from_ll(pos); | |||
1017 | struct intel_engine_cs *engine = barrier_to_engine(node); | |||
1018 | struct rb_node **p, *parent; | |||
1019 | ||||
1020 | spin_lock_irqsave_nested(&ref->tree_lock, flags, | |||
1021 | SINGLE_DEPTH_NESTING); | |||
1022 | parent = NULL((void *)0); | |||
1023 | p = &ref->tree.rb_node; | |||
1024 | while (*p) { | |||
1025 | struct active_node *it; | |||
1026 | ||||
1027 | parent = *p; | |||
1028 | ||||
1029 | it = rb_entry(parent, struct active_node, node); | |||
1030 | if (it->timeline < node->timeline) | |||
1031 | p = &parent->rb_right; | |||
1032 | else | |||
1033 | p = &parent->rb_left; | |||
1034 | } | |||
1035 | rb_link_node(&node->node, parent, p); | |||
1036 | rb_insert_color(&node->node, &ref->tree); | |||
1037 | spin_unlock_irqrestore(&ref->tree_lock, flags); | |||
1038 | ||||
1039 | GEM_BUG_ON(!intel_engine_pm_is_awake(engine))((void)0); | |||
1040 | llist_add(barrier_to_ll(node), &engine->barrier_tasks); | |||
1041 | intel_engine_pm_put_delay(engine, 1); | |||
1042 | } | |||
1043 | } | |||
1044 | ||||
1045 | static struct dma_fence **ll_to_fence_slot(struct llist_node *node) | |||
1046 | { | |||
1047 | return __active_fence_slot(&barrier_from_ll(node)->base); | |||
1048 | } | |||
1049 | ||||
1050 | void i915_request_add_active_barriers(struct i915_request *rq) | |||
1051 | { | |||
1052 | struct intel_engine_cs *engine = rq->engine; | |||
1053 | struct llist_node *node, *next; | |||
1054 | unsigned long flags; | |||
1055 | ||||
1056 | GEM_BUG_ON(!intel_context_is_barrier(rq->context))((void)0); | |||
1057 | GEM_BUG_ON(intel_engine_is_virtual(engine))((void)0); | |||
1058 | GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline)((void)0); | |||
1059 | ||||
1060 | node = llist_del_all(&engine->barrier_tasks); | |||
1061 | if (!node) | |||
1062 | return; | |||
1063 | /* | |||
1064 | * Attach the list of proto-fences to the in-flight request such | |||
1065 | * that the parent i915_active will be released when this request | |||
1066 | * is retired. | |||
1067 | */ | |||
1068 | spin_lock_irqsave(&rq->lock, flags); | |||
1069 | llist_for_each_safe(node, next, node) { | |||
1070 | /* serialise with reuse_idle_barrier */ | |||
1071 | smp_store_mb(*ll_to_fence_slot(node), &rq->fence); | |||
1072 | list_add_tail((struct list_head *)node, &rq->fence.cb_list); | |||
1073 | } | |||
1074 | spin_unlock_irqrestore(&rq->lock, flags); | |||
1075 | } | |||
1076 | ||||
1077 | /* | |||
1078 | * __i915_active_fence_set: Update the last active fence along its timeline | |||
1079 | * @active: the active tracker | |||
1080 | * @fence: the new fence (under construction) | |||
1081 | * | |||
1082 | * Records the new @fence as the last active fence along its timeline in | |||
1083 | * this active tracker, moving the tracking callbacks from the previous | |||
1084 | * fence onto this one. Returns the previous fence (if not already completed), | |||
1085 | * which the caller must ensure is executed before the new fence. To ensure | |||
1086 | * that the order of fences within the timeline of the i915_active_fence is | |||
1087 | * understood, it should be locked by the caller. | |||
1088 | */ | |||
1089 | struct dma_fence * | |||
1090 | __i915_active_fence_set(struct i915_active_fence *active, | |||
1091 | struct dma_fence *fence) | |||
1092 | { | |||
1093 | struct dma_fence *prev; | |||
1094 | unsigned long flags; | |||
1095 | ||||
1096 | if (fence == rcu_access_pointer(active->fence)) | |||
1097 | return fence; | |||
1098 | ||||
1099 | GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))((void)0); | |||
1100 | ||||
1101 | /* | |||
1102 | * Consider that we have two threads arriving (A and B), with | |||
1103 | * C already resident as the active->fence. | |||
1104 | * | |||
1105 | * A does the xchg first, and so it sees C or NULL depending | |||
1106 | * on the timing of the interrupt handler. If it is NULL, the | |||
1107 | * previous fence must have been signaled and we know that | |||
1108 | * we are first on the timeline. If it is still present, | |||
1109 | * we acquire the lock on that fence and serialise with the interrupt | |||
1110 | * handler, in the process removing it from any future interrupt | |||
1111 | * callback. A will then wait on C before executing (if present). | |||
1112 | * | |||
1113 | * As B is second, it sees A as the previous fence and so waits for | |||
1114 | * it to complete its transition and takes over the occupancy for | |||
1115 | * itself -- remembering that it needs to wait on A before executing. | |||
1116 | * | |||
1117 | * Note the strong ordering of the timeline also provides consistent | |||
1118 | * nesting rules for the fence->lock; the inner lock is always the | |||
1119 | * older lock. | |||
1120 | */ | |||
1121 | spin_lock_irqsave(fence->lock, flags); | |||
1122 | prev = xchg(__active_fence_slot(active), fence); | |||
1123 | if (prev) { | |||
1124 | GEM_BUG_ON(prev == fence); | |||
1125 | spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); | |||
1126 | __list_del_entry(&active->cb.node); | |||
1127 | spin_unlock(prev->lock); /* serialise with prev->cb_list */ | |||
1128 | } | |||
1129 | list_add_tail(&active->cb.node, &fence->cb_list); | |||
1130 | spin_unlock_irqrestore(fence->lock, flags); | |||
1131 | ||||
1132 | return prev; | |||
1133 | } | |||
1134 | ||||
1135 | int i915_active_fence_set(struct i915_active_fence *active, | |||
1136 | struct i915_request *rq) | |||
1137 | { | |||
1138 | struct dma_fence *fence; | |||
1139 | int err = 0; | |||
1140 | ||||
1141 | /* Must maintain timeline ordering wrt previous active requests */ | |||
1142 | rcu_read_lock(); | |||
1143 | fence = __i915_active_fence_set(active, &rq->fence); | |||
1144 | if (fence) /* but the previous fence may not belong to that timeline! */ | |||
1145 | fence = dma_fence_get_rcu(fence); | |||
1146 | rcu_read_unlock(); | |||
1147 | if (fence) { | |||
1148 | err = i915_request_await_dma_fence(rq, fence); | |||
1149 | dma_fence_put(fence); | |||
1150 | } | |||
1151 | ||||
1152 | return err; | |||
1153 | } | |||
1154 | ||||
1155 | void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) | |||
1156 | { | |||
1157 | active_fence_cb(fence, cb); | |||
1158 | } | |||
1159 | ||||
1160 | struct auto_active { | |||
1161 | struct i915_active base; | |||
1162 | struct kref ref; | |||
1163 | }; | |||
1164 | ||||
1165 | struct i915_active *i915_active_get(struct i915_active *ref) | |||
1166 | { | |||
1167 | struct auto_active *aa = container_of(ref, typeof(*aa), base); | |||
1168 | ||||
1169 | kref_get(&aa->ref); | |||
1170 | return &aa->base; | |||
1171 | } | |||
1172 | ||||
1173 | static void auto_release(struct kref *ref) | |||
1174 | { | |||
1175 | struct auto_active *aa = container_of(ref, typeof(*aa), ref); | |||
1176 | ||||
1177 | i915_active_fini(&aa->base); | |||
1178 | kfree(aa); | |||
1179 | } | |||
1180 | ||||
1181 | void i915_active_put(struct i915_active *ref) | |||
1182 | { | |||
1183 | struct auto_active *aa = container_of(ref, typeof(*aa), base); | |||
1184 | ||||
1185 | kref_put(&aa->ref, auto_release); | |||
1186 | } | |||
1187 | ||||
1188 | static int auto_active(struct i915_active *ref) | |||
1189 | { | |||
1190 | i915_active_get(ref); | |||
1191 | return 0; | |||
1192 | } | |||
1193 | ||||
1194 | __i915_active_call__attribute__((__aligned__(4))) static void | |||
1195 | auto_retire(struct i915_active *ref) | |||
1196 | { | |||
1197 | i915_active_put(ref); | |||
1198 | } | |||
1199 | ||||
1200 | struct i915_active *i915_active_create(void) | |||
1201 | { | |||
1202 | struct auto_active *aa; | |||
1203 | ||||
1204 | aa = kmalloc(sizeof(*aa), GFP_KERNEL(0x0001 | 0x0004)); | |||
1205 | if (!aa) | |||
1206 | return NULL((void *)0); | |||
1207 | ||||
1208 | kref_init(&aa->ref); | |||
1209 | i915_active_init(&aa->base, auto_active, auto_retire)do { static struct lock_class_key __mkey; static struct lock_class_key __wkey; __i915_active_init(&aa->base, auto_active, auto_retire , &__mkey, &__wkey); } while (0); | |||
1210 | ||||
1211 | return &aa->base; | |||
1212 | } | |||
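
The auto_active wrapper above bolts a kref onto an i915_active so that the last i915_active_put() tears the tracker down via auto_release() (i915_active_fini() followed by kfree()), while auto_active()/auto_retire() hold an extra reference for as long as the tracker itself is active. A minimal usage sketch, assuming a hypothetical caller and nothing beyond the three functions shown above:

/* Sketch: lifetime of a self-releasing tracker from i915_active_create().
 * example_use() is hypothetical and not part of the listing.
 */
static int example_use(void)
{
	struct i915_active *ref;

	ref = i915_active_create();	/* kref == 1 */
	if (!ref)
		return -ENOMEM;

	i915_active_get(ref);		/* a second owner, kref == 2 */

	i915_active_put(ref);		/* kref == 2 -> 1 */
	i915_active_put(ref);		/* final put: auto_release() frees it */

	return 0;
}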
1213 | ||||
1214 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0 | |||
1215 | #include "selftests/i915_active.c" | |||
1216 | #endif | |||
1217 | ||||
1218 | static void i915_global_active_shrink(void) | |||
1219 | { | |||
1220 | #ifdef notyet | |||
1221 | kmem_cache_shrink(global.slab_cache); | |||
1222 | #endif | |||
1223 | } | |||
1224 | ||||
1225 | static void i915_global_active_exit(void) | |||
1226 | { | |||
1227 | #ifdef __linux__ | |||
1228 | kmem_cache_destroy(global.slab_cache); | |||
1229 | #else | |||
1230 | pool_destroy(&global.slab_cache); | |||
1231 | #endif | |||
1232 | } | |||
1233 | ||||
1234 | static struct i915_global_active global = { { | |||
1235 | .shrink = i915_global_active_shrink, | |||
1236 | .exit = i915_global_active_exit, | |||
1237 | } }; | |||
1238 | ||||
1239 | int __init i915_global_active_init(void) | |||
1240 | { | |||
1241 | #ifdef __linux__ | |||
1242 | global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); | |||
1243 | if (!global.slab_cache) | |||
1244 | return -ENOMEM12; | |||
1245 | #else | |||
1246 | pool_init(&global.slab_cache, sizeof(struct active_node), | |||
1247 | CACHELINESIZE64, IPL_TTY0x9, 0, "drmsc", NULL((void *)0)); | |||
1248 | #endif | |||
1249 | ||||
1250 | i915_global_register(&global.base); | |||
1251 | return 0; | |||
1252 | } |
1 | /* Public domain. */ | |||
2 | ||||
3 | #ifndef _LINUX_LLIST_H | |||
4 | #define _LINUX_LLIST_H | |||
5 | ||||
6 | #include <sys/atomic.h> | |||
7 | ||||
8 | struct llist_node { | |||
9 | struct llist_node *next; | |||
10 | }; | |||
11 | ||||
12 | struct llist_head { | |||
13 | struct llist_node *first; | |||
14 | }; | |||
15 | ||||
16 | #define llist_entry(ptr, type, member)((ptr) ? ({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); (type *)( (char *)__mptr - __builtin_offsetof(type, member ) );}) : ((void *)0)) \ | |||
17 | ((ptr) ? container_of(ptr, type, member)({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); ( type *)( (char *)__mptr - __builtin_offsetof(type, member) ); }) : NULL((void *)0)) | |||
18 | ||||
19 | static inline struct llist_node * | |||
20 | llist_del_all(struct llist_head *head) | |||
21 | { | |||
22 | return atomic_swap_ptr(&head->first, NULL)_atomic_swap_ptr((&head->first), (((void *)0))); | |||
23 | } | |||
24 | ||||
25 | static inline struct llist_node * | |||
26 | llist_del_first(struct llist_head *head) | |||
27 | { | |||
28 | struct llist_node *first, *next; | |||
29 | ||||
30 | do { | |||
31 | first = head->first; | |||
32 | if (first == NULL((void *)0)) | |||
33 | return NULL((void *)0); | |||
34 | next = first->next; | |||
35 | } while (atomic_cas_ptr(&head->first, first, next)_atomic_cas_ptr((&head->first), (first), (next)) != first); | |||
36 | ||||
37 | return first; | |||
38 | } | |||
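
llist_del_first() pops a single node with a CAS loop, but unlike llist_add() it is not safe for multiple concurrent consumers: if another consumer pops and re-adds the same first node between the read of first->next and the CAS, the CAS still succeeds with a stale next pointer (the classic ABA problem). The usual contract is a single consumer, or an external lock around the pop. A small sketch under that single-consumer assumption; process() is a hypothetical handler:

/* Sketch: drain a list one node at a time.  Safe only if this is the
 * sole consumer of 'head'; producers may keep calling llist_add().
 */
static void drain_one_by_one(struct llist_head *head)
{
	struct llist_node *n;

	while ((n = llist_del_first(head)) != NULL)
		process(n);	/* hypothetical per-node handler */
}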
39 | ||||
40 | static inline bool_Bool | |||
41 | llist_add(struct llist_node *new, struct llist_head *head) | |||
42 | { | |||
43 | struct llist_node *first; | |||
44 | ||||
45 | do { | |||
46 | new->next = first = head->first; | |||
47 | } while (atomic_cas_ptr(&head->first, first, new)_atomic_cas_ptr((&head->first), (first), (new)) != first); | |||
48 | ||||
49 | return (first == NULL((void *)0)); | |||
50 | } | |||
51 | ||||
52 | static inline bool_Bool | |||
53 | llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | |||
54 | struct llist_head *head) | |||
55 | { | |||
56 | struct llist_node *first; | |||
57 | ||||
58 | do { | |||
59 | new_last->next = first = head->first; | |||
| ||||
60 | } while (atomic_cas_ptr(&head->first, first, new_first)_atomic_cas_ptr((&head->first), (first), (new_first)) != first); | |||
61 | ||||
62 | return (first == NULL((void *)0)); | |||
63 | } | |||
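
llist_add_batch() splices an already-chained run of nodes onto the head with one CAS. The store to new_last->next happens unconditionally before that CAS, so the batch must be non-empty: new_first and new_last must both point at real nodes, and passing NULL as new_last (an empty batch) is a caller bug rather than something the list tolerates. A sketch of a well-formed call, with illustrative names:

/* Sketch: push a two-node batch a -> b onto 'head' in one shot.
 * a, b and head are illustrative only; real nodes would normally be
 * embedded in longer-lived objects.
 */
static void push_batch_example(void)
{
	struct llist_head head;
	struct llist_node a, b;

	init_llist_head(&head);
	a.next = &b;			/* chain the batch internally */

	/* first == &a, last == &b: both non-NULL, as the code assumes;
	 * b.next is rewritten to the old head->first inside the call.
	 */
	llist_add_batch(&a, &b, &head);
}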
64 | ||||
65 | static inline void | |||
66 | init_llist_head(struct llist_head *head) | |||
67 | { | |||
68 | head->first = NULL((void *)0); | |||
69 | } | |||
70 | ||||
71 | static inline bool_Bool | |||
72 | llist_empty(struct llist_head *head) | |||
73 | { | |||
74 | return (head->first == NULL((void *)0)); | |||
75 | } | |||
76 | ||||
77 | #define llist_for_each_safe(pos, n, node)for ((pos) = (node); (pos) != ((void *)0) && ((n) = ( pos)->next, pos); (pos) = (n)) \ | |||
78 | for ((pos) = (node); \ | |||
79 | (pos) != NULL((void *)0) && \ | |||
80 | ((n) = (pos)->next, pos); \ | |||
81 | (pos) = (n)) | |||
82 | ||||
83 | #define llist_for_each_entry_safe(pos, n, node, member)for (pos = (((node)) ? ({ const __typeof( ((__typeof(*pos) *) 0)->member ) *__mptr = ((node)); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof(__typeof(*pos), member) );}) : ((void *)0)); pos != ((void *)0) && (n = ((pos->member .next) ? ({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = (pos->member.next); (__typeof(*pos) *)( (char * )__mptr - __builtin_offsetof(__typeof(*pos), member) );}) : ( (void *)0)), pos); pos = n) \ | |||
84 | for (pos = llist_entry((node), __typeof(*pos), member)(((node)) ? ({ const __typeof( ((__typeof(*pos) *)0)->member ) *__mptr = ((node)); (__typeof(*pos) *)( (char *)__mptr - __builtin_offsetof (__typeof(*pos), member) );}) : ((void *)0)); \ | |||
85 | pos != NULL((void *)0) && \ | |||
86 | (n = llist_entry(pos->member.next, __typeof(*pos), member)((pos->member.next) ? ({ const __typeof( ((__typeof(*pos) * )0)->member ) *__mptr = (pos->member.next); (__typeof(* pos) *)( (char *)__mptr - __builtin_offsetof(__typeof(*pos), member ) );}) : ((void *)0)), pos); \ | |||
87 | pos = n) | |||
88 | ||||
89 | #define llist_for_each_entry(pos, node, member)for ((pos) = (((node)) ? ({ const __typeof( ((__typeof(*(pos) ) *)0)->member ) *__mptr = ((node)); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member ) );}) : ((void *)0)); (pos) != ((void *)0); (pos) = (((pos)-> member.next) ? ({ const __typeof( ((__typeof(*(pos)) *)0)-> member ) *__mptr = ((pos)->member.next); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member ) );}) : ((void *)0))) \ | |||
90 | for ((pos) = llist_entry((node), __typeof(*(pos)), member)(((node)) ? ({ const __typeof( ((__typeof(*(pos)) *)0)->member ) *__mptr = ((node)); (__typeof(*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos)), member) );}) : ((void * )0)); \ | |||
91 | (pos) != NULL((void *)0); \ | |||
92 | (pos) = llist_entry((pos)->member.next, __typeof(*(pos)), member)(((pos)->member.next) ? ({ const __typeof( ((__typeof(*(pos )) *)0)->member ) *__mptr = ((pos)->member.next); (__typeof (*(pos)) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(pos )), member) );}) : ((void *)0))) | |||
93 | ||||
94 | #endif |
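
Putting the header together: any number of producers push nodes lock-free with llist_add() or llist_add_batch(), and a consumer detaches everything at once with llist_del_all() and then walks the detached chain, newest node first, with the iteration macros. A self-contained sketch built around a hypothetical struct work_item; handle() is likewise hypothetical:

#include <linux/llist.h>

struct work_item {
	struct llist_node node;
	int payload;
};

static struct llist_head pending;	/* zero-initialised, i.e. empty */

/* Producer side: lock-free, callable concurrently from many contexts. */
static void queue_item(struct work_item *wi)
{
	llist_add(&wi->node, &pending);
}

/* Consumer side: detach the whole list, then walk it without locks.
 * Nodes come back newest-first.  The _safe variant lets handle()
 * free or re-queue the node it is given.
 */
static void drain_items(void)
{
	struct llist_node *batch = llist_del_all(&pending);
	struct work_item *wi, *tmp;

	llist_for_each_entry_safe(wi, tmp, batch, node)
		handle(wi);	/* hypothetical per-item handler */
}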