File: | dev/pci/drm/i915/i915_scheduler.c |
Warning: | line 83, column 4: Value stored to 'first' is never read |
1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2018 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/mutex.h> |
8 | |
9 | #include "i915_drv.h" |
10 | #include "i915_request.h" |
11 | #include "i915_scheduler.h" |
12 | |
13 | static struct pool slab_dependencies; |
14 | static struct pool slab_priorities; |
15 | |
16 | static DEFINE_SPINLOCK(schedule_lock); |
17 | |
18 | static const struct i915_request * |
19 | node_to_request(const struct i915_sched_node *node) |
20 | { |
21 | return container_of(node, const struct i915_request, sched); |
22 | } |
23 | |
24 | static inline bool node_started(const struct i915_sched_node *node) |
25 | { |
26 | return i915_request_started(node_to_request(node)); |
27 | } |
28 | |
29 | static inline bool node_signaled(const struct i915_sched_node *node) |
30 | { |
31 | return i915_request_completed(node_to_request(node)); |
32 | } |
33 | |
34 | static inline struct i915_priolist *to_priolist(struct rb_node *rb) |
35 | { |
36 | return rb_entry(rb, struct i915_priolist, node); |
37 | } |
38 | |
39 | static void assert_priolists(struct i915_sched_engine * const sched_engine) |
40 | { |
41 | struct rb_node *rb; |
42 | long last_prio; |
43 | |
44 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
45 | return; |
46 | |
47 | GEM_BUG_ON(rb_first_cached(&sched_engine->queue) != |
48 | rb_first(&sched_engine->queue.rb_root)); |
49 | |
50 | last_prio = INT_MAX; |
51 | for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) { |
52 | const struct i915_priolist *p = to_priolist(rb); |
53 | |
54 | GEM_BUG_ON(p->priority > last_prio); |
55 | last_prio = p->priority; |
56 | } |
57 | } |
58 | |
59 | struct list_head * |
60 | i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio) |
61 | { |
62 | struct i915_priolist *p; |
63 | struct rb_node **parent, *rb; |
64 | bool first = true; |
65 | |
66 | lockdep_assert_held(&sched_engine->lock); |
67 | assert_priolists(sched_engine); |
68 | |
69 | if (unlikely(sched_engine->no_priolist)) |
70 | prio = I915_PRIORITY_NORMAL; |
71 | |
72 | find_priolist: |
73 | /* most positive priority is scheduled first, equal priorities fifo */ |
74 | rb = NULL; |
75 | parent = &sched_engine->queue.rb_root.rb_node; |
76 | while (*parent) { |
77 | rb = *parent; |
78 | p = to_priolist(rb); |
79 | if (prio > p->priority) { |
80 | parent = &rb->rb_left; |
81 | } else if (prio < p->priority) { |
82 | parent = &rb->rb_right; |
83 | first = false; |
Value stored to 'first' is never read | |
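Note: on this port, rb_insert_color_cached() (line 118) expands to linux_root_RB_INSERT_COLOR(), which takes no leftmost-node hint, so the value assigned to 'first' here is never consumed. |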
84 | } else { |
85 | return &p->requests; |
86 | } |
87 | } |
88 | |
89 | if (prio == I915_PRIORITY_NORMAL) { |
90 | p = &sched_engine->default_priolist; |
91 | } else { |
92 | #ifdef __linux__ |
93 | p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC); |
94 | #else |
95 | p = pool_get(&slab_priorities, PR_NOWAIT); |
96 | #endif |
97 | /* Convert an allocation failure to a priority bump */ |
98 | if (unlikely(!p)) { |
99 | prio = I915_PRIORITY_NORMAL; /* recurses just once */ |
100 | |
101 | /* To maintain ordering with all rendering, after an |
102 | * allocation failure we have to disable all scheduling. |
103 | * Requests will then be executed in fifo, and schedule |
104 | * will ensure that dependencies are emitted in fifo. |
105 | * There will be still some reordering with existing |
106 | * requests, so if userspace lied about their |
107 | * dependencies that reordering may be visible. |
108 | */ |
109 | sched_engine->no_priolist = true; |
110 | goto find_priolist; |
111 | } |
112 | } |
113 | |
114 | p->priority = prio; |
115 | INIT_LIST_HEAD(&p->requests); |
116 | |
117 | rb_link_node(&p->node, rb, parent); |
118 | rb_insert_color_cached(&p->node, &sched_engine->queue, first); |
119 | |
120 | return &p->requests; |
121 | } |
122 | |
123 | void __i915_priolist_free(struct i915_priolist *p) |
124 | { |
125 | #ifdef __linux__ |
126 | kmem_cache_free(slab_priorities, p); |
127 | #else |
128 | pool_put(&slab_priorities, p); |
129 | #endif |
130 | } |
131 | |
132 | struct sched_cache { |
133 | struct list_head *priolist; |
134 | }; |
135 | |
136 | static struct i915_sched_engine * |
137 | lock_sched_engine(struct i915_sched_node *node, |
138 | struct i915_sched_engine *locked, |
139 | struct sched_cache *cache) |
140 | { |
141 | const struct i915_request *rq = node_to_request(node); |
142 | struct i915_sched_engine *sched_engine; |
143 | |
144 | GEM_BUG_ON(!locked); |
145 | |
146 | /* |
147 | * Virtual engines complicate acquiring the engine timeline lock, |
148 | * as their rq->engine pointer is not stable until under that |
149 | * engine lock. The simple ploy we use is to take the lock then |
150 | * check that the rq still belongs to the newly locked engine. |
151 | */ |
152 | while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) { |
153 | spin_unlock(&locked->lock); |
154 | memset(cache, 0, sizeof(*cache)); |
155 | spin_lock(&sched_engine->lock); |
156 | locked = sched_engine; |
157 | } |
158 | |
159 | GEM_BUG_ON(locked != sched_engine); |
160 | return locked; |
161 | } |
162 | |
163 | static void __i915_schedule(struct i915_sched_node *node, |
164 | const struct i915_sched_attr *attr) |
165 | { |
166 | const int prio = max(attr->priority, node->attr.priority); |
167 | struct i915_sched_engine *sched_engine; |
168 | struct i915_dependency *dep, *p; |
169 | struct i915_dependency stack; |
170 | struct sched_cache cache; |
171 | DRM_LIST_HEAD(dfs); |
172 | |
173 | /* Needed in order to use the temporary link inside i915_dependency */ |
174 | lockdep_assert_held(&schedule_lock); |
175 | GEM_BUG_ON(prio == I915_PRIORITY_INVALID); |
176 | |
177 | if (node_signaled(node)) |
178 | return; |
179 | |
180 | stack.signaler = node; |
181 | list_add(&stack.dfs_link, &dfs); |
182 | |
183 | /* |
184 | * Recursively bump all dependent priorities to match the new request. |
185 | * |
186 | * A naive approach would be to use recursion: |
187 | * static void update_priorities(struct i915_sched_node *node, prio) { |
188 | * list_for_each_entry(dep, &node->signalers_list, signal_link) |
189 | * update_priorities(dep->signal, prio) |
190 | * queue_request(node); |
191 | * } |
192 | * but that may have unlimited recursion depth and so runs a very |
193 | * real risk of overunning the kernel stack. Instead, we build |
194 | * a flat list of all dependencies starting with the current request. |
195 | * As we walk the list of dependencies, we add all of its dependencies |
196 | * to the end of the list (this may include an already visited |
197 | * request) and continue to walk onwards onto the new dependencies. The |
198 | * end result is a topological list of requests in reverse order, the |
199 | * last element in the list is the request we must execute first. |
200 | */ |
201 | list_for_each_entry(dep, &dfs, dfs_link) { |
202 | struct i915_sched_node *node = dep->signaler; |
203 | |
204 | /* If we are already flying, we know we have no signalers */ |
205 | if (node_started(node)) |
206 | continue; |
207 | |
208 | /* |
209 | * Within an engine, there can be no cycle, but we may |
210 | * refer to the same dependency chain multiple times |
211 | * (redundant dependencies are not eliminated) and across |
212 | * engines. |
213 | */ |
214 | list_for_each_entry(p, &node->signalers_list, signal_link) { |
215 | GEM_BUG_ON(p == dep); /* no cycles! */ |
216 | |
217 | if (node_signaled(p->signaler)) |
218 | continue; |
219 | |
220 | if (prio > READ_ONCE(p->signaler->attr.priority)) |
221 | list_move_tail(&p->dfs_link, &dfs); |
222 | } |
223 | } |
224 | |
225 | /* |
226 | * If we didn't need to bump any existing priorities, and we haven't |
227 | * yet submitted this request (i.e. there is no potential race with |
228 | * execlists_submit_request()), we can set our own priority and skip |
229 | * acquiring the engine locks. |
230 | */ |
231 | if (node->attr.priority == I915_PRIORITY_INVALID) { |
232 | GEM_BUG_ON(!list_empty(&node->link)); |
233 | node->attr = *attr; |
234 | |
235 | if (stack.dfs_link.next == stack.dfs_link.prev) |
236 | return; |
237 | |
238 | __list_del_entry(&stack.dfs_link); |
239 | } |
240 | |
241 | memset(&cache, 0, sizeof(cache)); |
242 | sched_engine = node_to_request(node)->engine->sched_engine; |
243 | spin_lock(&sched_engine->lock); |
244 | |
245 | /* Fifo and depth-first replacement ensure our deps execute before us */ |
246 | sched_engine = lock_sched_engine(node, sched_engine, &cache); |
247 | list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { |
248 | struct i915_request *from = container_of(dep->signaler, |
249 | struct i915_request, |
250 | sched); |
251 | INIT_LIST_HEAD(&dep->dfs_link); |
252 | |
253 | node = dep->signaler; |
254 | sched_engine = lock_sched_engine(node, sched_engine, &cache); |
255 | lockdep_assert_held(&sched_engine->lock); |
256 | |
257 | /* Recheck after acquiring the engine->timeline.lock */ |
258 | if (prio <= node->attr.priority || node_signaled(node)) |
259 | continue; |
260 | |
261 | GEM_BUG_ON(node_to_request(node)->engine->sched_engine != |
262 | sched_engine); |
263 | |
264 | /* Must be called before changing the nodes priority */ |
265 | if (sched_engine->bump_inflight_request_prio) |
266 | sched_engine->bump_inflight_request_prio(from, prio); |
267 | |
268 | WRITE_ONCE(node->attr.priority, prio); |
269 | |
270 | /* |
271 | * Once the request is ready, it will be placed into the |
272 | * priority lists and then onto the HW runlist. Before the |
273 | * request is ready, it does not contribute to our preemption |
274 | * decisions and we can safely ignore it, as it will, and |
275 | * any preemption required, be dealt with upon submission. |
276 | * See engine->submit_request() |
277 | */ |
278 | if (list_empty(&node->link)) |
279 | continue; |
280 | |
281 | if (i915_request_in_priority_queue(node_to_request(node))) { |
282 | if (!cache.priolist) |
283 | cache.priolist = |
284 | i915_sched_lookup_priolist(sched_engine, |
285 | prio); |
286 | list_move_tail(&node->link, cache.priolist); |
287 | } |
288 | |
289 | /* Defer (tasklet) submission until after all of our updates. */ |
290 | if (sched_engine->kick_backend) |
291 | sched_engine->kick_backend(node_to_request(node), prio); |
292 | } |
293 | |
294 | spin_unlock(&sched_engine->lock); |
295 | } |
296 | |
297 | void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) |
298 | { |
299 | spin_lock_irq(&schedule_lock); |
300 | __i915_schedule(&rq->sched, attr); |
301 | spin_unlock_irq(&schedule_lock); |
302 | } |
303 | |
304 | void i915_sched_node_init(struct i915_sched_node *node) |
305 | { |
306 | INIT_LIST_HEAD(&node->signalers_list); |
307 | INIT_LIST_HEAD(&node->waiters_list); |
308 | INIT_LIST_HEAD(&node->link); |
309 | |
310 | i915_sched_node_reinit(node); |
311 | } |
312 | |
313 | void i915_sched_node_reinit(struct i915_sched_node *node) |
314 | { |
315 | node->attr.priority = I915_PRIORITY_INVALID; |
316 | node->semaphores = 0; |
317 | node->flags = 0; |
318 | |
319 | GEM_BUG_ON(!list_empty(&node->signalers_list)); |
320 | GEM_BUG_ON(!list_empty(&node->waiters_list)); |
321 | GEM_BUG_ON(!list_empty(&node->link)); |
322 | } |
323 | |
324 | static struct i915_dependency * |
325 | i915_dependency_alloc(void) |
326 | { |
327 | #ifdef __linux__ |
328 | return kmem_cache_alloc(slab_dependencies, GFP_KERNEL); |
329 | #else |
330 | return pool_get(&slab_dependencies, PR_WAITOK); |
331 | #endif |
332 | } |
333 | |
334 | static void |
335 | i915_dependency_free(struct i915_dependency *dep) |
336 | { |
337 | #ifdef __linux__ |
338 | kmem_cache_free(slab_dependencies, dep); |
339 | #else |
340 | pool_put(&slab_dependencies, dep); |
341 | #endif |
342 | } |
343 | |
344 | bool __i915_sched_node_add_dependency(struct i915_sched_node *node, |
345 | struct i915_sched_node *signal, |
346 | struct i915_dependency *dep, |
347 | unsigned long flags) |
348 | { |
349 | bool ret = false; |
350 | |
351 | spin_lock_irq(&schedule_lock); |
352 | |
353 | if (!node_signaled(signal)) { |
354 | INIT_LIST_HEAD(&dep->dfs_link); |
355 | dep->signaler = signal; |
356 | dep->waiter = node; |
357 | dep->flags = flags; |
358 | |
359 | /* All set, now publish. Beware the lockless walkers. */ |
360 | list_add_rcu(&dep->signal_link, &node->signalers_list); |
361 | list_add_rcu(&dep->wait_link, &signal->waiters_list); |
362 | |
363 | /* Propagate the chains */ |
364 | node->flags |= signal->flags; |
365 | ret = true; |
366 | } |
367 | |
368 | spin_unlock_irq(&schedule_lock); |
369 | |
370 | return ret; |
371 | } |
372 | |
373 | int i915_sched_node_add_dependency(struct i915_sched_node *node, |
374 | struct i915_sched_node *signal, |
375 | unsigned long flags) |
376 | { |
377 | struct i915_dependency *dep; |
378 | |
379 | dep = i915_dependency_alloc(); |
380 | if (!dep) |
381 | return -ENOMEM; |
382 | |
383 | if (!__i915_sched_node_add_dependency(node, signal, dep, |
384 | flags | I915_DEPENDENCY_ALLOC)) |
385 | i915_dependency_free(dep); |
386 | |
387 | return 0; |
388 | } |
389 | |
390 | void i915_sched_node_fini(struct i915_sched_node *node) |
391 | { |
392 | struct i915_dependency *dep, *tmp; |
393 | |
394 | spin_lock_irq(&schedule_lock); |
395 | |
396 | /* |
397 | * Everyone we depended upon (the fences we wait to be signaled) |
398 | * should retire before us and remove themselves from our list. |
399 | * However, retirement is run independently on each timeline and |
400 | * so we may be called out-of-order. |
401 | */ |
402 | list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { |
403 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); |
404 | |
405 | list_del_rcu(&dep->wait_link); |
406 | if (dep->flags & I915_DEPENDENCY_ALLOC) |
407 | i915_dependency_free(dep); |
408 | } |
409 | INIT_LIST_HEAD(&node->signalers_list); |
410 | |
411 | /* Remove ourselves from everyone who depends upon us */ |
412 | list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { |
413 | GEM_BUG_ON(dep->signaler != node); |
414 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); |
415 | |
416 | list_del_rcu(&dep->signal_link); |
417 | if (dep->flags & I915_DEPENDENCY_ALLOC) |
418 | i915_dependency_free(dep); |
419 | } |
420 | INIT_LIST_HEAD(&node->waiters_list); |
421 | |
422 | spin_unlock_irq(&schedule_lock); |
423 | } |
424 | |
425 | void i915_request_show_with_schedule(struct drm_printer *m, |
426 | const struct i915_request *rq, |
427 | const char *prefix, |
428 | int indent) |
429 | { |
430 | struct i915_dependency *dep; |
431 | |
432 | i915_request_show(m, rq, prefix, indent); |
433 | if (i915_request_completed(rq)) |
434 | return; |
435 | |
436 | rcu_read_lock(); |
437 | for_each_signaler(dep, rq) { |
438 | const struct i915_request *signaler = |
439 | node_to_request(dep->signaler); |
440 | |
441 | /* Dependencies along the same timeline are expected. */ |
442 | if (signaler->timeline == rq->timeline) |
443 | continue; |
444 | |
445 | if (__i915_request_is_complete(signaler)) |
446 | continue; |
447 | |
448 | i915_request_show(m, signaler, prefix, indent + 2); |
449 | } |
450 | rcu_read_unlock(); |
451 | } |
452 | |
453 | static void default_destroy(struct kref *kref) |
454 | { |
455 | struct i915_sched_engine *sched_engine = |
456 | container_of(kref, typeof(*sched_engine), ref); |
457 | |
458 | tasklet_kill(&sched_engine->tasklet); /* flush the callback */ |
459 | kfree(sched_engine); |
460 | } |
461 | |
462 | static bool default_disabled(struct i915_sched_engine *sched_engine) |
463 | { |
464 | return false; |
465 | } |
466 | |
467 | struct i915_sched_engine * |
468 | i915_sched_engine_create(unsigned int subclass) |
469 | { |
470 | struct i915_sched_engine *sched_engine; |
471 | |
472 | sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL); |
473 | if (!sched_engine) |
474 | return NULL; |
475 | |
476 | kref_init(&sched_engine->ref); |
477 | |
478 | sched_engine->queue = RB_ROOT_CACHED; |
479 | sched_engine->queue_priority_hint = INT_MIN; |
480 | sched_engine->destroy = default_destroy; |
481 | sched_engine->disabled = default_disabled; |
482 | |
483 | INIT_LIST_HEAD(&sched_engine->requests); |
484 | INIT_LIST_HEAD(&sched_engine->hold); |
485 | |
486 | mtx_init(&sched_engine->lock, IPL_TTY); |
487 | lockdep_set_subclass(&sched_engine->lock, subclass); |
488 | |
489 | /* |
490 | * Due to an interesting quirk in lockdep's internal debug tracking, |
491 | * after setting a subclass we must ensure the lock is used. Otherwise, |
492 | * nr_unused_locks is incremented once too often. |
493 | */ |
494 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
495 | local_irq_disable(); |
496 | lock_map_acquire(&sched_engine->lock.dep_map); |
497 | lock_map_release(&sched_engine->lock.dep_map); |
498 | local_irq_enable(); |
499 | #endif |
500 | |
501 | return sched_engine; |
502 | } |
503 | |
504 | void i915_scheduler_module_exit(void) |
505 | { |
506 | #ifdef __linux__ |
507 | kmem_cache_destroy(slab_dependencies); |
508 | kmem_cache_destroy(slab_priorities); |
509 | #else |
510 | pool_destroy(&slab_dependencies); |
511 | pool_destroy(&slab_priorities); |
512 | #endif |
513 | } |
514 | |
515 | int __init i915_scheduler_module_init(void) |
516 | { |
517 | #ifdef __linux__ |
518 | slab_dependencies = KMEM_CACHE(i915_dependency, |
519 | SLAB_HWCACHE_ALIGN | |
520 | SLAB_TYPESAFE_BY_RCU); |
521 | if (!slab_dependencies) |
522 | return -ENOMEM; |
523 | |
524 | slab_priorities = KMEM_CACHE(i915_priolist, 0); |
525 | if (!slab_priorities) |
526 | goto err_priorities; |
527 | |
528 | return 0; |
529 | |
530 | err_priorities: |
531 | kmem_cache_destroy(slab_priorities); |
532 | return -ENOMEM; |
533 | #else |
534 | pool_init(&slab_dependencies, sizeof(struct i915_dependency), |
535 | CACHELINESIZE, IPL_TTY, 0, "gsdep", NULL); |
536 | pool_init(&slab_priorities, sizeof(struct i915_priolist), |
537 | CACHELINESIZE, IPL_TTY, 0, "gspri", NULL); |
538 | |
539 | return 0; |
540 | #endif |
541 | } |