File: dev/pci/drm/dma-resv.c
Warning: line 608, column 30: Array access results in a null pointer dereference
1 | // SPDX-License-Identifier: MIT | |||
2 | /* | |||
3 | * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) | |||
4 | * | |||
5 | * Based on bo.c which bears the following copyright notice, | |||
6 | * but is dual licensed: | |||
7 | * | |||
8 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | |||
9 | * All Rights Reserved. | |||
10 | * | |||
11 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
12 | * copy of this software and associated documentation files (the | |||
13 | * "Software"), to deal in the Software without restriction, including | |||
14 | * without limitation the rights to use, copy, modify, merge, publish, | |||
15 | * distribute, sub license, and/or sell copies of the Software, and to | |||
16 | * permit persons to whom the Software is furnished to do so, subject to | |||
17 | * the following conditions: | |||
18 | * | |||
19 | * The above copyright notice and this permission notice (including the | |||
20 | * next paragraph) shall be included in all copies or substantial portions | |||
21 | * of the Software. | |||
22 | * | |||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
24 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
25 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |||
26 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |||
27 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |||
28 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |||
29 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
30 | * | |||
31 | **************************************************************************/ | |||
32 | /* | |||
33 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | |||
34 | */ | |||
35 | ||||
36 | #include <linux/dma-resv.h> | |||
37 | #include <linux/dma-fence-array.h> | |||
38 | #include <linux/export.h> | |||
39 | #include <linux/mm.h> | |||
40 | #include <linux/sched/mm.h> | |||
41 | #include <linux/mmu_notifier.h> | |||
42 | #include <linux/seq_file.h> | |||
43 | ||||
44 | /** | |||
45 | * DOC: Reservation Object Overview | |||
46 | * | |||
47 | * The reservation object provides a mechanism to manage a container of | |||
48 | * dma_fence objects associated with a resource. A reservation object | |||
49 | * can have any number of fences attached to it. Each fence carries a usage | |||
50 | * parameter determining how the operation represented by the fence is using the | |||
51 | * resource. The RCU mechanism is used to protect read access to fences from | |||
52 | * locked write-side updates. | |||
53 | * | |||
54 | * See struct dma_resv for more details. | |||
55 | */ | |||
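To make the overview concrete, here is a minimal sketch of how a driver typically attaches a fence for a submitted job: take the ww_mutex, make sure a slot is reserved, then publish the fence with a usage class. The helper and variable names (my_attach_job_fence, bo_resv, job_fence) are hypothetical and only illustrate the call sequence; they are not part of this file.

/* Hypothetical driver helper; a sketch of the lock/reserve/add pattern. */
static int my_attach_job_fence(struct dma_resv *bo_resv,
                               struct dma_fence *job_fence)
{
        int ret;

        ret = dma_resv_lock(bo_resv, NULL);     /* or pass a ww_acquire_ctx */
        if (ret)
                return ret;

        /* Reserve a slot; must be redone if the object is unlocked again. */
        ret = dma_resv_reserve_fences(bo_resv, 1);
        if (!ret)
                dma_resv_add_fence(bo_resv, job_fence, DMA_RESV_USAGE_WRITE);

        dma_resv_unlock(bo_resv);
        return ret;
}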
56 | ||||
57 | DEFINE_WD_CLASS(reservation_ww_class); | |||
58 | EXPORT_SYMBOL(reservation_ww_class); | |||
59 | ||||
60 | /* Mask for the lower fence pointer bits */ | |||
61 | #define DMA_RESV_LIST_MASK 0x3 | |||
62 | ||||
63 | struct dma_resv_list { | |||
64 | struct rcu_head rcu; | |||
65 | u32 num_fences, max_fences; | |||
66 | struct dma_fence __rcu *table[]; | |||
67 | }; | |||
68 | ||||
69 | /* Extract the fence and usage flags from an RCU protected entry in the list. */ | |||
70 | static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index, | |||
71 | struct dma_resv *resv, struct dma_fence **fence, | |||
72 | enum dma_resv_usage *usage) | |||
73 | { | |||
74 | long tmp; | |||
75 | ||||
76 | tmp = (long)rcu_dereference_check(list->table[index], | |||
77 | resv ? dma_resv_held(resv) : true); | |||
78 | *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK); | |||
79 | if (usage) | |||
80 | *usage = tmp & DMA_RESV_LIST_MASK; | |||
81 | } | |||
82 | ||||
83 | /* Set the fence and usage flags at the specific index in the list. */ | |||
84 | static void dma_resv_list_set(struct dma_resv_list *list, | |||
85 | unsigned int index, | |||
86 | struct dma_fence *fence, | |||
87 | enum dma_resv_usage usage) | |||
88 | { | |||
89 | long tmp = ((long)fence) | usage; | |||
90 | ||||
91 | RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp); | |||
92 | } | |||
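The two helpers above rely on dma_fence allocations being at least 4-byte aligned, so the two low bits of each table pointer are free to carry the enum dma_resv_usage value. A rough illustration of the encoding (not a driver-facing API, just the arithmetic the helpers perform):

/* Illustration only: a fence pointer and its usage share one tagged word. */
long entry = ((long)fence) | DMA_RESV_USAGE_READ;                    /* pack */
struct dma_fence *f = (struct dma_fence *)(entry & ~DMA_RESV_LIST_MASK);
enum dma_resv_usage u = entry & DMA_RESV_LIST_MASK;                  /* unpack */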
93 | ||||
94 | /* | |||
95 | * Allocate a new dma_resv_list and make sure to correctly initialize | |||
96 | * max_fences. | |||
97 | */ | |||
98 | static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences) | |||
99 | { | |||
100 | struct dma_resv_list *list; | |||
101 | ||||
102 | list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL); | |||
103 | if (!list) | |||
104 | return NULL; | |||
105 | ||||
106 | #ifdef __linux__ | |||
107 | list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) / | |||
108 | sizeof(*list->table); | |||
109 | #else | |||
110 | list->max_fences = (offsetof(typeof(*list), table[max_fences]) - | |||
111 | offsetof(typeof(*list), table)) / sizeof(*list->table); | |||
112 | #endif | |||
113 | ||||
114 | return list; | |||
115 | } | |||
116 | ||||
117 | /* Free a dma_resv_list and make sure to drop all references. */ | |||
118 | static void dma_resv_list_free(struct dma_resv_list *list) | |||
119 | { | |||
120 | unsigned int i; | |||
121 | ||||
122 | if (!list) | |||
123 | return; | |||
124 | ||||
125 | for (i = 0; i < list->num_fences; ++i) { | |||
126 | struct dma_fence *fence; | |||
127 | ||||
128 | dma_resv_list_entry(list, i, NULL, &fence, NULL); | |||
129 | dma_fence_put(fence); | |||
130 | } | |||
131 | kfree_rcu(list, rcu); | |||
132 | } | |||
133 | ||||
134 | /** | |||
135 | * dma_resv_init - initialize a reservation object | |||
136 | * @obj: the reservation object | |||
137 | */ | |||
138 | void dma_resv_init(struct dma_resv *obj) | |||
139 | { | |||
140 | ww_mutex_init(&obj->lock, &reservation_ww_class); | |||
141 | ||||
142 | RCU_INIT_POINTER(obj->fences, NULL); | |||
143 | } | |||
144 | EXPORT_SYMBOL(dma_resv_init); | |||
145 | ||||
146 | /** | |||
147 | * dma_resv_fini - destroys a reservation object | |||
148 | * @obj: the reservation object | |||
149 | */ | |||
150 | void dma_resv_fini(struct dma_resv *obj) | |||
151 | { | |||
152 | /* | |||
153 | * This object should be dead and all references must have | |||
154 | * been released to it, so no need to be protected with rcu. | |||
155 | */ | |||
156 | dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); | |||
157 | ww_mutex_destroy(&obj->lock); | |||
158 | } | |||
159 | EXPORT_SYMBOL(dma_resv_fini); | |||
160 | ||||
161 | /* Dereference the fences while ensuring RCU rules */ | |||
162 | static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj) | |||
163 | { | |||
164 | return rcu_dereference_check(obj->fences, dma_resv_held(obj)); | |||
165 | } | |||
166 | ||||
167 | /** | |||
168 | * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object. | |||
169 | * @obj: reservation object | |||
170 | * @num_fences: number of fences we want to add | |||
171 | * | |||
172 | * Should be called before dma_resv_add_fence(). Must be called with @obj | |||
173 | * locked through dma_resv_lock(). | |||
174 | * | |||
175 | * Note that the preallocated slots need to be re-reserved if @obj is unlocked | |||
176 | * at any time before calling dma_resv_add_fence(). This is validated when | |||
177 | * CONFIG_DEBUG_MUTEXES is enabled. | |||
178 | * | |||
179 | * RETURNS | |||
180 | * Zero for success, or -errno | |||
181 | */ | |||
182 | int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences) | |||
183 | { | |||
184 | struct dma_resv_list *old, *new; | |||
185 | unsigned int i, j, k, max; | |||
186 | ||||
187 | dma_resv_assert_held(obj); | |||
188 | ||||
189 | old = dma_resv_fences_list(obj); | |||
190 | if (old && old->max_fences) { | |||
191 | if ((old->num_fences + num_fences) <= old->max_fences) | |||
192 | return 0; | |||
193 | max = max(old->num_fences + num_fences, old->max_fences * 2); | |||
194 | } else { | |||
195 | max = max(4ul, roundup_pow_of_two(num_fences)); | |||
196 | } | |||
197 | ||||
198 | new = dma_resv_list_alloc(max); | |||
199 | if (!new) | |||
200 | return -ENOMEM; | |||
201 | ||||
202 | /* | |||
203 | * no need to bump fence refcounts, rcu_read access | |||
204 | * requires the use of kref_get_unless_zero, and the | |||
205 | * references from the old struct are carried over to | |||
206 | * the new. | |||
207 | */ | |||
208 | for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) { | |||
209 | enum dma_resv_usage usage; | |||
210 | struct dma_fence *fence; | |||
211 | ||||
212 | dma_resv_list_entry(old, i, obj, &fence, &usage); | |||
213 | if (dma_fence_is_signaled(fence)) | |||
214 | RCU_INIT_POINTER(new->table[--k], fence); | |||
215 | else | |||
216 | dma_resv_list_set(new, j++, fence, usage); | |||
217 | } | |||
218 | new->num_fences = j; | |||
219 | ||||
220 | /* | |||
221 | * We are not changing the effective set of fences here so can | |||
222 | * merely update the pointer to the new array; both existing | |||
223 | * readers and new readers will see exactly the same set of | |||
224 | * active (unsignaled) fences. Individual fences and the | |||
225 | * old array are protected by RCU and so will not vanish under | |||
226 | * the gaze of the rcu_read_lock() readers. | |||
227 | */ | |||
228 | rcu_assign_pointer(obj->fences, new); | |||
229 | ||||
230 | if (!old) | |||
231 | return 0; | |||
232 | ||||
233 | /* Drop the references to the signaled fences */ | |||
234 | for (i = k; i < max; ++i) { | |||
235 | struct dma_fence *fence; | |||
236 | ||||
237 | fence = rcu_dereference_protected(new->table[i], | |||
238 | dma_resv_held(obj)); | |||
239 | dma_fence_put(fence); | |||
240 | } | |||
241 | kfree_rcu(old, rcu); | |||
242 | ||||
243 | return 0; | |||
244 | } | |||
245 | EXPORT_SYMBOL(dma_resv_reserve_fences); | |||
246 | ||||
247 | #ifdef CONFIG_DEBUG_MUTEXES | |||
248 | /** | |||
249 | * dma_resv_reset_max_fences - reset fences for debugging | |||
250 | * @obj: the dma_resv object to reset | |||
251 | * | |||
252 | * Reset the number of pre-reserved fence slots to test that drivers do | |||
253 | * correct slot allocation using dma_resv_reserve_fences(). See also | |||
254 | * &dma_resv_list.max_fences. | |||
255 | */ | |||
256 | void dma_resv_reset_max_fences(struct dma_resv *obj) | |||
257 | { | |||
258 | struct dma_resv_list *fences = dma_resv_fences_list(obj); | |||
259 | ||||
260 | dma_resv_assert_held(obj); | |||
261 | ||||
262 | /* Test fence slot reservation */ | |||
263 | if (fences) | |||
264 | fences->max_fences = fences->num_fences; | |||
265 | } | |||
266 | EXPORT_SYMBOL(dma_resv_reset_max_fences); | |||
267 | #endif | |||
268 | ||||
269 | /** | |||
270 | * dma_resv_add_fence - Add a fence to the dma_resv obj | |||
271 | * @obj: the reservation object | |||
272 | * @fence: the fence to add | |||
273 | * @usage: how the fence is used, see enum dma_resv_usage | |||
274 | * | |||
275 | * Add a fence to a slot. @obj must be locked with dma_resv_lock() and | |||
276 | * dma_resv_reserve_fences() must have been called. | |||
277 | * | |||
278 | * See also &dma_resv.fence for a discussion of the semantics. | |||
279 | */ | |||
280 | void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, | |||
281 | enum dma_resv_usage usage) | |||
282 | { | |||
283 | struct dma_resv_list *fobj; | |||
284 | struct dma_fence *old; | |||
285 | unsigned int i, count; | |||
286 | ||||
287 | dma_fence_get(fence); | |||
288 | ||||
289 | dma_resv_assert_held(obj); | |||
290 | ||||
291 | /* Drivers should not add containers here, instead add each fence | |||
292 | * individually. | |||
293 | */ | |||
294 | WARN_ON(dma_fence_is_container(fence)); | |||
295 | ||||
296 | fobj = dma_resv_fences_list(obj); | |||
297 | count = fobj->num_fences; | |||
298 | ||||
299 | for (i = 0; i < count; ++i) { | |||
300 | enum dma_resv_usage old_usage; | |||
301 | ||||
302 | dma_resv_list_entry(fobj, i, obj, &old, &old_usage); | |||
303 | if ((old->context == fence->context && old_usage >= usage && | |||
304 | dma_fence_is_later_or_same(fence, old)) || | |||
305 | dma_fence_is_signaled(old)) { | |||
306 | dma_resv_list_set(fobj, i, fence, usage); | |||
307 | dma_fence_put(old); | |||
308 | return; | |||
309 | } | |||
310 | } | |||
311 | ||||
312 | BUG_ON(fobj->num_fences >= fobj->max_fences); | |||
313 | count++; | |||
314 | ||||
315 | dma_resv_list_set(fobj, i, fence, usage); | |||
316 | /* pointer update must be visible before we extend the num_fences */ | |||
317 | smp_store_mb(fobj->num_fences, count); | |||
318 | } | |||
319 | EXPORT_SYMBOL(dma_resv_add_fence); | |||
320 | ||||
321 | /** | |||
322 | * dma_resv_replace_fences - replace fences in the dma_resv obj | |||
323 | * @obj: the reservation object | |||
324 | * @context: the context of the fences to replace | |||
325 | * @replacement: the new fence to use instead | |||
326 | * @usage: how the new fence is used, see enum dma_resv_usage | |||
327 | * | |||
328 | * Replace fences with a specified context with a new fence. Only valid if the | |||
329 | * operation represented by the original fence no longer has access to the | |||
330 | * resources represented by the dma_resv object when the new fence completes. | |||
331 | * | |||
332 | * An example of using this is replacing a preemption fence with a page table | |||
333 | * update fence which makes the resource inaccessible. | |||
334 | */ | |||
335 | void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, | |||
336 | struct dma_fence *replacement, | |||
337 | enum dma_resv_usage usage) | |||
338 | { | |||
339 | struct dma_resv_list *list; | |||
340 | unsigned int i; | |||
341 | ||||
342 | dma_resv_assert_held(obj); | |||
343 | ||||
344 | list = dma_resv_fences_list(obj); | |||
345 | for (i = 0; list && i < list->num_fences; ++i) { | |||
346 | struct dma_fence *old; | |||
347 | ||||
348 | dma_resv_list_entry(list, i, obj, &old, NULL); | |||
349 | if (old->context != context) | |||
350 | continue; | |||
351 | ||||
352 | dma_resv_list_set(list, i, dma_fence_get(replacement), usage); | |||
353 | dma_fence_put(old); | |||
354 | } | |||
355 | } | |||
356 | EXPORT_SYMBOL(dma_resv_replace_fences); | |||
357 | ||||
358 | /* Restart the unlocked iteration by initializing the cursor object. */ | |||
359 | static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) | |||
360 | { | |||
361 | cursor->index = 0; | |||
362 | cursor->num_fences = 0; | |||
363 | cursor->fences = dma_resv_fences_list(cursor->obj); | |||
364 | if (cursor->fences) | |||
365 | cursor->num_fences = cursor->fences->num_fences; | |||
366 | cursor->is_restarted = true; | |||
367 | } | |||
368 | ||||
369 | /* Walk to the next not signaled fence and grab a reference to it */ | |||
370 | static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) | |||
371 | { | |||
372 | if (!cursor->fences) | |||
373 | return; | |||
374 | ||||
375 | do { | |||
376 | /* Drop the reference from the previous round */ | |||
377 | dma_fence_put(cursor->fence); | |||
378 | ||||
379 | if (cursor->index >= cursor->num_fences) { | |||
380 | cursor->fence = NULL; | |||
381 | break; | |||
382 | ||||
383 | } | |||
384 | ||||
385 | dma_resv_list_entry(cursor->fences, cursor->index++, | |||
386 | cursor->obj, &cursor->fence, | |||
387 | &cursor->fence_usage); | |||
388 | cursor->fence = dma_fence_get_rcu(cursor->fence); | |||
389 | if (!cursor->fence) { | |||
390 | dma_resv_iter_restart_unlocked(cursor); | |||
391 | continue; | |||
392 | } | |||
393 | ||||
394 | if (!dma_fence_is_signaled(cursor->fence) && | |||
395 | cursor->usage >= cursor->fence_usage) | |||
396 | break; | |||
397 | } while (true); | |||
398 | } | |||
399 | ||||
400 | /** | |||
401 | * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj. | |||
402 | * @cursor: the cursor with the current position | |||
403 | * | |||
404 | * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). | |||
405 | * | |||
406 | * Beware that the iterator can be restarted. Code which accumulates statistics | |||
407 | * or similar needs to check for this with dma_resv_iter_is_restarted(). For | |||
408 | * this reason prefer the locked dma_resv_iter_first() whenever possible. | |||
409 | * | |||
410 | * Returns the first fence from an unlocked dma_resv obj. | |||
411 | */ | |||
412 | struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) | |||
413 | { | |||
414 | rcu_read_lock(); | |||
415 | do { | |||
416 | dma_resv_iter_restart_unlocked(cursor); | |||
417 | dma_resv_iter_walk_unlocked(cursor); | |||
418 | } while (dma_resv_fences_list(cursor->obj) != cursor->fences); | |||
419 | rcu_read_unlock(); | |||
420 | ||||
421 | return cursor->fence; | |||
422 | } | |||
423 | EXPORT_SYMBOL(dma_resv_iter_first_unlocked); | |||
424 | ||||
425 | /** | |||
426 | * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj. | |||
427 | * @cursor: the cursor with the current position | |||
428 | * | |||
429 | * Beware that the iterator can be restarted. Code which accumulates statistics | |||
430 | * or similar needs to check for this with dma_resv_iter_is_restarted(). For | |||
431 | * this reason prefer the locked dma_resv_iter_next() whenever possible. | |||
432 | * | |||
433 | * Returns the next fence from an unlocked dma_resv obj. | |||
434 | */ | |||
435 | struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) | |||
436 | { | |||
437 | bool restart; | |||
438 | ||||
439 | rcu_read_lock(); | |||
440 | cursor->is_restarted = false; | |||
441 | restart = dma_resv_fences_list(cursor->obj) != cursor->fences; | |||
442 | do { | |||
443 | if (restart) | |||
444 | dma_resv_iter_restart_unlocked(cursor); | |||
445 | dma_resv_iter_walk_unlocked(cursor); | |||
446 | restart = true; | |||
447 | } while (dma_resv_fences_list(cursor->obj) != cursor->fences); | |||
448 | rcu_read_unlock(); | |||
449 | ||||
450 | return cursor->fence; | |||
451 | } | |||
452 | EXPORT_SYMBOL(dma_resv_iter_next_unlocked); | |||
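Because the unlocked iterators can restart when the fence list changes underneath them, any state accumulated per fence has to be discarded on a restart, exactly as dma_resv_copy_fences() and dma_resv_get_fences() below do. A minimal sketch of that pattern; obj and count are hypothetical names:

/* Sketch: count unsignaled fences without holding the dma_resv lock. */
struct dma_resv_iter cursor;
struct dma_fence *fence;
unsigned int count = 0;

dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
        if (dma_resv_iter_is_restarted(&cursor))
                count = 0;      /* the list changed, start over */
        ++count;
}
dma_resv_iter_end(&cursor);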
453 | ||||
454 | /** | |||
455 | * dma_resv_iter_first - first fence from a locked dma_resv object | |||
456 | * @cursor: cursor to record the current position | |||
457 | * | |||
458 | * Subsequent fences are iterated with dma_resv_iter_next(). | |||
459 | * | |||
460 | * Return the first fence in the dma_resv object while holding the | |||
461 | * &dma_resv.lock. | |||
462 | */ | |||
463 | struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor) | |||
464 | { | |||
465 | struct dma_fence *fence; | |||
466 | ||||
467 | dma_resv_assert_held(cursor->obj); | |||
468 | ||||
469 | cursor->index = 0; | |||
470 | cursor->fences = dma_resv_fences_list(cursor->obj); | |||
471 | ||||
472 | fence = dma_resv_iter_next(cursor); | |||
473 | cursor->is_restarted = true; | |||
474 | return fence; | |||
475 | } | |||
476 | EXPORT_SYMBOL_GPL(dma_resv_iter_first); | |||
477 | ||||
478 | /** | |||
479 | * dma_resv_iter_next - next fence from a locked dma_resv object | |||
480 | * @cursor: cursor to record the current position | |||
481 | * | |||
482 | * Return the next fence from the dma_resv object while holding the | |||
483 | * &dma_resv.lock. | |||
484 | */ | |||
485 | struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor) | |||
486 | { | |||
487 | struct dma_fence *fence; | |||
488 | ||||
489 | dma_resv_assert_held(cursor->obj); | |||
490 | ||||
491 | cursor->is_restarted = false; | |||
492 | ||||
493 | do { | |||
494 | if (!cursor->fences || | |||
495 | cursor->index >= cursor->fences->num_fences) | |||
496 | return NULL; | |||
497 | ||||
498 | dma_resv_list_entry(cursor->fences, cursor->index++, | |||
499 | cursor->obj, &fence, &cursor->fence_usage); | |||
500 | } while (cursor->fence_usage > cursor->usage); | |||
501 | ||||
502 | return fence; | |||
503 | } | |||
504 | EXPORT_SYMBOL_GPL(dma_resv_iter_next); | |||
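With the &dma_resv.lock held the fence list cannot change, so iteration never restarts and the dma_resv_for_each_fence() helper (used by dma_resv_describe() further down) is usually the more convenient form. A short sketch, assuming obj is a dma_resv object that the caller has already locked; the pr_debug() body is illustrative only:

/* Sketch: walk every fence of a locked object, including bookkeeping ones. */
struct dma_resv_iter cursor;
struct dma_fence *fence;

dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP, fence) {
        /* dma_resv_iter_usage() reports how this fence uses the resource. */
        pr_debug("fence context %llu, usage %d\n", fence->context,
                 dma_resv_iter_usage(&cursor));
}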
505 | ||||
506 | /** | |||
507 | * dma_resv_copy_fences - Copy all fences from src to dst. | |||
508 | * @dst: the destination reservation object | |||
509 | * @src: the source reservation object | |||
510 | * | |||
511 | * Copy all fences from src to dst. dst-lock must be held. | |||
512 | */ | |||
513 | int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src) | |||
514 | { | |||
515 | struct dma_resv_iter cursor; | |||
516 | struct dma_resv_list *list; | |||
517 | struct dma_fence *f; | |||
518 | ||||
519 | dma_resv_assert_held(dst); | |||
520 | ||||
521 | list = NULL; | |||
522 | ||||
523 | dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP); | |||
524 | dma_resv_for_each_fence_unlocked(&cursor, f) { | |||
525 | ||||
526 | if (dma_resv_iter_is_restarted(&cursor)) { | |||
527 | dma_resv_list_free(list); | |||
528 | ||||
529 | list = dma_resv_list_alloc(cursor.num_fences); | |||
530 | if (!list) { | |||
531 | dma_resv_iter_end(&cursor); | |||
532 | return -ENOMEM; | |||
533 | } | |||
534 | list->num_fences = 0; | |||
535 | } | |||
536 | ||||
537 | dma_fence_get(f); | |||
538 | dma_resv_list_set(list, list->num_fences++, f, | |||
539 | dma_resv_iter_usage(&cursor)); | |||
540 | } | |||
541 | dma_resv_iter_end(&cursor); | |||
542 | ||||
543 | list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst)); | |||
544 | dma_resv_list_free(list); | |||
545 | return 0; | |||
546 | } | |||
547 | EXPORT_SYMBOL(dma_resv_copy_fences); | |||
548 | ||||
549 | /** | |||
550 | * dma_resv_get_fences - Get an object's fences | |||
551 | * without the update side lock held | |||
552 | * @obj: the reservation object | |||
553 | * @usage: controls which fences to include, see enum dma_resv_usage. | |||
554 | * @num_fences: the number of fences returned | |||
555 | * @fences: the array of fence ptrs returned (array is krealloc'd to the | |||
556 | * required size, and must be freed by caller) | |||
557 | * | |||
558 | * Retrieve all fences from the reservation object. | |||
559 | * Returns either zero or -ENOMEM. | |||
560 | */ | |||
561 | int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, | |||
562 | unsigned int *num_fences, struct dma_fence ***fences) | |||
563 | { | |||
564 | struct dma_resv_iter cursor; | |||
565 | struct dma_fence *fence; | |||
566 | struct dma_fence **nfences; | |||
567 | ||||
568 | *num_fences = 0; | |||
569 | *fences = NULL; | |||
570 | ||||
571 | dma_resv_iter_begin(&cursor, obj, usage); | |||
572 | dma_resv_for_each_fence_unlocked(&cursor, fence) { | |||
573 | ||||
574 | if (dma_resv_iter_is_restarted(&cursor)) { | |||
575 | struct dma_fence **new_fences; | |||
576 | unsigned int count; | |||
577 | ||||
578 | while (*num_fences) | |||
579 | dma_fence_put((*fences)[--(*num_fences)]); | |||
580 | ||||
581 | count = cursor.num_fences + 1; | |||
582 | ||||
583 | /* Eventually re-allocate the array */ | |||
584 | #ifdef __linux__ | |||
585 | new_fences = krealloc_array(*fences, count, | |||
586 | sizeof(void *), | |||
587 | GFP_KERNEL); | |||
588 | #else | |||
589 | nfences = kmalloc(count * sizeof(void *), | |||
590 | GFP_KERNEL); | |||
591 | if (nfences != NULL && *fences != NULL) { | |||
592 | memcpy(nfences, *fences, | |||
593 | (count - 1) * sizeof(void *)); | |||
594 | kfree(*fences); | |||
595 | } | |||
596 | new_fences = nfences; | |||
597 | #endif | |||
598 | if (count && !new_fences) { | |||
599 | kfree(*fences); | |||
600 | *fences = NULL; | |||
601 | *num_fences = 0; | |||
602 | dma_resv_iter_end(&cursor); | |||
603 | return -ENOMEM; | |||
604 | } | |||
605 | *fences = new_fences; | |||
606 | } | |||
607 | ||||
608 | (*fences)[(*num_fences)++] = dma_fence_get(fence); | |||
609 | } | |||
610 | dma_resv_iter_end(&cursor); | |||
611 | ||||
612 | return 0; | |||
613 | } | |||
614 | EXPORT_SYMBOL_GPL(dma_resv_get_fences); | |||
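A sketch of how a caller might consume the snapshot returned by dma_resv_get_fences(): the array and every fence reference in it belong to the caller and must be released, as the kernel-doc above notes. The variable names are illustrative.

/* Sketch: take a snapshot of all read/write fences, then drop everything. */
struct dma_fence **fences;
unsigned int i, count;
int ret;

ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &count, &fences);
if (ret)
        return ret;

for (i = 0; i < count; ++i) {
        /* ... inspect or wait on fences[i] ... */
        dma_fence_put(fences[i]);
}
kfree(fences);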
615 | ||||
616 | /** | |||
617 | * dma_resv_get_singleton - Get a single fence for all the fences | |||
618 | * @obj: the reservation object | |||
619 | * @usage: controls which fences to include, see enum dma_resv_usage. | |||
620 | * @fence: the resulting fence | |||
621 | * | |||
622 | * Get a single fence representing all the fences inside the resv object. | |||
623 | * Returns either 0 for success or -ENOMEM. | |||
624 | * | |||
625 | * Warning: This can't be used like this when adding the fence back to the resv | |||
626 | * object since that can lead to stack corruption when finalizing the | |||
627 | * dma_fence_array. | |||
628 | * | |||
629 | * Returns 0 on success and negative error values on failure. | |||
630 | */ | |||
631 | int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, | |||
632 | struct dma_fence **fence) | |||
633 | { | |||
634 | struct dma_fence_array *array; | |||
635 | struct dma_fence **fences; | |||
636 | unsigned count; | |||
637 | int r; | |||
638 | ||||
639 | r = dma_resv_get_fences(obj, usage, &count, &fences); | |||
640 | if (r) | |||
641 | return r; | |||
642 | ||||
643 | if (count == 0) { | |||
644 | *fence = NULL; | |||
645 | return 0; | |||
646 | } | |||
647 | ||||
648 | if (count == 1) { | |||
649 | *fence = fences[0]; | |||
650 | kfree(fences); | |||
651 | return 0; | |||
652 | } | |||
653 | ||||
654 | array = dma_fence_array_create(count, fences, | |||
655 | dma_fence_context_alloc(1), | |||
656 | 1, false); | |||
657 | if (!array) { | |||
658 | while (count--) | |||
659 | dma_fence_put(fences[count]); | |||
660 | kfree(fences); | |||
661 | return -ENOMEM; | |||
662 | } | |||
663 | ||||
664 | *fence = &array->base; | |||
665 | return 0; | |||
666 | } | |||
667 | EXPORT_SYMBOL_GPL(dma_resv_get_singleton); | |||
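A short sketch of the intended use: collapse whatever is attached to the object into a single fence and wait on it (or hand it elsewhere, just never back into the same resv object, per the warning above). obj is a hypothetical reservation object.

/* Sketch: merge all kernel-usage fences into one and wait for it. */
struct dma_fence *fence = NULL;
int ret;

ret = dma_resv_get_singleton(obj, DMA_RESV_USAGE_KERNEL, &fence);
if (ret)
        return ret;

if (fence) {
        dma_fence_wait(fence, false);   /* uninterruptible wait */
        dma_fence_put(fence);
}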
668 | ||||
669 | /** | |||
670 | * dma_resv_wait_timeout - Wait on reservation's objects fences | |||
671 | * @obj: the reservation object | |||
672 | * @usage: controls which fences to include, see enum dma_resv_usage. | |||
673 | * @intr: if true, do interruptible wait | |||
674 | * @timeout: timeout value in jiffies or zero to return immediately | |||
675 | * | |||
676 | * Callers are not required to hold specific locks, but may hold | |||
677 | * dma_resv_lock() already. | |||
678 | * RETURNS | |||
679 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | |||
680 | * greater than zero on success. | |||
681 | */ | |||
682 | long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, | |||
683 | bool intr, unsigned long timeout) | |||
684 | { | |||
685 | long ret = timeout ? timeout : 1; | |||
686 | struct dma_resv_iter cursor; | |||
687 | struct dma_fence *fence; | |||
688 | ||||
689 | dma_resv_iter_begin(&cursor, obj, usage); | |||
690 | dma_resv_for_each_fence_unlocked(&cursor, fence) { | |||
691 | ||||
692 | ret = dma_fence_wait_timeout(fence, intr, ret); | |||
693 | if (ret <= 0) { | |||
694 | dma_resv_iter_end(&cursor); | |||
695 | return ret; | |||
696 | } | |||
697 | } | |||
698 | dma_resv_iter_end(&cursor); | |||
699 | ||||
700 | return ret; | |||
701 | } | |||
702 | EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); | |||
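A sketch of a typical wait with a relative timeout; msecs_to_jiffies() converts the milliseconds a driver usually works with into the jiffies this function expects. The 100 ms value is an arbitrary example.

/* Sketch: wait up to ~100 ms for all writers to finish, interruptibly. */
long ret;

ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_WRITE, true,
                            msecs_to_jiffies(100));
if (ret == 0)
        return -ETIMEDOUT;      /* wait timed out */
else if (ret < 0)
        return ret;             /* interrupted or other error */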
703 | ||||
704 | ||||
705 | /** | |||
706 | * dma_resv_test_signaled - Test if a reservation object's fences have been | |||
707 | * signaled. | |||
708 | * @obj: the reservation object | |||
709 | * @usage: controls which fences to include, see enum dma_resv_usage. | |||
710 | * | |||
711 | * Callers are not required to hold specific locks, but may hold | |||
712 | * dma_resv_lock() already. | |||
713 | * | |||
714 | * RETURNS | |||
715 | * | |||
716 | * True if all fences signaled, else false. | |||
717 | */ | |||
718 | bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage) | |||
719 | { | |||
720 | struct dma_resv_iter cursor; | |||
721 | struct dma_fence *fence; | |||
722 | ||||
723 | dma_resv_iter_begin(&cursor, obj, usage); | |||
724 | dma_resv_for_each_fence_unlocked(&cursor, fence) { | |||
725 | dma_resv_iter_end(&cursor); | |||
726 | return false; | |||
727 | } | |||
728 | dma_resv_iter_end(&cursor); | |||
729 | return true; | |||
730 | } | |||
731 | EXPORT_SYMBOL_GPL(dma_resv_test_signaled); | |||
732 | ||||
733 | /** | |||
734 | * dma_resv_describe - Dump description of the resv object into seq_file | |||
735 | * @obj: the reservation object | |||
736 | * @seq: the seq_file to dump the description into | |||
737 | * | |||
738 | * Dump a textual description of the fences inside a dma_resv object into the | |||
739 | * seq_file. | |||
740 | */ | |||
741 | void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) | |||
742 | { | |||
743 | STUB(); | |||
744 | #ifdef notyet | |||
745 | static const char *usage[] = { "kernel", "write", "read", "bookkeep" }; | |||
746 | struct dma_resv_iter cursor; | |||
747 | struct dma_fence *fence; | |||
748 | ||||
749 | dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { | |||
750 | seq_printf(seq, "\t%s fence:", | |||
751 | usage[dma_resv_iter_usage(&cursor)]); | |||
752 | dma_fence_describe(fence, seq); | |||
753 | } | |||
754 | #endif | |||
755 | } | |||
756 | EXPORT_SYMBOL_GPL(dma_resv_describe); | |||
757 | ||||
758 | #if IS_ENABLED(CONFIG_LOCKDEP) | |||
759 | static int __init dma_resv_lockdep(void) | |||
760 | { | |||
761 | struct mm_struct *mm = mm_alloc(); | |||
762 | struct ww_acquire_ctx ctx; | |||
763 | struct dma_resv obj; | |||
764 | struct address_space mapping; | |||
765 | int ret; | |||
766 | ||||
767 | if (!mm) | |||
768 | return -ENOMEM; | |||
769 | ||||
770 | dma_resv_init(&obj); | |||
771 | address_space_init_once(&mapping); | |||
772 | ||||
773 | mmap_read_lock(mm); | |||
774 | ww_acquire_init(&ctx, &reservation_ww_class); | |||
775 | ret = dma_resv_lock(&obj, &ctx); | |||
776 | if (ret == -EDEADLK) | |||
777 | dma_resv_lock_slow(&obj, &ctx); | |||
778 | fs_reclaim_acquire(GFP_KERNEL); | |||
779 | /* for unmap_mapping_range on trylocked buffer objects in shrinkers */ | |||
780 | i_mmap_lock_write(&mapping); | |||
781 | i_mmap_unlock_write(&mapping); | |||
782 | #ifdef CONFIG_MMU_NOTIFIER | |||
783 | lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); | |||
784 | __dma_fence_might_wait(); | |||
785 | lock_map_release(&__mmu_notifier_invalidate_range_start_map); | |||
786 | #else | |||
787 | __dma_fence_might_wait(); | |||
788 | #endif | |||
789 | fs_reclaim_release(GFP_KERNEL); | |||
790 | ww_mutex_unlock(&obj.lock); | |||
791 | ww_acquire_fini(&ctx); | |||
792 | mmap_read_unlock(mm); | |||
793 | ||||
794 | mmput(mm); | |||
795 | ||||
796 | return 0; | |||
797 | } | |||
798 | subsys_initcall(dma_resv_lockdep); | |||
799 | #endif |