File: dev/pci/drm/include/linux/list.h
Warning: line 262, column 13: Access to field 'prev' results in a dereference of a null pointer (loaded from variable 'next')
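
The diagnostic lands inside a linux/list.h helper rather than in the driver code listed below: "access to field 'prev' through a null 'next'" is the signature of a list unlink writing next->prev while the analyzer assumes next may be NULL. A minimal sketch of the shape being flagged, assuming a conventional __list_del() helper (the actual list.h body at line 262 is not reproduced in this report):

static inline void
__list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;      /* flagged if 'next' is assumed NULL */
        prev->next = next;
}

A plausible origin for the assumed-NULL value is list_first_entry_or_null(), which yields NULL for an empty list; in i915_gem_object_unbind() below, the while condition stops iterating before a NULL vma can reach list_move_tail(), so this looks like a false positive where the analyzer loses that guard across the macro expansion.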
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include <dev/pci/agpvar.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);

        mutex_unlock(&ggtt->vm.mutex);

        return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
        DRM_LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret;

        assert_object_held(obj);

        if (list_empty(&obj->vma.list))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as they are required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
                        ret = -EBUSY;
                        break;
                }

                /*
                 * Requiring the vm destructor to take the object lock
                 * before destroying a vma would help us eliminate the
                 * i915_vm_tryget() here, AND thus also the barrier stuff
                 * at the end. That's an easy fix, but sleeping locks in
                 * a kthread should generally be avoided.
                 */
                ret = -EAGAIN;
                if (!i915_vm_tryget(vma->vm))
                        break;

                spin_unlock(&obj->vma.lock);

                /*
                 * Since i915_vma_parked() takes the object lock
                 * before vma destruction, it won't race us here,
                 * and destroy the vma from under us.
                 */

                ret = -EBUSY;
                if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
                        assert_object_held(vma->obj);
                        ret = i915_vma_unbind_async(vma, vm_trylock);
                }

                if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                                      !i915_vma_is_active(vma))) {
                        if (vm_trylock) {
                                if (mutex_trylock(&vma->vm->mutex)) {
                                        ret = __i915_vma_unbind(vma);
                                        mutex_unlock(&vma->vm->mutex);
                                }
                        } else {
                                ret = i915_vma_unbind(vma);
                        }
                }

                i915_vm_put(vma->vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}

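For illustration, a hypothetical caller of i915_gem_object_unbind() (a sketch only; the flag combination and the helper name are assumptions, not code from this file -- note the function requires the object lock, per the assert_object_held() above):

/* Sketch: force-unbind all VMAs of obj, waiting out active ones and
 * retrying across deferred vm release via the BARRIER flag. */
static int example_force_unbind(struct drm_i915_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_lock(obj, NULL);
        if (ret)
                return ret;
        ret = i915_gem_object_unbind(obj,
                                     I915_GEM_OBJECT_UNBIND_ACTIVE |
                                     I915_GEM_OBJECT_UNBIND_BARRIER);
        i915_gem_object_unlock(obj);
        return ret;
}
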
static int
shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap_va(vaddr);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        unsigned int idx, offset;
        char __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct vm_page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}

#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }
        return unwritten;
}
#else
static inline bool
gtt_user_read(struct drm_i915_private *dev_priv,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        bus_space_handle_t bsh;
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        agp_map_atomic(dev_priv->agph, base, &bsh);
        vaddr = bus_space_vaddr(dev_priv->bst, bsh);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        agp_unmap_atomic(dev_priv->agph, bsh);
        if (unwritten) {
                agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
                vaddr = bus_space_vaddr(dev_priv->bst, bsh);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
        }
        return unwritten;
}
#endif

static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
                                             struct drm_mm_node *node,
                                             bool write)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        struct i915_gem_ww_ctx ww;
        int ret;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        vma = ERR_PTR(-ENODEV);
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err_ww;

        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
                goto err_ww;

        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                                  PIN_MAPPABLE |
                                                  PIN_NONBLOCK /* NOWARN */ |
                                                  PIN_NOEVICT);
        if (vma == ERR_PTR(-EDEADLK)) {
                ret = -EDEADLK;
                goto err_ww;
        } else if (!IS_ERR(vma)) {
                node->start = i915_ggtt_offset(vma);
                node->flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
                if (ret)
                        goto err_ww;
                GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = NULL;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret) {
                if (drm_mm_node_allocated(node)) {
                        ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                        remove_mappable_node(ggtt, node);
                } else {
                        i915_vma_unpin(vma);
                }
        }

err_ww:
        if (ret == -EDEADLK) {
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);

        return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
                                 struct drm_mm_node *node,
                                 struct i915_vma *vma)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        i915_gem_object_unpin_pages(obj);
        if (drm_mm_node_allocated(node)) {
                ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                remove_mappable_node(ggtt, node);
        } else {
                i915_vma_unpin(vma);
        }
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        void __user *user_data;
        struct i915_vma *vma;
        u64 remain, offset;
        int ret = 0;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        vma = i915_gem_gtt_prepare(obj, &node, false);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                } else {
                        page_base += offset & LINUX_PAGE_MASK;
                }

                if (gtt_user_read(i915, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PREAD is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);
        ret = -ENODEV;
        if (obj->ops->pread)
                ret = obj->ops->pread(obj, args);
        if (ret != -ENODEV)
                goto out;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

out:
        i915_gem_object_put(obj);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
#ifdef __linux__
static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
#else
static inline bool
ggtt_write(struct drm_i915_private *dev_priv,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        bus_space_handle_t bsh;
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        agp_map_atomic(dev_priv->agph, base, &bsh);
        vaddr = bus_space_vaddr(dev_priv->bst, bsh);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        agp_unmap_atomic(dev_priv->agph, bsh);
        if (unwritten) {
                agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
                vaddr = bus_space_vaddr(dev_priv->bst, bsh);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
        }

        return unwritten;
}
#endif

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        u64 remain, offset;
        void __user *user_data;
        int ret = 0;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = i915_gem_gtt_prepare(obj, &node, true);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & LINUX_PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (ggtt_write(i915, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap_va(vaddr);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
        void __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = curcpu()->ci_cflushsz - 1;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct vm_page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}

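A worked example of the partial-cacheline mask computed above (assuming curcpu()->ci_cflushsz is 64 bytes; the value is platform dependent):

/* partial_cacheline_write = 64 - 1 = 0x3f, so
 *      (offset | length) & partial_cacheline_write
 * is non-zero exactly when the write's start or its length is not
 * cacheline aligned:
 *      offset = 0,  length = 4096: (0 | 4096) & 0x3f == 0   -> no flush-before
 *      offset = 16, length = 48:   (16 | 48)  & 0x3f == 48  -> flush-before
 * An unaligned edge shares a cacheline with bytes the copy does not
 * overwrite, so that line must be clflushed first to keep them valid.
 */
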
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PWRITE is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            i915_gem_cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
        }

err:
        i915_gem_object_put(obj);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
         * run concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */

        list_for_each_entry_safe(obj, on,
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        list_for_each_entry_safe(obj, on,
                                 &i915->runtime_pm.lmem_userfault_list, userfault_link)
                i915_gem_object_runtime_pm_release_mmap_offset(obj);

        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reacquired by the user upon waking.
         */
        for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
                struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * trying to write through fence + GTT, as that both violates
                 * our tracking of activity and associated locking/barriers,
                 * but also is illegal given that the hw is powered down).
                 *
                 * Previously we used reg->pin_count as a "liveness" indicator.
                 * That is not sufficient, and we need a more fine-grained
                 * tool if we want to have a sanity check here.
                 */

                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}

static void discard_ggtt_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        spin_lock(&obj->vma.lock);
        if (!RB_EMPTY_NODE(&vma->obj_node)) {
                rb_erase(&vma->obj_node, &obj->vma.tree);
                RB_CLEAR_NODE(&vma->obj_node);
        }
        spin_unlock(&obj->vma.lock);
}

struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            const struct i915_gtt_view *view,
                            u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        int ret;

        GEM_WARN_ON(!ww);

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to do a search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

new_vma:
        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return vma;

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        /*
                         * If this misplaced vma is too big (i.e, at-least
                         * half the size of aperture) or hasn't been pinned
                         * mappable before, we ignore the misplacement when
                         * PIN_NONBLOCK is set in order to avoid the ping-pong
                         * issue described above. In other words, we try to
                         * avoid the costly operation of unbinding this vma
                         * from the GGTT and rebinding it back because there
                         * may not be enough space for this vma in the aperture.
                         */
                        if (flags & PIN_MAPPABLE &&
                            (vma->fence_size > ggtt->mappable_end / 2 ||
                             !i915_vma_is_map_and_fenceable(vma)))
                                return ERR_PTR(-ENOSPC);
                }

                if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
                        discard_ggtt_vma(vma);
                        goto new_vma;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);

        if (ret)
                return ERR_PTR(ret);

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
        }

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_gtt_view *view,
                         u64 size, u64 alignment, u64 flags)
{
        struct i915_gem_ww_ctx ww;
        struct i915_vma *ret;
        int err;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                                  alignment, flags);
                if (IS_ERR(ret))
                        err = PTR_ERR(ret);
        }

        return err ? ERR_PTR(err) : ret;
}

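The for_i915_gem_ww() loop above is the driver's ww-mutex backoff idiom; a generic sketch of the pattern, following the macro expansion visible in this listing (err starts at -EDEADLK, and __i915_gem_ww_fini() backs off and re-enters the loop while err remains -EDEADLK):

struct i915_gem_ww_ctx ww;
int err;

for_i915_gem_ww(&ww, err, true) {       /* true: interruptible waits */
        err = i915_gem_object_lock(obj, &ww);
        if (err)
                continue;       /* -EDEADLK backs off and retries;
                                 * any other error exits the loop */

        /* ... operate on obj while it is ww-locked ... */
}
/* here err is 0 on success, or the first non-EDEADLK failure */
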
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
                        i915_gem_object_make_shrinkable(obj);
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_make_unshrinkable(obj);
                        i915_gem_object_set_tiling_quirk(obj);
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED) {
                obj->mm.madv = args->madv;
                if (obj->ops->adjust_lru)
                        obj->ops->adjust_lru(obj);
        }

        if (i915_gem_object_has_pages(obj) ||
            i915_gem_object_has_self_managed_shrink_list(obj)) {
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                if (!list_empty(&obj->mm.link)) {
                        struct list_head *list;

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);
                }
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;

        i915_gem_object_unlock(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a little amount of time, during which the rcu callbacks could have added
 * new objects into the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
        while (atomic_read(&i915->mm.free_count)) {
                flush_work(&i915->mm.free_work);
                flush_delayed_work(&i915->bdev.wq);
                rcu_barrier();
        }
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
 * have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time after
 * waiting for the RCU grace period so that we catch work queued via RCU from
 * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
 * result, we assume that no more than 3 passes are required to catch all
 * _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < 3; i++) {
                flush_workqueue(i915->wq);
                rcu_barrier();
                i915_gem_drain_freed_objects(i915);
        }

        drain_workqueue(i915->wq);
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;
        int ret;

        /* We need to fallback to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;

        intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
        intel_wopcm_init(&dev_priv->wopcm);

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name intel_init_clock_gating applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
         * GT power context workaround. Worse, sometimes it includes a context
         * register workaround which we need to apply before we record the
         * default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_init_clock_gating(dev_priv);

        for_each_gt(gt, dev_priv, i) {
                ret = intel_gt_init(gt);
                if (ret)
                        goto err_unlock;
        }

        return 0;

        /*
         * Unwinding is complicated by that we want to handle -EIO to mean
         * disable GPU submission but keep KMS alive. We want to mark the
         * HW as irreversibly wedged, but keep enough state around that the
         * driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO) {
                for_each_gt(gt, dev_priv, i) {
                        intel_gt_driver_remove(gt);
                        intel_gt_driver_release(gt);
                        intel_uc_cleanup_firmwares(&gt->uc);
                }
        }

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                for_each_gt(gt, dev_priv, i) {
                        if (!intel_gt_is_wedged(gt)) {
                                i915_probe_error(dev_priv,
                                                 "Failed to initialize GPU, declaring it wedged!\n");
                                intel_gt_set_wedged(gt);
                        }
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(to_gt(dev_priv)->ggtt);
                intel_init_clock_gating(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);

        return ret;
}

1271 | void i915_gem_driver_register(struct drm_i915_privateinteldrm_softc *i915) | ||||
1272 | { | ||||
1273 | i915_gem_driver_register__shrinker(i915); | ||||
1274 | |||||
1275 | intel_engines_driver_register(i915); | ||||
1276 | } | ||||
1277 | |||||
1278 | void i915_gem_driver_unregister(struct drm_i915_privateinteldrm_softc *i915) | ||||
1279 | { | ||||
1280 | i915_gem_driver_unregister__shrinker(i915); | ||||
1281 | } | ||||

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
    struct intel_gt *gt;
    unsigned int i;

    i915_gem_suspend_late(dev_priv);
    for_each_gt(gt, dev_priv, i)
        intel_gt_driver_remove(gt);
    dev_priv->uabi_engines = RB_ROOT;

    /* Flush any outstanding unpin_work. */
    i915_gem_drain_workqueue(dev_priv);

    i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
    struct intel_gt *gt;
    unsigned int i;

    for_each_gt(gt, dev_priv, i) {
        intel_gt_driver_release(gt);
        intel_uc_cleanup_firmwares(&gt->uc);
    }

    /* Flush any outstanding work, including i915_gem_context.release_work. */
    i915_gem_drain_workqueue(dev_priv);

    drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
    mtx_init(&i915->mm.obj_lock, IPL_TTY);

    init_llist_head(&i915->mm.free_list);

    INIT_LIST_HEAD(&i915->mm.purge_list);
    INIT_LIST_HEAD(&i915->mm.shrink_list);

    i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
    i915_gem_init__mm(dev_priv);
    i915_gem_init__contexts(dev_priv);

    mtx_init(&dev_priv->display.fb_tracking.lock, IPL_NONE);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
    i915_gem_drain_freed_objects(dev_priv);
    GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
    GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
    drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
    struct drm_i915_file_private *file_priv;
    struct i915_drm_client *client;
    int ret = -ENOMEM;

    DRM_DEBUG("\n");

    file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
    if (!file_priv)
        goto err_alloc;

    client = i915_drm_client_add(&i915->clients);
    if (IS_ERR(client)) {
        ret = PTR_ERR(client);
        goto err_client;
    }

    file->driver_priv = file_priv;
    file_priv->dev_priv = i915;
    file_priv->file = file;
    file_priv->client = client;

    file_priv->bsd_engine = -1;
    file_priv->hang_timestamp = jiffies;

    ret = i915_gem_context_open(i915, file);
    if (ret)
        goto err_context;

    return 0;

err_context:
    i915_drm_client_put(client);
err_client:
    kfree(file_priv);
err_alloc:
    return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif
/* $OpenBSD: list.h,v 1.7 2023/01/18 23:47:25 jsg Exp $ */
/* drm_linux_list.h -- linux list functions for the BSDs.
 * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
 */
/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <anholt@FreeBSD.org>
 *
 */

#ifndef _DRM_LINUX_LIST_H_
#define _DRM_LINUX_LIST_H_

#include <sys/param.h>
#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/poison.h>

#define list_entry(ptr, type, member) container_of(ptr, type, member)

static inline void
INIT_LIST_HEAD(struct list_head *head) {
    (head)->next = head;
    (head)->prev = head;
}

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define DRM_LIST_HEAD(name) \
    struct list_head name = LIST_HEAD_INIT(name)
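
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: the three initialisation styles defined above, side by side.
 * "demo_list", "other" and demo_init() are hypothetical names.
 *
 *    DRM_LIST_HEAD(demo_list);                  // definition + init
 *    static struct list_head other = LIST_HEAD_INIT(other);
 *
 *    static void
 *    demo_init(struct list_head *h)
 *    {
 *        INIT_LIST_HEAD(h);                     // runtime (re)init
 *    }
 */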

static inline int
list_empty(const struct list_head *head) {
    return (head)->next == head;
}

static inline int
list_is_singular(const struct list_head *head) {
    return !list_empty(head) && ((head)->next == (head)->prev);
}

static inline int
list_is_first(const struct list_head *list,
    const struct list_head *head)
{
    return list->prev == head;
}

static inline int
list_is_last(const struct list_head *list,
    const struct list_head *head)
{
    return list->next == head;
}

static inline void
__list_add(struct list_head *new, struct list_head *prev,
    struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}

static inline void
list_add(struct list_head *new, struct list_head *head) {
    (head)->next->prev = new;
    (new)->next = (head)->next;
    (new)->prev = head;
    (head)->next = new;
}

static inline void
list_add_tail(struct list_head *entry, struct list_head *head) {
    (entry)->prev = (head)->prev;
    (entry)->next = head;
    (head)->prev->next = entry;
    (head)->prev = entry;
}
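
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: list_add() inserts right after the head (stack order),
 * list_add_tail() right before it (queue order).  "q", "a" and "b"
 * are hypothetical.
 *
 *    DRM_LIST_HEAD(q);
 *    struct list_head a, b;
 *
 *    list_add_tail(&a, &q);    // q -> a
 *    list_add(&b, &q);         // q -> b -> a
 */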

static inline void
list_del(struct list_head *entry) {
    (entry)->next->prev = (entry)->prev;
    (entry)->prev->next = (entry)->next;
}
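
/*
 * Editor's note: unlike its Linux counterpart, this list_del() does not
 * poison the removed entry; entry->next and entry->prev keep pointing
 * into the old list.  Use list_del_init() (below) if the entry may be
 * reused or tested with list_empty() afterwards.
 */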

#define __list_del_entry(x) list_del(x)

static inline void list_replace(struct list_head *old,
    struct list_head *new)
{
    new->next = old->next;
    new->next->prev = new;
    new->prev = old->prev;
    new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
    struct list_head *new)
{
    list_replace(old, new);
    INIT_LIST_HEAD(old);
}

static inline void list_move(struct list_head *list, struct list_head *head)
{
    list_del(list);
    list_add(list, head);
}

static inline void list_move_tail(struct list_head *list,
    struct list_head *head)
{
    list_del(list);
    list_add_tail(list, head);
}

static inline void
list_rotate_to_front(struct list_head *list, struct list_head *head)
{
    list_del(head);
    list_add_tail(head, list);
}

static inline void
list_bulk_move_tail(struct list_head *head, struct list_head *first,
    struct list_head *last)
{
    first->prev->next = last->next;
    last->next->prev = first->prev;
    head->prev->next = first;
    first->prev = head->prev;
    last->next = head;
    head->prev = last;
}
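
/*
 * Editor's note: list_bulk_move_tail() unlinks the sub-chain
 * first..last from whatever list currently holds it and appends the
 * whole chain at the tail of head.  first and last must be members of
 * the same list, in that order, and the chain must be non-empty.
 */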

static inline void
list_del_init(struct list_head *entry) {
    (entry)->next->prev = (entry)->prev;
    (entry)->prev->next = (entry)->next;
    INIT_LIST_HEAD(entry);
}

#define list_next_entry(pos, member) \
    list_entry(((pos)->member.next), typeof(*(pos)), member)

#define list_prev_entry(pos, member) \
    list_entry(((pos)->member.prev), typeof(*(pos)), member)

#define list_safe_reset_next(pos, n, member) \
    n = list_next_entry(pos, member)

#define list_for_each(entry, head) \
    for (entry = (head)->next; entry != head; entry = (entry)->next)

#define list_for_each_prev(entry, head) \
    for (entry = (head)->prev; entry != (head); \
        entry = entry->prev)

#define list_for_each_safe(entry, temp, head) \
    for (entry = (head)->next, temp = (entry)->next; \
        entry != head; \
        entry = temp, temp = entry->next)

#define list_for_each_entry_safe_reverse(pos, n, head, member) \
    for (pos = list_entry((head)->prev, __typeof(*pos), member), \
        n = list_entry((pos)->member.prev, __typeof(*pos), member); \
        &(pos)->member != (head); \
        pos = n, n = list_entry(n->member.prev, __typeof(*n), member))

#define list_for_each_entry_safe_from(pos, n, head, member) \
    for (n = list_entry(pos->member.next, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = n, n = list_entry(n->member.next, __typeof(*n), member))

#define list_for_each_entry(pos, head, member) \
    for (pos = list_entry((head)->next, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = list_entry(pos->member.next, __typeof(*pos), member))

#define list_for_each_entry_from(pos, head, member) \
    for (; \
        &pos->member != (head); \
        pos = list_entry(pos->member.next, __typeof(*pos), member))

#define list_for_each_entry_reverse(pos, head, member) \
    for (pos = list_entry((head)->prev, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = list_entry(pos->member.prev, __typeof(*pos), member))

#define list_for_each_entry_from_reverse(pos, head, member) \
    for (; \
        &pos->member != (head); \
        pos = list_entry(pos->member.prev, __typeof(*pos), member))

#define list_for_each_entry_continue(pos, head, member) \
    for (pos = list_entry((pos)->member.next, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = list_entry(pos->member.next, __typeof(*pos), member))

#define list_for_each_entry_continue_reverse(pos, head, member) \
    for (pos = list_entry(pos->member.prev, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = list_entry(pos->member.prev, __typeof(*pos), member))
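
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: typical list_for_each_entry() usage over a struct with an
 * embedded list_head.  "struct demo", its "link" member and
 * "demo_list" are hypothetical.
 *
 *    struct demo { int val; struct list_head link; };
 *    struct demo *d;
 *
 *    list_for_each_entry(d, &demo_list, link)
 *        printf("%d\n", d->val);
 */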

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
    for (pos = list_entry((head)->next, __typeof(*pos), member), \
        n = list_entry(pos->member.next, __typeof(*pos), member); \
        &pos->member != (head); \
        pos = n, n = list_entry(n->member.next, __typeof(*n), member))
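
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: draining a list with the _safe variant, whose lookahead
 * cursor makes it legal to unlink and free the current entry.
 * "struct demo", "demo_list" and demo_free() are hypothetical.
 *
 *    struct demo *d, *tmp;
 *
 *    list_for_each_entry_safe(d, tmp, &demo_list, link) {
 *        list_del(&d->link);
 *        demo_free(d);
 *    }
 */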

#define list_first_entry(ptr, type, member) \
    list_entry((ptr)->next, type, member)

#define list_first_entry_or_null(ptr, type, member) \
    (list_empty(ptr) ? NULL : list_first_entry(ptr, type, member))

#define list_last_entry(ptr, type, member) \
    list_entry((ptr)->prev, type, member)
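
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: the usual pop-front pattern built from these accessors.
 * "struct demo", its "link" member and "demo_list" are hypothetical.
 *
 *    struct demo *d;
 *
 *    d = list_first_entry_or_null(&demo_list, struct demo, link);
 *    if (d != NULL)
 *        list_del(&d->link);
 */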

static inline void
__list_splice(const struct list_head *list, struct list_head *prev,
    struct list_head *next)
{
    struct list_head *first = list->next;
    struct list_head *last = list->prev;

    first->prev = prev;
    prev->next = first;

    last->next = next;
    next->prev = last;
}
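
/*
 * Editor's note: the static-analyzer report attached to this file flags
 * "next->prev = last" above (line 262 of the original listing) as a
 * dereference of a NULL "next".  The callers below pass either "head"
 * or a neighbour of "head", so "next" cannot be NULL for a properly
 * initialised list head; the flagged path assumes a corrupt or
 * uninitialised head.
 */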

static inline void
list_splice(const struct list_head *list, struct list_head *head)
{
    if (list_empty(list))
        return;

    __list_splice(list, head, head->next);
}

static inline void
list_splice_init(struct list_head *list, struct list_head *head)
{
    if (list_empty(list))
        return;

    __list_splice(list, head, head->next);
    INIT_LIST_HEAD(list);
}

static inline void
list_splice_tail(const struct list_head *list, struct list_head *head)
{
    if (list_empty(list))
        return;

    __list_splice(list, head->prev, head);
}

static inline void
list_splice_tail_init(struct list_head *list, struct list_head *head)
{
    if (list_empty(list))
        return;

    __list_splice(list, head->prev, head);
    INIT_LIST_HEAD(list);
}
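
/*
 * Editor's note: prefer the _init variants whenever the donor list will
 * be looked at again -- plain list_splice() leaves the donor head still
 * pointing at the moved entries, so a later list_empty() check on it
 * would give the wrong answer.
 */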

void list_sort(void *, struct list_head *,
    int (*)(void *, const struct list_head *, const struct list_head *));

#define hlist_entry(ptr, type, member) \
    ((ptr) ? container_of(ptr, type, member) : NULL)

static inline void
INIT_HLIST_HEAD(struct hlist_head *head) {
    head->first = NULL;
}

static inline int
hlist_empty(const struct hlist_head *head) {
    return head->first == NULL;
}

static inline void
hlist_add_head(struct hlist_node *new, struct hlist_head *head)
{
    if ((new->next = head->first) != NULL)
        head->first->prev = &new->next;
    head->first = new;
    new->prev = &head->first;
}

static inline void
hlist_del_init(struct hlist_node *node)
{
    if (node->next != NULL)
        node->next->prev = node->prev;
    *(node->prev) = node->next;
    node->next = NULL;
    node->prev = NULL;
}

#define hlist_for_each(pos, head) \
    for (pos = (head)->first; pos != NULL; pos = pos->next)

#define hlist_for_each_entry(pos, head, member) \
    for (pos = hlist_entry((head)->first, __typeof(*pos), member); \
        pos != NULL; \
        pos = hlist_entry((pos)->member.next, __typeof(*pos), member))

#define hlist_for_each_entry_safe(pos, n, head, member) \
    for (pos = hlist_entry((head)->first, __typeof(*pos), member); \
        pos != NULL && (n = pos->member.next, 1); \
        pos = hlist_entry(n, __typeof(*pos), member))
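
/*
 * Editor's note -- an illustrative sketch, not part of the original
 * header: an hlist head is a single-pointer bucket (typical for hash
 * tables) and new nodes are pushed at the front.  Note that the
 * hlist_del_init() above writes through node->prev unconditionally, so
 * only call it on nodes currently on a list.  "struct demo_node" and
 * "bucket" are hypothetical.
 *
 *    struct demo_node { int key; struct hlist_node h; };
 *    struct demo_node a = { .key = 1 }, *n;
 *    struct hlist_head bucket;
 *
 *    INIT_HLIST_HEAD(&bucket);
 *    hlist_add_head(&a.h, &bucket);
 *    hlist_for_each_entry(n, &bucket, h)
 *        printf("%d\n", n->key);    // prints 1
 */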

#endif /* _DRM_LINUX_LIST_H_ */