Bug Summary

File: dev/pci/drm/include/linux/list.h
Warning: line 252, column 13
    Access to field 'prev' results in a dereference of a null pointer (loaded from variable 'next')
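
Distilled from the trace below: after the mtx_enter() call (note [14], where the analyzer stops tracking the fields of obj->vma.list), the path ends in a state where that list head's 'next' pointer is treated as NULL, and the list_splice_init() call at line 171 then dereferences it at list.h:252. The sketch below shows the shape of such a splice operating on well-formed lists; the types and the splice body are illustrative stand-ins, since the flagged lines of list.h are not included in the excerpt.

/* splice_sketch.c - illustration only, not the driver or list.h code */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *h) { h->next = h; h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void add_tail(struct list_head *e, struct list_head *h)
{
    e->prev = h->prev; e->next = h;
    h->prev->next = e; h->prev = e;
}

/* assumed shape of the splice the report flags */
static void splice_init(struct list_head *list, struct list_head *head)
{
    if (!list_empty(list)) {
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *next = head->next;  /* the trace treats this as NULL */

        first->prev = head;
        head->next = first;
        last->next = next;
        next->prev = last;                    /* the flagged 'next'->prev access */
        init_list(list);
    }
}

int main(void)
{
    struct list_head a, b, n1, n2;

    init_list(&a); init_list(&b);
    add_tail(&n1, &a);
    add_tail(&n2, &a);
    splice_init(&a, &b);   /* cf. list_splice_init(&still_in_list, &obj->vma.list) */
    printf("a empty: %d, b empty: %d\n", list_empty(&a), list_empty(&b));
    return 0;
}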

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name i915_gem.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_gem.c

/usr/src/sys/dev/pci/drm/i915/i915_gem.c

1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <drm/drm_vma_manager.h>
29#include <linux/dma-fence-array.h>
30#include <linux/kthread.h>
31#include <linux/dma-resv.h>
32#include <linux/shmem_fs.h>
33#include <linux/slab.h>
34#include <linux/stop_machine.h>
35#include <linux/swap.h>
36#include <linux/pci.h>
37#include <linux/dma-buf.h>
38#include <linux/mman.h>
39
40#include <dev/pci/agpvar.h>
41
42#include "display/intel_display.h"
43#include "display/intel_frontbuffer.h"
44
45#include "gem/i915_gem_clflush.h"
46#include "gem/i915_gem_context.h"
47#include "gem/i915_gem_ioctls.h"
48#include "gem/i915_gem_mman.h"
49#include "gem/i915_gem_region.h"
50#include "gt/intel_engine_user.h"
51#include "gt/intel_gt.h"
52#include "gt/intel_gt_pm.h"
53#include "gt/intel_workarounds.h"
54
55#include "i915_drv.h"
56#include "i915_trace.h"
57#include "i915_vgpu.h"
58
59#include "intel_pm.h"
60
61static int
62insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
63{
64 int err;
65
66 err = mutex_lock_interruptible(&ggtt->vm.mutex);
67 if (err)
68 return err;
69
70 memset(node, 0, sizeof(*node))__builtin_memset((node), (0), (sizeof(*node)));
71 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
72 size, 0, I915_COLOR_UNEVICTABLE(-1),
73 0, ggtt->mappable_end,
74 DRM_MM_INSERT_LOW);
75
76 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
77
78 return err;
79}
80
81static void
82remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
83{
84 mutex_lock(&ggtt->vm.mutex)rw_enter_write(&ggtt->vm.mutex);
85 drm_mm_remove_node(node);
86 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
87}
88
89int
90i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file)
92{
93 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
94 struct drm_i915_gem_get_aperture *args = data;
95 struct i915_vma *vma;
96 u64 pinned;
97
98 if (mutex_lock_interruptible(&ggtt->vm.mutex))
99 return -EINTR4;
100
101 pinned = ggtt->vm.reserved;
102 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)for (vma = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link
) *__mptr = ((&ggtt->vm.bound_list)->next); (__typeof
(*vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma)
, vm_link) );}); &vma->vm_link != (&ggtt->vm.bound_list
); vma = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link
) *__mptr = (vma->vm_link.next); (__typeof(*vma) *)( (char
*)__mptr - __builtin_offsetof(__typeof(*vma), vm_link) );}))
103 if (i915_vma_is_pinned(vma))
104 pinned += vma->node.size;
105
106 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
107
108 args->aper_size = ggtt->vm.total;
109 args->aper_available_size = args->aper_size - pinned;
110
111 return 0;
112}
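
For readability: the macro-expanded list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) above is an ordinary container_of() walk over the list links. A self-contained sketch of the same iteration pattern, with toy types standing in for the driver's structures (illustration only):

/* iterate_sketch.c - toy version of the container_of()-based iteration */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_vma { unsigned long node_size; struct list_head vm_link; };

int main(void)
{
    struct list_head bound_list = { &bound_list, &bound_list };
    struct toy_vma a = { 4096, { NULL, NULL } };
    struct toy_vma b = { 8192, { NULL, NULL } };
    struct list_head *p;
    unsigned long pinned = 0;

    /* open-coded list_add_tail() */
    a.vm_link.prev = bound_list.prev; a.vm_link.next = &bound_list;
    bound_list.prev->next = &a.vm_link; bound_list.prev = &a.vm_link;
    b.vm_link.prev = bound_list.prev; b.vm_link.next = &bound_list;
    bound_list.prev->next = &b.vm_link; bound_list.prev = &b.vm_link;

    /* the loop shape the macro expands to: walk the links, recover the
     * containing object from each link with container_of() */
    for (p = bound_list.next; p != &bound_list; p = p->next) {
        struct toy_vma *vma = container_of(p, struct toy_vma, vm_link);
        pinned += vma->node_size;
    }

    printf("pinned = %lu\n", pinned);
    return 0;
}
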
113
114int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
115 unsigned long flags)
116{
117 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
118 DRM_LIST_HEAD(still_in_list)struct list_head still_in_list = { &(still_in_list), &
(still_in_list) }
;
119 intel_wakeref_t wakeref;
120 struct i915_vma *vma;
121 int ret;
122
123 if (list_empty(&obj->vma.list))
    [1] Assuming the condition is false
    [2] Taking false branch
124 return 0;
125
126 /*
127 * As some machines use ACPI to handle runtime-resume callbacks, and
128 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
129 * as they are required by the shrinker. Ergo, we wake the device up
130 * first just in case.
131 */
132 wakeref = intel_runtime_pm_get(rpm);
133
134try_again:
135 ret = 0;
136 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
137 while (!ret
    [2.1] 'ret' is 0
&& (vma = list_first_entry_or_null(&obj->vma.list,(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
    [3] Assuming the condition is false
    [4] '?' condition is false
    [5] Loop condition is true. Entering loop body
    [15] Assuming 'ret' is 0
    [16] Assuming the condition is false
    [17] '?' condition is false
    [18] Assuming pointer value is null
    [19] Loop condition is false. Execution continues on line 171
138 struct i915_vma,(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
139 obj_link)(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
)
) {
140 struct i915_address_space *vm = vma->vm;
141
142 list_move_tail(&vma->obj_link, &still_in_list);
143 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK(((int)(1UL << (10))) | ((int)(1UL << (11))))))
    [6] Assuming the condition is false
    [7] Taking false branch
144 continue;
145
146 if (flags & I915_GEM_OBJECT_UNBIND_TEST(1UL << (2))) {
    [8] Assuming the condition is false
    [9] Taking false branch
147 ret = -EBUSY16;
148 break;
149 }
150
151 ret = -EAGAIN35;
152 if (!i915_vm_tryopen(vm))
    [10] Assuming the condition is false
    [11] Taking false branch
153 break;
154
155 /* Prevent vma being freed by i915_vma_parked as we unbind */
156 vma = __i915_vma_get(vma);
157 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
158
159 if (vma
    [11.1] 'vma' is non-null
) {
    [12] Taking true branch
160 ret = -EBUSY16;
161 if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE(1UL << (0)) ||
    [13] Assuming the condition is true
162 !i915_vma_is_active(vma))
163 ret = i915_vma_unbind(vma);
164
165 __i915_vma_put(vma);
166 }
167
168 i915_vm_close(vm);
169 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
    [14] Value assigned to field 'next'
170 }
171 list_splice_init(&still_in_list, &obj->vma.list);
    [20] Calling 'list_splice_init'
172 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
173
174 if (ret == -EAGAIN35 && flags & I915_GEM_OBJECT_UNBIND_BARRIER(1UL << (1))) {
175 rcu_barrier()__asm volatile("" : : : "memory"); /* flush the i915_vm_release() */
176 goto try_again;
177 }
178
179 intel_runtime_pm_put(rpm, wakeref);
180
181 return ret;
182}
183
184static int
185i915_gem_create(struct drm_file *file,
186 struct intel_memory_region *mr,
187 u64 *size_p,
188 u32 *handle_p)
189{
190 struct drm_i915_gem_object *obj;
191 u32 handle;
192 u64 size;
193 int ret;
194
195 GEM_BUG_ON(!is_power_of_2(mr->min_page_size))((void)0);
196 size = round_up(*size_p, mr->min_page_size)((((*size_p) + ((mr->min_page_size) - 1)) / (mr->min_page_size
)) * (mr->min_page_size))
;
197 if (size == 0)
198 return -EINVAL22;
199
200 /* For most of the ABI (e.g. mmap) we think in system pages */
201 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE))((void)0);
202
203 /* Allocate the new object */
204 obj = i915_gem_object_create_region(mr, size, 0);
205 if (IS_ERR(obj))
206 return PTR_ERR(obj);
207
208 ret = drm_gem_handle_create(file, &obj->base, &handle);
209 /* drop reference from allocate - handle holds it now */
210 i915_gem_object_put(obj);
211 if (ret)
212 return ret;
213
214 *handle_p = handle;
215 *size_p = size;
216 return 0;
217}
218
219int
220i915_gem_dumb_create(struct drm_file *file,
221 struct drm_device *dev,
222 struct drm_mode_create_dumb *args)
223{
224 enum intel_memory_type mem_type;
225 int cpp = DIV_ROUND_UP(args->bpp, 8)(((args->bpp) + ((8) - 1)) / (8));
226 u32 format;
227
228 switch (cpp) {
229 case 1:
230 format = DRM_FORMAT_C8((__u32)('C') | ((__u32)('8') << 8) | ((__u32)(' ') <<
16) | ((__u32)(' ') << 24))
;
231 break;
232 case 2:
233 format = DRM_FORMAT_RGB565((__u32)('R') | ((__u32)('G') << 8) | ((__u32)('1') <<
16) | ((__u32)('6') << 24))
;
234 break;
235 case 4:
236 format = DRM_FORMAT_XRGB8888((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
;
237 break;
238 default:
239 return -EINVAL22;
240 }
241
242 /* have to work out size/pitch and return them */
243 args->pitch = roundup2(args->width * cpp, 64)(((args->width * cpp) + ((64) - 1)) & (~((__typeof(args
->width * cpp))(64) - 1)))
;
244
245 /* align stride to page size so that we can remap */
246 if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
247 DRM_FORMAT_MOD_LINEAR((((__u64)0) << 56) | ((0) & 0x00ffffffffffffffULL)
)
))
248 args->pitch = roundup2(args->pitch, 4096)(((args->pitch) + ((4096) - 1)) & (~((__typeof(args->
pitch))(4096) - 1)))
;
249
250 if (args->pitch < args->width)
251 return -EINVAL22;
252
253 args->size = mul_u32_u32(args->pitch, args->height);
254
255 mem_type = INTEL_MEMORY_SYSTEM;
256 if (HAS_LMEM(to_i915(dev))((&(to_i915(dev))->__info)->memory_regions & ((
1UL << (INTEL_REGION_LMEM))))
)
257 mem_type = INTEL_MEMORY_LOCAL;
258
259 return i915_gem_create(file,
260 intel_memory_region_by_type(to_i915(dev),
261 mem_type),
262 &args->size, &args->handle);
263}
264
265/**
266 * Creates a new mm object and returns a handle to it.
267 * @dev: drm device pointer
268 * @data: ioctl data blob
269 * @file: drm file pointer
270 */
271int
272i915_gem_create_ioctl(struct drm_device *dev, void *data,
273 struct drm_file *file)
274{
275 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
276 struct drm_i915_gem_create *args = data;
277
278 i915_gem_flush_free_objects(i915);
279
280 return i915_gem_create(file,
281 intel_memory_region_by_type(i915,
282 INTEL_MEMORY_SYSTEM),
283 &args->size, &args->handle);
284}
285
286static int
287shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
288 bool_Bool needs_clflush)
289{
290 char *vaddr;
291 int ret;
292
293 vaddr = kmap(page);
294
295 if (needs_clflush)
296 drm_clflush_virt_range(vaddr + offset, len);
297
298 ret = __copy_to_user(user_data, vaddr + offset, len);
299
300 kunmap_va(vaddr);
301
302 return ret ? -EFAULT14 : 0;
303}
304
305static int
306i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
307 struct drm_i915_gem_pread *args)
308{
309 unsigned int needs_clflush;
310 unsigned int idx, offset;
311 struct dma_fence *fence;
312 char __user *user_data;
313 u64 remain;
314 int ret;
315
316 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
317 if (ret)
318 return ret;
319
320 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
321 if (ret) {
322 i915_gem_object_unlock(obj);
323 return ret;
324 }
325
326 fence = i915_gem_object_lock_fence(obj);
327 i915_gem_object_finish_access(obj);
328 i915_gem_object_unlock(obj);
329
330 if (!fence)
331 return -ENOMEM12;
332
333 remain = args->size;
334 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
335 offset = offset_in_page(args->offset)((vaddr_t)(args->offset) & ((1 << 12) - 1));
336 for (idx = args->offset >> PAGE_SHIFT12; remain; idx++) {
337 struct vm_page *page = i915_gem_object_get_page(obj, idx);
338 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset)({ u64 __min_a = (remain); u64 __min_b = ((1 << 12) - offset
); __min_a < __min_b ? __min_a : __min_b; })
;
339
340 ret = shmem_pread(page, offset, length, user_data,
341 needs_clflush);
342 if (ret)
343 break;
344
345 remain -= length;
346 user_data += length;
347 offset = 0;
348 }
349
350 i915_gem_object_unlock_fence(obj, fence);
351 return ret;
352}
353
354#ifdef __linux__
355static inline bool_Bool
356gtt_user_read(struct io_mapping *mapping,
357 loff_t base, int offset,
358 char __user *user_data, int length)
359{
360 void __iomem *vaddr;
361 unsigned long unwritten;
362
363 /* We can use the cpu mem copy function because this is X86. */
364 vaddr = io_mapping_map_atomic_wc(mapping, base);
365 unwritten = __copy_to_user_inatomic(user_data,
366 (void __force *)vaddr + offset,
367 length);
368 io_mapping_unmap_atomic(vaddr);
369 if (unwritten) {
370 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE(1 << 12));
371 unwritten = copy_to_user(user_data,
372 (void __force *)vaddr + offset,
373 length);
374 io_mapping_unmap(vaddr);
375 }
376 return unwritten;
377}
378#else
379static inline bool_Bool
380gtt_user_read(struct drm_i915_privateinteldrm_softc *dev_priv,
381 loff_t base, int offset,
382 char __user *user_data, int length)
383{
384 bus_space_handle_t bsh;
385 void __iomem *vaddr;
386 unsigned long unwritten;
387
388 /* We can use the cpu mem copy function because this is X86. */
389 agp_map_atomic(dev_priv->agph, base, &bsh);
390 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
391 unwritten = __copy_to_user_inatomic(user_data,
392 (void __force *)vaddr + offset,
393 length);
394 agp_unmap_atomic(dev_priv->agph, bsh);
395 if (unwritten) {
396 agp_map_subregion(dev_priv->agph, base, PAGE_SIZE(1 << 12), &bsh);
397 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
398 unwritten = copy_to_user(user_data,
399 (void __force *)vaddr + offset,
400 length);
401 agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE(1 << 12));
402 }
403 return unwritten;
404}
405#endif
406
407static int
408i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
409 const struct drm_i915_gem_pread *args)
410{
411 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
412 struct i915_ggtt *ggtt = &i915->ggtt;
413 intel_wakeref_t wakeref;
414 struct drm_mm_node node;
415 struct dma_fence *fence;
416 void __user *user_data;
417 struct i915_vma *vma;
418 u64 remain, offset;
419 int ret;
420
421 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
422 vma = ERR_PTR(-ENODEV19);
423 if (!i915_gem_object_is_tiled(obj))
424 vma = i915_gem_object_ggtt_pin(obj, NULL((void *)0), 0, 0,
425 PIN_MAPPABLE(1ULL << (3)) |
426 PIN_NONBLOCK(1ULL << (2)) /* NOWARN */ |
427 PIN_NOEVICT(1ULL << (0)));
428 if (!IS_ERR(vma)) {
429 node.start = i915_ggtt_offset(vma);
430 node.flags = 0;
431 } else {
432 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE(1 << 12));
433 if (ret)
434 goto out_rpm;
435 GEM_BUG_ON(!drm_mm_node_allocated(&node))((void)0);
436 }
437
438 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
439 if (ret)
440 goto out_unpin;
441
442 ret = i915_gem_object_set_to_gtt_domain(obj, false0);
443 if (ret) {
444 i915_gem_object_unlock(obj);
445 goto out_unpin;
446 }
447
448 fence = i915_gem_object_lock_fence(obj);
449 i915_gem_object_unlock(obj);
450 if (!fence) {
451 ret = -ENOMEM12;
452 goto out_unpin;
453 }
454
455 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
456 remain = args->size;
457 offset = args->offset;
458
459 while (remain > 0) {
460 /* Operation in this page
461 *
462 * page_base = page offset within aperture
463 * page_offset = offset within page
464 * page_length = bytes to copy for this page
465 */
466 u32 page_base = node.start;
467 unsigned page_offset = offset_in_page(offset)((vaddr_t)(offset) & ((1 << 12) - 1));
468 unsigned page_length = PAGE_SIZE(1 << 12) - page_offset;
469 page_length = remain < page_length ? remain : page_length;
470 if (drm_mm_node_allocated(&node)) {
471 ggtt->vm.insert_page(&ggtt->vm,
472 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT12),
473 node.start, I915_CACHE_NONE, 0);
474 } else {
475 page_base += offset & LINUX_PAGE_MASK(~((1 << 12) - 1));
476 }
477
478 if (gtt_user_read(i915, page_base, page_offset,
479 user_data, page_length)) {
480 ret = -EFAULT14;
481 break;
482 }
483
484 remain -= page_length;
485 user_data += page_length;
486 offset += page_length;
487 }
488
489 i915_gem_object_unlock_fence(obj, fence);
490out_unpin:
491 if (drm_mm_node_allocated(&node)) {
492 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
493 remove_mappable_node(ggtt, &node);
494 } else {
495 i915_vma_unpin(vma);
496 }
497out_rpm:
498 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
499 return ret;
500}
501
502/**
503 * Reads data from the object referenced by handle.
504 * @dev: drm device pointer
505 * @data: ioctl data blob
506 * @file: drm file pointer
507 *
508 * On error, the contents of *data are undefined.
509 */
510int
511i915_gem_pread_ioctl(struct drm_device *dev, void *data,
512 struct drm_file *file)
513{
514 struct drm_i915_gem_pread *args = data;
515 struct drm_i915_gem_object *obj;
516 int ret;
517
518 if (args->size == 0)
519 return 0;
520
521 if (!access_ok(u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr)),
522 args->size))
523 return -EFAULT14;
524
525 obj = i915_gem_object_lookup(file, args->handle);
526 if (!obj)
527 return -ENOENT2;
528
529 /* Bounds check source. */
530 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)({ typeof((u64)(args->offset)) start__ = ((u64)(args->offset
)); typeof((u64)(args->size)) size__ = ((u64)(args->size
)); typeof((u64)(obj->base.size)) max__ = ((u64)(obj->base
.size)); (void)(&start__ == &size__); (void)(&start__
== &max__); start__ >= max__ || size__ > max__ - start__
; })
) {
531 ret = -EINVAL22;
532 goto out;
533 }
534
535 trace_i915_gem_object_pread(obj, args->offset, args->size);
536
537 ret = -ENODEV19;
538 if (obj->ops->pread)
539 ret = obj->ops->pread(obj, args);
540 if (ret != -ENODEV19)
541 goto out;
542
543 ret = i915_gem_object_wait(obj,
544 I915_WAIT_INTERRUPTIBLE(1UL << (0)),
545 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
546 if (ret)
547 goto out;
548
549 ret = i915_gem_object_pin_pages(obj);
550 if (ret)
551 goto out;
552
553 ret = i915_gem_shmem_pread(obj, args);
554 if (ret == -EFAULT14 || ret == -ENODEV19)
555 ret = i915_gem_gtt_pread(obj, args);
556
557 i915_gem_object_unpin_pages(obj);
558out:
559 i915_gem_object_put(obj);
560 return ret;
561}
562
563/* This is the fast write path which cannot handle
564 * page faults in the source data
565 */
566#ifdef __linux__
567static inline bool_Bool
568ggtt_write(struct io_mapping *mapping,
569 loff_t base, int offset,
570 char __user *user_data, int length)
571{
572 void __iomem *vaddr;
573 unsigned long unwritten;
574
575 /* We can use the cpu mem copy function because this is X86. */
576 vaddr = io_mapping_map_atomic_wc(mapping, base);
577 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
578 user_data, length);
579 io_mapping_unmap_atomic(vaddr);
580 if (unwritten) {
581 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE(1 << 12));
582 unwritten = copy_from_user((void __force *)vaddr + offset,
583 user_data, length);
584 io_mapping_unmap(vaddr);
585 }
586
587 return unwritten;
588}
589#else
590static inline bool_Bool
591ggtt_write(struct drm_i915_privateinteldrm_softc *dev_priv,
592 loff_t base, int offset,
593 char __user *user_data, int length)
594{
595 bus_space_handle_t bsh;
596 void __iomem *vaddr;
597 unsigned long unwritten;
598
599 /* We can use the cpu mem copy function because this is X86. */
600 agp_map_atomic(dev_priv->agph, base, &bsh);
601 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
602 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
603 user_data, length);
604 agp_unmap_atomic(dev_priv->agph, bsh);
605 if (unwritten) {
606 agp_map_subregion(dev_priv->agph, base, PAGE_SIZE(1 << 12), &bsh);
607 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
608 unwritten = copy_from_user((void __force *)vaddr + offset,
609 user_data, length);
610 agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE(1 << 12));
611 }
612
613 return unwritten;
614}
615#endif
616
617/**
618 * This is the fast pwrite path, where we copy the data directly from the
619 * user into the GTT, uncached.
620 * @obj: i915 GEM object
621 * @args: pwrite arguments structure
622 */
623static int
624i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
625 const struct drm_i915_gem_pwrite *args)
626{
627 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
628 struct i915_ggtt *ggtt = &i915->ggtt;
629 struct intel_runtime_pm *rpm = &i915->runtime_pm;
630 intel_wakeref_t wakeref;
631 struct drm_mm_node node;
632 struct dma_fence *fence;
633 struct i915_vma *vma;
634 u64 remain, offset;
635 void __user *user_data;
636 int ret;
637
638 if (i915_gem_object_has_struct_page(obj)) {
639 /*
640 * Avoid waking the device up if we can fallback, as
641 * waking/resuming is very slow (worst-case 10-100 ms
642 * depending on PCI sleeps and our own resume time).
643 * This easily dwarfs any performance advantage from
644 * using the cache bypass of indirect GGTT access.
645 */
646 wakeref = intel_runtime_pm_get_if_in_use(rpm);
647 if (!wakeref)
648 return -EFAULT14;
649 } else {
650 /* No backing pages, no fallback, we must force GGTT access */
651 wakeref = intel_runtime_pm_get(rpm);
652 }
653
654 vma = ERR_PTR(-ENODEV19);
655 if (!i915_gem_object_is_tiled(obj))
656 vma = i915_gem_object_ggtt_pin(obj, NULL((void *)0), 0, 0,
657 PIN_MAPPABLE(1ULL << (3)) |
658 PIN_NONBLOCK(1ULL << (2)) /* NOWARN */ |
659 PIN_NOEVICT(1ULL << (0)));
660 if (!IS_ERR(vma)) {
661 node.start = i915_ggtt_offset(vma);
662 node.flags = 0;
663 } else {
664 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE(1 << 12));
665 if (ret)
666 goto out_rpm;
667 GEM_BUG_ON(!drm_mm_node_allocated(&node))((void)0);
668 }
669
670 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
671 if (ret)
672 goto out_unpin;
673
674 ret = i915_gem_object_set_to_gtt_domain(obj, true1);
675 if (ret) {
676 i915_gem_object_unlock(obj);
677 goto out_unpin;
678 }
679
680 fence = i915_gem_object_lock_fence(obj);
681 i915_gem_object_unlock(obj);
682 if (!fence) {
683 ret = -ENOMEM12;
684 goto out_unpin;
685 }
686
687 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
688
689 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
690 offset = args->offset;
691 remain = args->size;
692 while (remain) {
693 /* Operation in this page
694 *
695 * page_base = page offset within aperture
696 * page_offset = offset within page
697 * page_length = bytes to copy for this page
698 */
699 u32 page_base = node.start;
700 unsigned int page_offset = offset_in_page(offset)((vaddr_t)(offset) & ((1 << 12) - 1));
701 unsigned int page_length = PAGE_SIZE(1 << 12) - page_offset;
702 page_length = remain < page_length ? remain : page_length;
703 if (drm_mm_node_allocated(&node)) {
704 /* flush the write before we modify the GGTT */
705 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
706 ggtt->vm.insert_page(&ggtt->vm,
707 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT12),
708 node.start, I915_CACHE_NONE, 0);
709 wmb()do { __asm volatile("sfence" ::: "memory"); } while (0); /* flush modifications to the GGTT (insert_page) */
710 } else {
711 page_base += offset & LINUX_PAGE_MASK(~((1 << 12) - 1));
712 }
713 /* If we get a fault while copying data, then (presumably) our
714 * source page isn't available. Return the error and we'll
715 * retry in the slow path.
716 * If the object is non-shmem backed, we retry again with the
717 * path that handles page fault.
718 */
719 if (ggtt_write(i915, page_base, page_offset,
720 user_data, page_length)) {
721 ret = -EFAULT14;
722 break;
723 }
724
725 remain -= page_length;
726 user_data += page_length;
727 offset += page_length;
728 }
729
730 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
731 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
732
733 i915_gem_object_unlock_fence(obj, fence);
734out_unpin:
735 if (drm_mm_node_allocated(&node)) {
736 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
737 remove_mappable_node(ggtt, &node);
738 } else {
739 i915_vma_unpin(vma);
740 }
741out_rpm:
742 intel_runtime_pm_put(rpm, wakeref);
743 return ret;
744}
745
746/* Per-page copy function for the shmem pwrite fastpath.
747 * Flushes invalid cachelines before writing to the target if
748 * needs_clflush_before is set and flushes out any written cachelines after
749 * writing if needs_clflush is set.
750 */
751static int
752shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
753 bool_Bool needs_clflush_before,
754 bool_Bool needs_clflush_after)
755{
756 char *vaddr;
757 int ret;
758
759 vaddr = kmap(page);
760
761 if (needs_clflush_before)
762 drm_clflush_virt_range(vaddr + offset, len);
763
764 ret = __copy_from_user(vaddr + offset, user_data, len);
765 if (!ret && needs_clflush_after)
766 drm_clflush_virt_range(vaddr + offset, len);
767
768 kunmap_va(vaddr);
769
770 return ret ? -EFAULT14 : 0;
771}
772
773static int
774i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
775 const struct drm_i915_gem_pwrite *args)
776{
777 unsigned int partial_cacheline_write;
778 unsigned int needs_clflush;
779 unsigned int offset, idx;
780 struct dma_fence *fence;
781 void __user *user_data;
782 u64 remain;
783 int ret;
784
785 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
786 if (ret)
787 return ret;
788
789 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
790 if (ret) {
791 i915_gem_object_unlock(obj);
792 return ret;
793 }
794
795 fence = i915_gem_object_lock_fence(obj);
796 i915_gem_object_finish_access(obj);
797 i915_gem_object_unlock(obj);
798
799 if (!fence)
800 return -ENOMEM12;
801
802 /* If we don't overwrite a cacheline completely we need to be
803 * careful to have up-to-date data by first clflushing. Don't
804 * overcomplicate things and flush the entire patch.
805 */
806 partial_cacheline_write = 0;
807 if (needs_clflush & CLFLUSH_BEFORE(1UL << (0)))
808 partial_cacheline_write = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})
->ci_cflushsz - 1;
809
810 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
811 remain = args->size;
812 offset = offset_in_page(args->offset)((vaddr_t)(args->offset) & ((1 << 12) - 1));
813 for (idx = args->offset >> PAGE_SHIFT12; remain; idx++) {
814 struct vm_page *page = i915_gem_object_get_page(obj, idx);
815 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset)({ u64 __min_a = (remain); u64 __min_b = ((1 << 12) - offset
); __min_a < __min_b ? __min_a : __min_b; })
;
816
817 ret = shmem_pwrite(page, offset, length, user_data,
818 (offset | length) & partial_cacheline_write,
819 needs_clflush & CLFLUSH_AFTER(1UL << (1)));
820 if (ret)
821 break;
822
823 remain -= length;
824 user_data += length;
825 offset = 0;
826 }
827
828 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
829 i915_gem_object_unlock_fence(obj, fence);
830
831 return ret;
832}
833
834/**
835 * Writes data to the object referenced by handle.
836 * @dev: drm device
837 * @data: ioctl data blob
838 * @file: drm file
839 *
840 * On error, the contents of the buffer that were to be modified are undefined.
841 */
842int
843i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
844 struct drm_file *file)
845{
846 struct drm_i915_gem_pwrite *args = data;
847 struct drm_i915_gem_object *obj;
848 int ret;
849
850 if (args->size == 0)
851 return 0;
852
853 if (!access_ok(u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr)), args->size))
854 return -EFAULT14;
855
856 obj = i915_gem_object_lookup(file, args->handle);
857 if (!obj)
858 return -ENOENT2;
859
860 /* Bounds check destination. */
861 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)({ typeof((u64)(args->offset)) start__ = ((u64)(args->offset
)); typeof((u64)(args->size)) size__ = ((u64)(args->size
)); typeof((u64)(obj->base.size)) max__ = ((u64)(obj->base
.size)); (void)(&start__ == &size__); (void)(&start__
== &max__); start__ >= max__ || size__ > max__ - start__
; })
) {
862 ret = -EINVAL22;
863 goto err;
864 }
865
866 /* Writes not allowed into this read-only object */
867 if (i915_gem_object_is_readonly(obj)) {
868 ret = -EINVAL22;
869 goto err;
870 }
871
872 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
873
874 ret = -ENODEV19;
875 if (obj->ops->pwrite)
876 ret = obj->ops->pwrite(obj, args);
877 if (ret != -ENODEV19)
878 goto err;
879
880 ret = i915_gem_object_wait(obj,
881 I915_WAIT_INTERRUPTIBLE(1UL << (0)) |
882 I915_WAIT_ALL(1UL << (2)),
883 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
884 if (ret)
885 goto err;
886
887 ret = i915_gem_object_pin_pages(obj);
888 if (ret)
889 goto err;
890
891 ret = -EFAULT14;
892 /* We can only do the GTT pwrite on untiled buffers, as otherwise
893 * it would end up going through the fenced access, and we'll get
894 * different detiling behavior between reading and writing.
895 * pread/pwrite currently are reading and writing from the CPU
896 * perspective, requiring manual detiling by the client.
897 */
898 if (!i915_gem_object_has_struct_page(obj) ||
899 cpu_write_needs_clflush(obj))
900 /* Note that the gtt paths might fail with non-page-backed user
901 * pointers (e.g. gtt mappings when moving data between
902 * textures). Fallback to the shmem path in that case.
903 */
904 ret = i915_gem_gtt_pwrite_fast(obj, args);
905
906 if (ret == -EFAULT14 || ret == -ENOSPC28) {
907 if (i915_gem_object_has_struct_page(obj))
908 ret = i915_gem_shmem_pwrite(obj, args);
909 }
910
911 i915_gem_object_unpin_pages(obj);
912err:
913 i915_gem_object_put(obj);
914 return ret;
915}
916
917/**
918 * Called when user space has done writes to this buffer
919 * @dev: drm device
920 * @data: ioctl data blob
921 * @file: drm file
922 */
923int
924i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
925 struct drm_file *file)
926{
927 struct drm_i915_gem_sw_finish *args = data;
928 struct drm_i915_gem_object *obj;
929
930 obj = i915_gem_object_lookup(file, args->handle);
931 if (!obj)
932 return -ENOENT2;
933
934 /*
935 * Proxy objects are barred from CPU access, so there is no
936 * need to ban sw_finish as it is a nop.
937 */
938
939 /* Pinned buffers may be scanout, so flush the cache */
940 i915_gem_object_flush_if_display(obj);
941 i915_gem_object_put(obj);
942
943 return 0;
944}
945
946void i915_gem_runtime_suspend(struct drm_i915_privateinteldrm_softc *i915)
947{
948 struct drm_i915_gem_object *obj, *on;
949 int i;
950
951 /*
952 * Only called during RPM suspend. All users of the userfault_list
953 * must be holding an RPM wakeref to ensure that this can not
954 * run concurrently with themselves (and use the struct_mutex for
955 * protection between themselves).
956 */
957
958 list_for_each_entry_safe(obj, on,for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->userfault_link
) *__mptr = ((&i915->ggtt.userfault_list)->next); (
__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*obj), userfault_link) );}), on = ({ const __typeof( ((__typeof
(*obj) *)0)->userfault_link ) *__mptr = (obj->userfault_link
.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*obj), userfault_link) );}); &obj->userfault_link
!= (&i915->ggtt.userfault_list); obj = on, on = ({ const
__typeof( ((__typeof(*on) *)0)->userfault_link ) *__mptr =
(on->userfault_link.next); (__typeof(*on) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*on), userfault_link) );}))
959 &i915->ggtt.userfault_list, userfault_link)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->userfault_link
) *__mptr = ((&i915->ggtt.userfault_list)->next); (
__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*obj), userfault_link) );}), on = ({ const __typeof( ((__typeof
(*obj) *)0)->userfault_link ) *__mptr = (obj->userfault_link
.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*obj), userfault_link) );}); &obj->userfault_link
!= (&i915->ggtt.userfault_list); obj = on, on = ({ const
__typeof( ((__typeof(*on) *)0)->userfault_link ) *__mptr =
(on->userfault_link.next); (__typeof(*on) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*on), userfault_link) );}))
960 __i915_gem_object_release_mmap_gtt(obj);
961
962 /*
963 * The fence will be lost when the device powers down. If any were
964 * in use by hardware (i.e. they are pinned), we should not be powering
965 * down! All other fences will be reacquired by the user upon waking.
966 */
967 for (i = 0; i < i915->ggtt.num_fences; i++) {
968 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
969
970 /*
971 * Ideally we want to assert that the fence register is not
972 * live at this point (i.e. that no piece of code will be
973 * trying to write through fence + GTT, as that both violates
974 * our tracking of activity and associated locking/barriers,
975 * but also is illegal given that the hw is powered down).
976 *
977 * Previously we used reg->pin_count as a "liveness" indicator.
978 * That is not sufficient, and we need a more fine-grained
979 * tool if we want to have a sanity check here.
980 */
981
982 if (!reg->vma)
983 continue;
984
985 GEM_BUG_ON(i915_vma_has_userfault(reg->vma))((void)0);
986 reg->dirty = true1;
987 }
988}
989
990static void discard_ggtt_vma(struct i915_vma *vma)
991{
992 struct drm_i915_gem_object *obj = vma->obj;
993
994 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
995 if (!RB_EMPTY_NODE(&vma->obj_node)((&vma->obj_node)->__entry.rbe_parent == &vma->
obj_node)
) {
996 rb_erase(&vma->obj_node, &obj->vma.tree)linux_root_RB_REMOVE((struct linux_root *)(&obj->vma.tree
), (&vma->obj_node))
;
997 RB_CLEAR_NODE(&vma->obj_node)(((&vma->obj_node))->__entry.rbe_parent = (&vma
->obj_node))
;
998 }
999 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
1000}
1001
1002struct i915_vma *
1003i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1004 struct i915_gem_ww_ctx *ww,
1005 const struct i915_ggtt_view *view,
1006 u64 size, u64 alignment, u64 flags)
1007{
1008 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
1009 struct i915_ggtt *ggtt = &i915->ggtt;
1010 struct i915_vma *vma;
1011 int ret;
1012
1013 if (flags & PIN_MAPPABLE(1ULL << (3)) &&
1014 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1015 /*
1016 * If the required space is larger than the available
1017 * aperture, we will not able to find a slot for the
1018 * object and unbinding the object now will be in
1019 * vain. Worse, doing so may cause us to ping-pong
1020 * the object in and out of the Global GTT and
1021 * waste a lot of cycles under the mutex.
1022 */
1023 if (obj->base.size > ggtt->mappable_end)
1024 return ERR_PTR(-E2BIG7);
1025
1026 /*
1027 * If NONBLOCK is set the caller is optimistically
1028 * trying to cache the full object within the mappable
1029 * aperture, and *must* have a fallback in place for
1030 * situations where we cannot bind the object. We
1031 * can be a little more lax here and use the fallback
1032 * more often to avoid costly migrations of ourselves
1033 * and other objects within the aperture.
1034 *
1035 * Half-the-aperture is used as a simple heuristic.
1036 * More interesting would to do search for a free
1037 * block prior to making the commitment to unbind.
1038 * That caters for the self-harm case, and with a
1039 * little more heuristics (e.g. NOFAULT, NOEVICT)
1040 * we could try to minimise harm to others.
1041 */
1042 if (flags & PIN_NONBLOCK(1ULL << (2)) &&
1043 obj->base.size > ggtt->mappable_end / 2)
1044 return ERR_PTR(-ENOSPC28);
1045 }
1046
1047new_vma:
1048 vma = i915_vma_instance(obj, &ggtt->vm, view);
1049 if (IS_ERR(vma))
1050 return vma;
1051
1052 if (i915_vma_misplaced(vma, size, alignment, flags)) {
1053 if (flags & PIN_NONBLOCK(1ULL << (2))) {
1054 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1055 return ERR_PTR(-ENOSPC28);
1056
1057 if (flags & PIN_MAPPABLE(1ULL << (3)) &&
1058 vma->fence_size > ggtt->mappable_end / 2)
1059 return ERR_PTR(-ENOSPC28);
1060 }
1061
1062 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
1063 discard_ggtt_vma(vma);
1064 goto new_vma;
1065 }
1066
1067 ret = i915_vma_unbind(vma);
1068 if (ret)
1069 return ERR_PTR(ret);
1070 }
1071
1072 ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL(1ULL << (10)));
1073 if (ret)
1074 return ERR_PTR(ret);
1075
1076 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1077 mutex_lock(&ggtt->vm.mutex)rw_enter_write(&ggtt->vm.mutex);
1078 i915_vma_revoke_fence(vma);
1079 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
1080 }
1081
1082 ret = i915_vma_wait_for_bind(vma);
1083 if (ret) {
1084 i915_vma_unpin(vma);
1085 return ERR_PTR(ret);
1086 }
1087
1088 return vma;
1089}
1090
1091int
1092i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1093 struct drm_file *file_priv)
1094{
1095 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
1096 struct drm_i915_gem_madvise *args = data;
1097 struct drm_i915_gem_object *obj;
1098 int err;
1099
1100 switch (args->madv) {
1101 case I915_MADV_DONTNEED1:
1102 case I915_MADV_WILLNEED0:
1103 break;
1104 default:
1105 return -EINVAL22;
1106 }
1107
1108 obj = i915_gem_object_lookup(file_priv, args->handle);
1109 if (!obj)
1110 return -ENOENT2;
1111
1112 err = mutex_lock_interruptible(&obj->mm.lock);
1113 if (err)
1114 goto out;
1115
1116 if (i915_gem_object_has_pages(obj) &&
1117 i915_gem_object_is_tiled(obj) &&
1118 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES(1<<5)) {
1119 if (obj->mm.madv == I915_MADV_WILLNEED0) {
1120 GEM_BUG_ON(!obj->mm.quirked)((void)0);
1121 __i915_gem_object_unpin_pages(obj);
1122 obj->mm.quirked = false0;
1123 }
1124 if (args->madv == I915_MADV_WILLNEED0) {
1125 GEM_BUG_ON(obj->mm.quirked)((void)0);
1126 __i915_gem_object_pin_pages(obj);
1127 obj->mm.quirked = true1;
1128 }
1129 }
1130
1131 if (obj->mm.madv != __I915_MADV_PURGED2)
1132 obj->mm.madv = args->madv;
1133
1134 if (i915_gem_object_has_pages(obj)) {
1135 struct list_head *list;
1136
1137 if (i915_gem_object_is_shrinkable(obj)) {
1138 unsigned long flags;
1139
1140 spin_lock_irqsave(&i915->mm.obj_lock, flags)do { flags = 0; mtx_enter(&i915->mm.obj_lock); } while
(0)
;
1141
1142 if (obj->mm.madv != I915_MADV_WILLNEED0)
1143 list = &i915->mm.purge_list;
1144 else
1145 list = &i915->mm.shrink_list;
1146 list_move_tail(&obj->mm.link, list);
1147
1148 spin_unlock_irqrestore(&i915->mm.obj_lock, flags)do { (void)(flags); mtx_leave(&i915->mm.obj_lock); } while
(0)
;
1149 }
1150 }
1151
1152 /* if the object is no longer attached, discard its backing storage */
1153 if (obj->mm.madv == I915_MADV_DONTNEED1 &&
1154 !i915_gem_object_has_pages(obj))
1155 i915_gem_object_truncate(obj);
1156
1157 args->retained = obj->mm.madv != __I915_MADV_PURGED2;
1158 mutex_unlock(&obj->mm.lock)rw_exit_write(&obj->mm.lock);
1159
1160out:
1161 i915_gem_object_put(obj);
1162 return err;
1163}
1164
1165int i915_gem_init(struct drm_i915_privateinteldrm_softc *dev_priv)
1166{
1167 int ret;
1168
1169 /* We need to fallback to 4K pages if host doesn't support huge gtt. */
1170 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1171 mkwrite_device_info(dev_priv)->page_sizes =
1172 I915_GTT_PAGE_SIZE_4K(1ULL << (12));
1173
1174 ret = i915_gem_init_userptr(dev_priv);
1175 if (ret)
1176 return ret;
1177
1178 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1179 intel_wopcm_init(&dev_priv->wopcm);
1180
1181 ret = i915_init_ggtt(dev_priv);
1182 if (ret) {
1183 GEM_BUG_ON(ret == -EIO)((void)0);
1184 goto err_unlock;
1185 }
1186
1187 /*
1188 * Despite its name intel_init_clock_gating applies both display
1189 * clock gating workarounds; GT mmio workarounds and the occasional
1190 * GT power context workaround. Worse, sometimes it includes a context
1191 * register workaround which we need to apply before we record the
1192 * default HW state for all contexts.
1193 *
1194 * FIXME: break up the workarounds and apply them at the right time!
1195 */
1196 intel_init_clock_gating(dev_priv);
1197
1198 ret = intel_gt_init(&dev_priv->gt);
1199 if (ret)
1200 goto err_unlock;
1201
1202 return 0;
1203
1204 /*
1205 * Unwinding is complicated by that we want to handle -EIO to mean
1206 * disable GPU submission but keep KMS alive. We want to mark the
1207 * HW as irrevisibly wedged, but keep enough state around that the
1208 * driver doesn't explode during runtime.
1209 */
1210err_unlock:
1211 i915_gem_drain_workqueue(dev_priv);
1212
1213 if (ret != -EIO5) {
1214 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1215 i915_gem_cleanup_userptr(dev_priv);
1216 }
1217
1218 if (ret == -EIO5) {
1219 /*
1220 * Allow engines or uC initialisation to fail by marking the GPU
1221 * as wedged. But we only want to do this when the GPU is angry,
1222 * for all other failure, such as an allocation failure, bail.
1223 */
1224 if (!intel_gt_is_wedged(&dev_priv->gt)) {
1225 i915_probe_error(dev_priv,__i915_printk(dev_priv, 0 ? "\0017" : "\0013", "Failed to initialize GPU, declaring it wedged!\n"
)
1226 "Failed to initialize GPU, declaring it wedged!\n")__i915_printk(dev_priv, 0 ? "\0017" : "\0013", "Failed to initialize GPU, declaring it wedged!\n"
)
;
1227 intel_gt_set_wedged(&dev_priv->gt);
1228 }
1229
1230 /* Minimal basic recovery for KMS */
1231 ret = i915_ggtt_enable_hw(dev_priv);
1232 i915_ggtt_resume(&dev_priv->ggtt);
1233 intel_init_clock_gating(dev_priv);
1234 }
1235
1236 i915_gem_drain_freed_objects(dev_priv);
1237 return ret;
1238}
1239
1240void i915_gem_driver_register(struct drm_i915_privateinteldrm_softc *i915)
1241{
1242 i915_gem_driver_register__shrinker(i915);
1243
1244 intel_engines_driver_register(i915);
1245}
1246
1247void i915_gem_driver_unregister(struct drm_i915_privateinteldrm_softc *i915)
1248{
1249 i915_gem_driver_unregister__shrinker(i915);
1250}
1251
1252void i915_gem_driver_remove(struct drm_i915_privateinteldrm_softc *dev_priv)
1253{
1254 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1255
1256 i915_gem_suspend_late(dev_priv);
1257 intel_gt_driver_remove(&dev_priv->gt);
1258 dev_priv->uabi_engines = RB_ROOT(struct rb_root) { ((void *)0) };
1259
1260 /* Flush any outstanding unpin_work. */
1261 i915_gem_drain_workqueue(dev_priv);
1262
1263 i915_gem_drain_freed_objects(dev_priv);
1264}
1265
1266void i915_gem_driver_release(struct drm_i915_privateinteldrm_softc *dev_priv)
1267{
1268 i915_gem_driver_release__contexts(dev_priv);
1269
1270 intel_gt_driver_release(&dev_priv->gt);
1271
1272 intel_wa_list_free(&dev_priv->gt_wa_list);
1273
1274 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1275 i915_gem_cleanup_userptr(dev_priv);
1276
1277 i915_gem_drain_freed_objects(dev_priv);
1278
1279 drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list))({ int __ret = !!((!list_empty(&dev_priv->gem.contexts
.list))); if (__ret) printf("%s %s: " "%s", dev_driver_string
(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "!list_empty(&dev_priv->gem.contexts.list)"
")"); __builtin_expect(!!(__ret), 0); })
;
1280}
1281
1282static void i915_gem_init__mm(struct drm_i915_privateinteldrm_softc *i915)
1283{
1284 mtx_init(&i915->mm.obj_lock, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&i915->
mm.obj_lock), ((((0x0)) > 0x0 && ((0x0)) < 0x9)
? 0x9 : ((0x0)))); } while (0)
;
1285
1286 init_llist_head(&i915->mm.free_list);
1287
1288 INIT_LIST_HEAD(&i915->mm.purge_list);
1289 INIT_LIST_HEAD(&i915->mm.shrink_list);
1290
1291 i915_gem_init__objects(i915);
1292}
1293
1294void i915_gem_init_early(struct drm_i915_privateinteldrm_softc *dev_priv)
1295{
1296 i915_gem_init__mm(dev_priv);
1297 i915_gem_init__contexts(dev_priv);
1298
1299 mtx_init(&dev_priv->fb_tracking.lock, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev_priv
->fb_tracking.lock), ((((0x0)) > 0x0 && ((0x0))
< 0x9) ? 0x9 : ((0x0)))); } while (0)
;
1300}
1301
1302void i915_gem_cleanup_early(struct drm_i915_privateinteldrm_softc *dev_priv)
1303{
1304 i915_gem_drain_freed_objects(dev_priv);
1305 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list))((void)0);
1306 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count))((void)0);
1307 drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count)({ int __ret = !!((dev_priv->mm.shrink_count)); if (__ret)
printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "dev_priv->mm.shrink_count"
")"); __builtin_expect(!!(__ret), 0); })
;
1308}
1309
1310int i915_gem_freeze(struct drm_i915_privateinteldrm_softc *dev_priv)
1311{
1312 /* Discard all purgeable objects, let userspace recover those as
1313 * required after resuming.
1314 */
1315 i915_gem_shrink_all(dev_priv);
1316
1317 return 0;
1318}
1319
1320int i915_gem_freeze_late(struct drm_i915_privateinteldrm_softc *i915)
1321{
1322 struct drm_i915_gem_object *obj;
1323 intel_wakeref_t wakeref;
1324
1325 /*
1326 * Called just before we write the hibernation image.
1327 *
1328 * We need to update the domain tracking to reflect that the CPU
1329 * will be accessing all the pages to create and restore from the
1330 * hibernation, and so upon restoration those pages will be in the
1331 * CPU domain.
1332 *
1333 * To make sure the hibernation image contains the latest state,
1334 * we update that state just before writing out the image.
1335 *
1336 * To try and reduce the hibernation image, we manually shrink
1337 * the objects as well, see i915_gem_freeze()
1338 */
1339
1340 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1341
1342 i915_gem_shrink(i915, -1UL, NULL((void *)0), ~0);
1343 i915_gem_drain_freed_objects(i915);
1344
1345 list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->mm.link
) *__mptr = ((&i915->mm.shrink_list)->next); (__typeof
(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof(*obj)
, mm.link) );}); &obj->mm.link != (&i915->mm.shrink_list
); obj = ({ const __typeof( ((__typeof(*obj) *)0)->mm.link
) *__mptr = (obj->mm.link.next); (__typeof(*obj) *)( (char
*)__mptr - __builtin_offsetof(__typeof(*obj), mm.link) );}))
{
1346 i915_gem_object_lock(obj, NULL((void *)0));
1347 drm_WARN_ON(&i915->drm,({ int __ret = !!((i915_gem_object_set_to_cpu_domain(obj, 1))
); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "i915_gem_object_set_to_cpu_domain(obj, 1)"
")"); __builtin_expect(!!(__ret), 0); })
1348 i915_gem_object_set_to_cpu_domain(obj, true))({ int __ret = !!((i915_gem_object_set_to_cpu_domain(obj, 1))
); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "i915_gem_object_set_to_cpu_domain(obj, 1)"
")"); __builtin_expect(!!(__ret), 0); })
;
1349 i915_gem_object_unlock(obj);
1350 }
1351
1352 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1353
1354 return 0;
1355}
1356
1357int i915_gem_open(struct drm_i915_privateinteldrm_softc *i915, struct drm_file *file)
1358{
1359 struct drm_i915_file_private *file_priv;
1360 int ret;
1361
1362 DRM_DEBUG("\n")__drm_dbg(DRM_UT_CORE, "\n");
1363
1364 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL(0x0001 | 0x0004));
1365 if (!file_priv)
1366 return -ENOMEM12;
1367
1368 file->driver_priv = file_priv;
1369 file_priv->dev_priv = i915;
1370 file_priv->file = file;
1371
1372 file_priv->bsd_engine = -1;
1373 file_priv->hang_timestamp = jiffies;
1374
1375 ret = i915_gem_context_open(i915, file);
1376 if (ret)
1377 kfree(file_priv);
1378
1379 return ret;
1380}
1381
1382void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool_Bool intr)
1383{
1384 ww_acquire_init(&ww->ctx, &reservation_ww_class);
1385 INIT_LIST_HEAD(&ww->obj_list);
1386 ww->intr = intr;
1387 ww->contended = NULL((void *)0);
1388}
1389
1390static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
1391{
1392 struct drm_i915_gem_object *obj;
1393
1394 while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link)(list_empty(&ww->obj_list) ? ((void *)0) : ({ const __typeof
( ((struct drm_i915_gem_object *)0)->obj_link ) *__mptr = (
(&ww->obj_list)->next); (struct drm_i915_gem_object
*)( (char *)__mptr - __builtin_offsetof(struct drm_i915_gem_object
, obj_link) );}))
)) {
1395 list_del(&obj->obj_link);
1396 i915_gem_object_unlock(obj);
1397 }
1398}
1399
1400void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
1401{
1402 list_del(&obj->obj_link);
1403 i915_gem_object_unlock(obj);
1404}
1405
1406void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
1407{
1408 i915_gem_ww_ctx_unlock_all(ww);
1409 WARN_ON(ww->contended)({ int __ret = !!((ww->contended)); if (__ret) printf("%s"
, "WARN_ON(" "ww->contended" ")"); __builtin_expect(!!(__ret
), 0); })
;
1410 ww_acquire_fini(&ww->ctx);
1411}
1412
1413int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
1414{
1415 int ret = 0;
1416
1417 if (WARN_ON(!ww->contended)({ int __ret = !!((!ww->contended)); if (__ret) printf("%s"
, "WARN_ON(" "!ww->contended" ")"); __builtin_expect(!!(__ret
), 0); })
)
1418 return -EINVAL22;
1419
1420 i915_gem_ww_ctx_unlock_all(ww);
1421 if (ww->intr)
1422 ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
1423 else
1424 dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
1425
1426 if (!ret)
1427 list_add_tail(&ww->contended->obj_link, &ww->obj_list);
1428
1429 ww->contended = NULL((void *)0);
1430
1431 return ret;
1432}
1433
1434#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0
1435#include "selftests/mock_gem_device.c"
1436#include "selftests/i915_gem.c"
1437#endif

/usr/src/sys/dev/pci/drm/include/linux/list.h

1/* $OpenBSD: list.h,v 1.4 2021/10/01 04:36:38 jsg Exp $ */
2/* drm_linux_list.h -- linux list functions for the BSDs.
3 * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
4 */
5/*-
6 * Copyright 2003 Eric Anholt
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Eric Anholt <anholt@FreeBSD.org>
30 *
31 */
32
33#ifndef _DRM_LINUX_LIST_H_
34#define _DRM_LINUX_LIST_H_
35
36#include <sys/param.h>
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/poison.h>
40
41#define list_entry(ptr, type, member)({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); (
type *)( (char *)__mptr - __builtin_offsetof(type, member) );
})
container_of(ptr, type, member)({ const __typeof( ((type *)0)->member ) *__mptr = (ptr); (
type *)( (char *)__mptr - __builtin_offsetof(type, member) );
})
42
43static inline void
44INIT_LIST_HEAD(struct list_head *head) {
45 (head)->next = head;
46 (head)->prev = head;
47}
48
49#define LIST_HEAD_INIT(name){ &(name), &(name) } { &(name), &(name) }
50
51#define DRM_LIST_HEAD(name)struct list_head name = { &(name), &(name) } \
52 struct list_head name = LIST_HEAD_INIT(name){ &(name), &(name) }
53
54static inline int
55list_empty(const struct list_head *head) {
56 return (head)->next == head;
    [22] Returning zero, which participates in a condition later
57}
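
Note for the reported path: list_empty() above only compares head->next against head, so a state in which a list head's 'next' field is NULL is not rejected by this check, even though none of the helpers shown in this header ever set a 'next' field to NULL; that is the state in which the dereference at line 252 is reported. A tiny standalone illustration of that observation (toy code, not part of list.h):

/* empty_check_sketch.c - list_empty() does not catch a NULL 'next' */
#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static int toy_list_empty(const struct list_head *head)
{
    return head->next == head;
}

int main(void)
{
    /* a state the analyzer explores, but one the list API never creates */
    struct list_head h = { NULL, NULL };

    assert(!toy_list_empty(&h));   /* reported as "not empty", yet h.next is NULL */
    return 0;
}
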
58
59static inline int
60list_is_singular(const struct list_head *head) {
61 return !list_empty(head) && ((head)->next == (head)->prev);
62}
63
64static inline int
65list_is_first(const struct list_head *list,
66 const struct list_head *head)
67{
68 return list->prev == head;
69}
70
71static inline int
72list_is_last(const struct list_head *list,
73 const struct list_head *head)
74{
75 return list->next == head;
76}
77
78static inline void
79list_add(struct list_head *new, struct list_head *head) {
80 (head)->next->prev = new;
81 (new)->next = (head)->next;
82 (new)->prev = head;
83 (head)->next = new;
84}
85
86static inline void
87list_add_tail(struct list_head *entry, struct list_head *head) {
88 (entry)->prev = (head)->prev;
89 (entry)->next = head;
90 (head)->prev->next = entry;
91 (head)->prev = entry;
92}
93
94static inline void
95list_del(struct list_head *entry) {
96 (entry)->next->prev = (entry)->prev;
97 (entry)->prev->next = (entry)->next;
98}
99
100#define __list_del_entry(x) list_del(x)
101
102static inline void list_replace(struct list_head *old,
103 struct list_head *new)
104{
105 new->next = old->next;
106 new->next->prev = new;
107 new->prev = old->prev;
108 new->prev->next = new;
109}
110
111static inline void list_replace_init(struct list_head *old,
112 struct list_head *new)
113{
114 list_replace(old, new);
115 INIT_LIST_HEAD(old);
116}
117
118static inline void list_move(struct list_head *list, struct list_head *head)
119{
120 list_del(list);
121 list_add(list, head);
122}
123
124static inline void list_move_tail(struct list_head *list,
125 struct list_head *head)
126{
127 list_del(list);
128 list_add_tail(list, head);
129}
130
131static inline void
132list_rotate_to_front(struct list_head *list, struct list_head *head)
133{
134 list_del(head);
135 list_add_tail(head, list);
136}
137
138static inline void
139list_bulk_move_tail(struct list_head *head, struct list_head *first,
140 struct list_head *last)
141{
142 first->prev->next = last->next;
143 last->next->prev = first->prev;
144 head->prev->next = first;
145 first->prev = head->prev;
146 last->next = head;
147 head->prev = last;
148}
149
150static inline void
151list_del_init(struct list_head *entry) {
152 (entry)->next->prev = (entry)->prev;
153 (entry)->prev->next = (entry)->next;
154 INIT_LIST_HEAD(entry);
155}
156
157#define list_next_entry(pos, member) \
158 list_entry(((pos)->member.next), typeof(*(pos)), member)
159
160#define list_prev_entry(pos, member) \
161 list_entry(((pos)->member.prev), typeof(*(pos)), member)
162
163#define list_safe_reset_next(pos, n, member) \
164 n = list_next_entry(pos, member)
165
166#define list_for_each(entry, head) \
167 for (entry = (head)->next; entry != head; entry = (entry)->next)
168
169#define list_for_each_prev(entry, head) \
170 for (entry = (head)->prev; entry != (head); \
171 entry = entry->prev)
172
173#define list_for_each_safe(entry, temp, head) \
174 for (entry = (head)->next, temp = (entry)->next; \
175 entry != head; \
176 entry = temp, temp = entry->next)
177
178#define list_for_each_entry_safe_reverse(pos, n, head, member) \
179 for (pos = list_entry((head)->prev, __typeof(*pos), member), \
180 n = list_entry((pos)->member.prev, __typeof(*pos), member); \
181 &(pos)->member != (head); \
182 pos = n, n = list_entry(n->member.prev, __typeof(*n), member))
183
184#define list_for_each_entry_safe_from(pos, n, head, member) \
185 for (n = list_entry(pos->member.next, __typeof(*pos), member); \
186 &pos->member != (head); \
187 pos = n, n = list_entry(n->member.next, __typeof(*n), member))
188
189#define list_for_each_entry(pos, head, member) \
190 for (pos = list_entry((head)->next, __typeof(*pos), member); \
191 &pos->member != (head); \
192 pos = list_entry(pos->member.next, __typeof(*pos), member))
193
194#define list_for_each_entry_from(pos, head, member) \
195 for (; \
196 &pos->member != (head); \
197 pos = list_entry(pos->member.next, __typeof(*pos), member))
198
199#define list_for_each_entry_reverse(pos, head, member) \
200 for (pos = list_entry((head)->prev, __typeof(*pos), member); \
201 &pos->member != (head); \
202 pos = list_entry(pos->member.prev, __typeof(*pos), member))
203
204#define list_for_each_entry_from_reverse(pos, head, member) \
205 for (; \
206 &pos->member != (head); \
207 pos = list_entry(pos->member.prev, __typeof(*pos), member))
208
209#define list_for_each_entry_continue(pos, head, member) \
210 for (pos = list_entry((pos)->member.next, __typeof(*pos), member); \
211 &pos->member != (head); \
212 pos = list_entry(pos->member.next, __typeof(*pos), member))
213
214#define list_for_each_entry_continue_reverse(pos, head, member) \
215 for (pos = list_entry(pos->member.prev, __typeof(*pos), member); \
216 &pos->member != (head); \
217 pos = list_entry(pos->member.prev, __typeof(*pos), member))
218
219/**
220 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
221 * @pos: the type * to use as a loop cursor.
222 * @n: another type * to use as temporary storage
223 * @head: the head for your list.
224 * @member: the name of the list_struct within the struct.
225 */
226#define list_for_each_entry_safe(pos, n, head, member) \
227 for (pos = list_entry((head)->next, __typeof(*pos), member), \
228 n = list_entry(pos->member.next, __typeof(*pos), member); \
229 &pos->member != (head); \
230 pos = n, n = list_entry(n->member.next, __typeof(*n), member))
231
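
As the comment above line 226 says, the _safe variant caches the next entry in n before the loop body runs, so the body may unlink pos. A short sketch of the pattern, reusing the illustrative struct item introduced earlier (hypothetical, not part of this header):

/* Empty the list; safe because n is read before pos is unlinked. */
static void
drain_items(struct list_head *head)
{
	struct item *pos, *n;

	list_for_each_entry_safe(pos, n, head, link) {
		list_del(&pos->link);
		/* pos can now be released; n still points at the next entry */
	}
}
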
232#define list_first_entry(ptr, type, member) \
233 list_entry((ptr)->next, type, member)
234
235#define list_first_entry_or_null(ptr, type, member) \
236 (list_empty(ptr) ? NULL : list_first_entry(ptr, type, member))
237
238#define list_last_entry(ptr, type, member) \
239 list_entry((ptr)->prev, type, member)
240
241static inline void
242__list_splice(const struct list_head *list, struct list_head *prev,
243 struct list_head *next)
244{
245 struct list_head *first = list->next;
246 struct list_head *last = list->prev;
247
248 first->prev = prev;
249 prev->next = first;
250
251 last->next = next;
252 next->prev = last;
27
Access to field 'prev' results in a dereference of a null pointer (loaded from variable 'next')
253}
254
255static inline void
256list_splice(const struct list_head *list, struct list_head *head)
257{
258 if (list_empty(list))
259 return;
260
261 __list_splice(list, head, head->next);
262}
263
264static inline void
265list_splice_init(struct list_head *list, struct list_head *head)
266{
267 if (list_empty(list))
21
Calling 'list_empty'
23
Returning from 'list_empty'
24
Taking false branch
268 return;
269
270 __list_splice(list, head, head->next);
25
Passing null pointer value via 3rd parameter 'next'
26
Calling '__list_splice'
271 INIT_LIST_HEAD(list);
272}
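
The analyzer's path (steps 21-27) reaches list_splice_init() with a head whose next pointer it believes is NULL, so __list_splice() faults on next->prev at line 252. The real caller is in i915_gem.c and is not shown in this part of the report; the following is only a hypothetical sketch of the kind of state the analyzer assumes, e.g. a destination head that was zeroed rather than passed through INIT_LIST_HEAD():

static void
splice_into_uninitialized_head(struct list_head *donor)
{
	struct list_head head;

	memset(&head, 0, sizeof(head));	/* head.next == NULL: never INIT_LIST_HEAD'd */

	/* If donor is non-empty, list_empty() returns 0 (step 22), the false
	 * branch is taken (step 24), and head.next == NULL is passed as the
	 * 3rd parameter 'next' (step 25), so __list_splice() dereferences a
	 * null pointer at next->prev (step 27, list.h line 252).
	 */
	list_splice_init(donor, &head);
}
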
273
274static inline void
275list_splice_tail(const struct list_head *list, struct list_head *head)
276{
277 if (list_empty(list))
278 return;
279
280 __list_splice(list, head->prev, head);
281}
282
283static inline void
284list_splice_tail_init(struct list_head *list, struct list_head *head)
285{
286 if (list_empty(list))
287 return;
288
289 __list_splice(list, head->prev, head);
290 INIT_LIST_HEAD(list);
291}
292
293void list_sort(void *, struct list_head *,
294 int (*)(void *, const struct list_head *, const struct list_head *));
295
296#define hlist_entry(ptr, type, member) \
297 ((ptr) ? container_of(ptr, type, member) : NULL)
298
299static inline void
300INIT_HLIST_HEAD(struct hlist_head *head) {
301 head->first = NULL;
302}
303
304static inline int
305hlist_empty(const struct hlist_head *head) {
306 return head->first == NULL;
307}
308
309static inline void
310hlist_add_head(struct hlist_node *new, struct hlist_head *head)
311{
312 if ((new->next = head->first) != NULL)
313 head->first->prev = &new->next;
314 head->first = new;
315 new->prev = &head->first;
316}
317
318static inline void
319hlist_del_init(struct hlist_node *node)
320{
321 if (node->next != NULL)
322 node->next->prev = node->prev;
323 *(node->prev) = node->next;
324 node->next = NULL;
325 node->prev = NULL;
326}
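
In this port the hlist node's prev field is a pointer to the pointer that references the node (either head->first or the previous node's next), which is why hlist_del_init() can unlink without knowing whether the node is first in its chain. A short sketch using a made-up struct hentry (illustrative only), built from the two functions above:

struct hentry {
	int			key;
	struct hlist_node	node;	/* embedded hash-chain link */
};

static void
hash_insert(struct hlist_head *bucket, struct hentry *e)
{
	/* New entries go to the front; e->node.prev ends up addressing
	 * whichever pointer now references e (here, &bucket->first). */
	hlist_add_head(&e->node, bucket);
}

static void
hash_remove(struct hentry *e)
{
	/* Works whether or not e is first in its bucket, because
	 * e->node.prev addresses the pointer that must be rewritten. */
	hlist_del_init(&e->node);
}
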
327
328#define hlist_for_each(pos, head) \
329 for (pos = (head)->first; pos != NULL; pos = pos->next)
330
331#define hlist_for_each_entry(pos, head, member) \
332 for (pos = hlist_entry((head)->first, __typeof(*pos), member); \
333 pos != NULL; \
334 pos = hlist_entry((pos)->member.next, __typeof(*pos), member))
335
336#define hlist_for_each_entry_safe(pos, n, head, member) \
337 for (pos = hlist_entry((head)->first, __typeof(*pos), member); \
338 pos != NULL && (n = pos->member.next, 1); \
339 pos = hlist_entry(n, __typeof(*pos), member))
340
341#endif /* _DRM_LINUX_LIST_H_ */