Bug Summary

File: dev/pci/drm/i915/gem/i915_gem_object.h
Warning: line 130, column 17
Access to field 'contended' results in a dereference of a null pointer (loaded from variable 'ww')
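
The reported path starts in i915_gem_freeze_late() (i915_gem.c, line 1346), which calls i915_gem_object_lock(obj, NULL). The NULL acquire context is forwarded to __i915_gem_object_lock(), and on the path where dma_resv_lock() is assumed to fail with -EDEADLK, the function stores the object in ww->contended, dereferencing the null pointer. In practice dma_resv_lock() is not expected to return -EDEADLK when no acquire context is passed, so this is likely a false positive that the analyzer cannot prove away. A minimal, self-contained sketch of the flagged pattern (stub types and a stubbed lock, for illustration only; not the driver code):

#include <stddef.h>

struct obj { int unused; };                 /* stand-in for struct drm_i915_gem_object */
struct ww_ctx { struct obj *contended; };   /* stand-in for struct i915_gem_ww_ctx */

#define EDEADLK 11

/* Stubbed lock: the analyzer assumes the real dma_resv_lock() may return -EDEADLK. */
static int resv_lock(struct obj *o, struct ww_ctx *ctx)
{
        (void)o; (void)ctx;
        return -EDEADLK;
}

static int lock_obj(struct obj *obj, struct ww_ctx *ww)  /* mirrors __i915_gem_object_lock() */
{
        int ret = resv_lock(obj, ww);       /* ww may be NULL here */

        if (ret == -EDEADLK)
                ww->contended = obj;        /* line 130: NULL dereference when ww == NULL */

        return ret;
}

int main(void)
{
        struct obj o;
        return lock_obj(&o, NULL);          /* mirrors i915_gem_object_lock(obj, NULL) at i915_gem.c:1346 */
}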

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name i915_gem.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_gem.c

/usr/src/sys/dev/pci/drm/i915/i915_gem.c

1/*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <drm/drm_vma_manager.h>
29#include <linux/dma-fence-array.h>
30#include <linux/kthread.h>
31#include <linux/dma-resv.h>
32#include <linux/shmem_fs.h>
33#include <linux/slab.h>
34#include <linux/stop_machine.h>
35#include <linux/swap.h>
36#include <linux/pci.h>
37#include <linux/dma-buf.h>
38#include <linux/mman.h>
39
40#include <dev/pci/agpvar.h>
41
42#include "display/intel_display.h"
43#include "display/intel_frontbuffer.h"
44
45#include "gem/i915_gem_clflush.h"
46#include "gem/i915_gem_context.h"
47#include "gem/i915_gem_ioctls.h"
48#include "gem/i915_gem_mman.h"
49#include "gem/i915_gem_region.h"
50#include "gt/intel_engine_user.h"
51#include "gt/intel_gt.h"
52#include "gt/intel_gt_pm.h"
53#include "gt/intel_workarounds.h"
54
55#include "i915_drv.h"
56#include "i915_trace.h"
57#include "i915_vgpu.h"
58
59#include "intel_pm.h"
60
61static int
62insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
63{
64 int err;
65
66 err = mutex_lock_interruptible(&ggtt->vm.mutex);
67 if (err)
68 return err;
69
70 memset(node, 0, sizeof(*node))__builtin_memset((node), (0), (sizeof(*node)));
71 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
72 size, 0, I915_COLOR_UNEVICTABLE(-1),
73 0, ggtt->mappable_end,
74 DRM_MM_INSERT_LOW);
75
76 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
77
78 return err;
79}
80
81static void
82remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
83{
84 mutex_lock(&ggtt->vm.mutex)rw_enter_write(&ggtt->vm.mutex);
85 drm_mm_remove_node(node);
86 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
87}
88
89int
90i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file)
92{
93 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
94 struct drm_i915_gem_get_aperture *args = data;
95 struct i915_vma *vma;
96 u64 pinned;
97
98 if (mutex_lock_interruptible(&ggtt->vm.mutex))
99 return -EINTR4;
100
101 pinned = ggtt->vm.reserved;
102 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)for (vma = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link
) *__mptr = ((&ggtt->vm.bound_list)->next); (__typeof
(*vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma)
, vm_link) );}); &vma->vm_link != (&ggtt->vm.bound_list
); vma = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link
) *__mptr = (vma->vm_link.next); (__typeof(*vma) *)( (char
*)__mptr - __builtin_offsetof(__typeof(*vma), vm_link) );}))
103 if (i915_vma_is_pinned(vma))
104 pinned += vma->node.size;
105
106 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
107
108 args->aper_size = ggtt->vm.total;
109 args->aper_available_size = args->aper_size - pinned;
110
111 return 0;
112}
113
114int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
115 unsigned long flags)
116{
117 struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
118 DRM_LIST_HEAD(still_in_list)struct list_head still_in_list = { &(still_in_list), &
(still_in_list) }
;
119 intel_wakeref_t wakeref;
120 struct i915_vma *vma;
121 int ret;
122
123 if (list_empty(&obj->vma.list))
124 return 0;
125
126 /*
127 * As some machines use ACPI to handle runtime-resume callbacks, and
128 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
129 * as they are required by the shrinker. Ergo, we wake the device up
130 * first just in case.
131 */
132 wakeref = intel_runtime_pm_get(rpm);
133
134try_again:
135 ret = 0;
136 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
137 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
138 struct i915_vma,(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
139 obj_link)(list_empty(&obj->vma.list) ? ((void *)0) : ({ const __typeof
( ((struct i915_vma *)0)->obj_link ) *__mptr = ((&obj->
vma.list)->next); (struct i915_vma *)( (char *)__mptr - __builtin_offsetof
(struct i915_vma, obj_link) );}))
)) {
140 struct i915_address_space *vm = vma->vm;
141
142 list_move_tail(&vma->obj_link, &still_in_list);
143 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK(((int)(1UL << (10))) | ((int)(1UL << (11))))))
144 continue;
145
146 if (flags & I915_GEM_OBJECT_UNBIND_TEST(1UL << (2))) {
147 ret = -EBUSY16;
148 break;
149 }
150
151 ret = -EAGAIN35;
152 if (!i915_vm_tryopen(vm))
153 break;
154
155 /* Prevent vma being freed by i915_vma_parked as we unbind */
156 vma = __i915_vma_get(vma);
157 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
158
159 if (vma) {
160 ret = -EBUSY16;
161 if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE(1UL << (0)) ||
162 !i915_vma_is_active(vma))
163 ret = i915_vma_unbind(vma);
164
165 __i915_vma_put(vma);
166 }
167
168 i915_vm_close(vm);
169 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
170 }
171 list_splice_init(&still_in_list, &obj->vma.list);
172 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
173
174 if (ret == -EAGAIN35 && flags & I915_GEM_OBJECT_UNBIND_BARRIER(1UL << (1))) {
175 rcu_barrier()__asm volatile("" : : : "memory"); /* flush the i915_vm_release() */
176 goto try_again;
177 }
178
179 intel_runtime_pm_put(rpm, wakeref);
180
181 return ret;
182}
183
184static int
185i915_gem_create(struct drm_file *file,
186 struct intel_memory_region *mr,
187 u64 *size_p,
188 u32 *handle_p)
189{
190 struct drm_i915_gem_object *obj;
191 u32 handle;
192 u64 size;
193 int ret;
194
195 GEM_BUG_ON(!is_power_of_2(mr->min_page_size))((void)0);
196 size = round_up(*size_p, mr->min_page_size)((((*size_p) + ((mr->min_page_size) - 1)) / (mr->min_page_size
)) * (mr->min_page_size))
;
197 if (size == 0)
198 return -EINVAL22;
199
200 /* For most of the ABI (e.g. mmap) we think in system pages */
201 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE))((void)0);
202
203 /* Allocate the new object */
204 obj = i915_gem_object_create_region(mr, size, 0);
205 if (IS_ERR(obj))
206 return PTR_ERR(obj);
207
208 ret = drm_gem_handle_create(file, &obj->base, &handle);
209 /* drop reference from allocate - handle holds it now */
210 i915_gem_object_put(obj);
211 if (ret)
212 return ret;
213
214 *handle_p = handle;
215 *size_p = size;
216 return 0;
217}
218
219int
220i915_gem_dumb_create(struct drm_file *file,
221 struct drm_device *dev,
222 struct drm_mode_create_dumb *args)
223{
224 enum intel_memory_type mem_type;
225 int cpp = DIV_ROUND_UP(args->bpp, 8)(((args->bpp) + ((8) - 1)) / (8));
226 u32 format;
227
228 switch (cpp) {
229 case 1:
230 format = DRM_FORMAT_C8((__u32)('C') | ((__u32)('8') << 8) | ((__u32)(' ') <<
16) | ((__u32)(' ') << 24))
;
231 break;
232 case 2:
233 format = DRM_FORMAT_RGB565((__u32)('R') | ((__u32)('G') << 8) | ((__u32)('1') <<
16) | ((__u32)('6') << 24))
;
234 break;
235 case 4:
236 format = DRM_FORMAT_XRGB8888((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('2') <<
16) | ((__u32)('4') << 24))
;
237 break;
238 default:
239 return -EINVAL22;
240 }
241
242 /* have to work out size/pitch and return them */
243 args->pitch = roundup2(args->width * cpp, 64)(((args->width * cpp) + ((64) - 1)) & (~((__typeof(args
->width * cpp))(64) - 1)))
;
244
245 /* align stride to page size so that we can remap */
246 if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
247 DRM_FORMAT_MOD_LINEAR((((__u64)0) << 56) | ((0) & 0x00ffffffffffffffULL)
)
))
248 args->pitch = roundup2(args->pitch, 4096)(((args->pitch) + ((4096) - 1)) & (~((__typeof(args->
pitch))(4096) - 1)))
;
249
250 if (args->pitch < args->width)
251 return -EINVAL22;
252
253 args->size = mul_u32_u32(args->pitch, args->height);
254
255 mem_type = INTEL_MEMORY_SYSTEM;
256 if (HAS_LMEM(to_i915(dev))((&(to_i915(dev))->__info)->memory_regions & ((
1UL << (INTEL_REGION_LMEM))))
)
257 mem_type = INTEL_MEMORY_LOCAL;
258
259 return i915_gem_create(file,
260 intel_memory_region_by_type(to_i915(dev),
261 mem_type),
262 &args->size, &args->handle);
263}
264
265/**
266 * Creates a new mm object and returns a handle to it.
267 * @dev: drm device pointer
268 * @data: ioctl data blob
269 * @file: drm file pointer
270 */
271int
272i915_gem_create_ioctl(struct drm_device *dev, void *data,
273 struct drm_file *file)
274{
275 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
276 struct drm_i915_gem_create *args = data;
277
278 i915_gem_flush_free_objects(i915);
279
280 return i915_gem_create(file,
281 intel_memory_region_by_type(i915,
282 INTEL_MEMORY_SYSTEM),
283 &args->size, &args->handle);
284}
285
286static int
287shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
288 bool_Bool needs_clflush)
289{
290 char *vaddr;
291 int ret;
292
293 vaddr = kmap(page);
294
295 if (needs_clflush)
296 drm_clflush_virt_range(vaddr + offset, len);
297
298 ret = __copy_to_user(user_data, vaddr + offset, len);
299
300 kunmap_va(vaddr);
301
302 return ret ? -EFAULT14 : 0;
303}
304
305static int
306i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
307 struct drm_i915_gem_pread *args)
308{
309 unsigned int needs_clflush;
310 unsigned int idx, offset;
311 struct dma_fence *fence;
312 char __user *user_data;
313 u64 remain;
314 int ret;
315
316 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
317 if (ret)
318 return ret;
319
320 ret = i915_gem_object_prepare_read(obj, &needs_clflush);
321 if (ret) {
322 i915_gem_object_unlock(obj);
323 return ret;
324 }
325
326 fence = i915_gem_object_lock_fence(obj);
327 i915_gem_object_finish_access(obj);
328 i915_gem_object_unlock(obj);
329
330 if (!fence)
331 return -ENOMEM12;
332
333 remain = args->size;
334 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
335 offset = offset_in_page(args->offset)((vaddr_t)(args->offset) & ((1 << 12) - 1));
336 for (idx = args->offset >> PAGE_SHIFT12; remain; idx++) {
337 struct vm_page *page = i915_gem_object_get_page(obj, idx);
338 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset)({ u64 __min_a = (remain); u64 __min_b = ((1 << 12) - offset
); __min_a < __min_b ? __min_a : __min_b; })
;
339
340 ret = shmem_pread(page, offset, length, user_data,
341 needs_clflush);
342 if (ret)
343 break;
344
345 remain -= length;
346 user_data += length;
347 offset = 0;
348 }
349
350 i915_gem_object_unlock_fence(obj, fence);
351 return ret;
352}
353
354#ifdef __linux__
355static inline bool_Bool
356gtt_user_read(struct io_mapping *mapping,
357 loff_t base, int offset,
358 char __user *user_data, int length)
359{
360 void __iomem *vaddr;
361 unsigned long unwritten;
362
363 /* We can use the cpu mem copy function because this is X86. */
364 vaddr = io_mapping_map_atomic_wc(mapping, base);
365 unwritten = __copy_to_user_inatomic(user_data,
366 (void __force *)vaddr + offset,
367 length);
368 io_mapping_unmap_atomic(vaddr);
369 if (unwritten) {
370 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE(1 << 12));
371 unwritten = copy_to_user(user_data,
372 (void __force *)vaddr + offset,
373 length);
374 io_mapping_unmap(vaddr);
375 }
376 return unwritten;
377}
378#else
379static inline bool_Bool
380gtt_user_read(struct drm_i915_privateinteldrm_softc *dev_priv,
381 loff_t base, int offset,
382 char __user *user_data, int length)
383{
384 bus_space_handle_t bsh;
385 void __iomem *vaddr;
386 unsigned long unwritten;
387
388 /* We can use the cpu mem copy function because this is X86. */
389 agp_map_atomic(dev_priv->agph, base, &bsh);
390 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
391 unwritten = __copy_to_user_inatomic(user_data,
392 (void __force *)vaddr + offset,
393 length);
394 agp_unmap_atomic(dev_priv->agph, bsh);
395 if (unwritten) {
396 agp_map_subregion(dev_priv->agph, base, PAGE_SIZE(1 << 12), &bsh);
397 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
398 unwritten = copy_to_user(user_data,
399 (void __force *)vaddr + offset,
400 length);
401 agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE(1 << 12));
402 }
403 return unwritten;
404}
405#endif
406
407static int
408i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
409 const struct drm_i915_gem_pread *args)
410{
411 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
412 struct i915_ggtt *ggtt = &i915->ggtt;
413 intel_wakeref_t wakeref;
414 struct drm_mm_node node;
415 struct dma_fence *fence;
416 void __user *user_data;
417 struct i915_vma *vma;
418 u64 remain, offset;
419 int ret;
420
421 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
422 vma = ERR_PTR(-ENODEV19);
423 if (!i915_gem_object_is_tiled(obj))
424 vma = i915_gem_object_ggtt_pin(obj, NULL((void *)0), 0, 0,
425 PIN_MAPPABLE(1ULL << (3)) |
426 PIN_NONBLOCK(1ULL << (2)) /* NOWARN */ |
427 PIN_NOEVICT(1ULL << (0)));
428 if (!IS_ERR(vma)) {
429 node.start = i915_ggtt_offset(vma);
430 node.flags = 0;
431 } else {
432 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE(1 << 12));
433 if (ret)
434 goto out_rpm;
435 GEM_BUG_ON(!drm_mm_node_allocated(&node))((void)0);
436 }
437
438 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
439 if (ret)
440 goto out_unpin;
441
442 ret = i915_gem_object_set_to_gtt_domain(obj, false0);
443 if (ret) {
444 i915_gem_object_unlock(obj);
445 goto out_unpin;
446 }
447
448 fence = i915_gem_object_lock_fence(obj);
449 i915_gem_object_unlock(obj);
450 if (!fence) {
451 ret = -ENOMEM12;
452 goto out_unpin;
453 }
454
455 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
456 remain = args->size;
457 offset = args->offset;
458
459 while (remain > 0) {
460 /* Operation in this page
461 *
462 * page_base = page offset within aperture
463 * page_offset = offset within page
464 * page_length = bytes to copy for this page
465 */
466 u32 page_base = node.start;
467 unsigned page_offset = offset_in_page(offset)((vaddr_t)(offset) & ((1 << 12) - 1));
468 unsigned page_length = PAGE_SIZE(1 << 12) - page_offset;
469 page_length = remain < page_length ? remain : page_length;
470 if (drm_mm_node_allocated(&node)) {
471 ggtt->vm.insert_page(&ggtt->vm,
472 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT12),
473 node.start, I915_CACHE_NONE, 0);
474 } else {
475 page_base += offset & LINUX_PAGE_MASK(~((1 << 12) - 1));
476 }
477
478 if (gtt_user_read(i915, page_base, page_offset,
479 user_data, page_length)) {
480 ret = -EFAULT14;
481 break;
482 }
483
484 remain -= page_length;
485 user_data += page_length;
486 offset += page_length;
487 }
488
489 i915_gem_object_unlock_fence(obj, fence);
490out_unpin:
491 if (drm_mm_node_allocated(&node)) {
492 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
493 remove_mappable_node(ggtt, &node);
494 } else {
495 i915_vma_unpin(vma);
496 }
497out_rpm:
498 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
499 return ret;
500}
501
502/**
503 * Reads data from the object referenced by handle.
504 * @dev: drm device pointer
505 * @data: ioctl data blob
506 * @file: drm file pointer
507 *
508 * On error, the contents of *data are undefined.
509 */
510int
511i915_gem_pread_ioctl(struct drm_device *dev, void *data,
512 struct drm_file *file)
513{
514 struct drm_i915_gem_pread *args = data;
515 struct drm_i915_gem_object *obj;
516 int ret;
517
518 if (args->size == 0)
519 return 0;
520
521 if (!access_ok(u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr)),
522 args->size))
523 return -EFAULT14;
524
525 obj = i915_gem_object_lookup(file, args->handle);
526 if (!obj)
527 return -ENOENT2;
528
529 /* Bounds check source. */
530 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)({ typeof((u64)(args->offset)) start__ = ((u64)(args->offset
)); typeof((u64)(args->size)) size__ = ((u64)(args->size
)); typeof((u64)(obj->base.size)) max__ = ((u64)(obj->base
.size)); (void)(&start__ == &size__); (void)(&start__
== &max__); start__ >= max__ || size__ > max__ - start__
; })
) {
531 ret = -EINVAL22;
532 goto out;
533 }
534
535 trace_i915_gem_object_pread(obj, args->offset, args->size);
536
537 ret = -ENODEV19;
538 if (obj->ops->pread)
539 ret = obj->ops->pread(obj, args);
540 if (ret != -ENODEV19)
541 goto out;
542
543 ret = i915_gem_object_wait(obj,
544 I915_WAIT_INTERRUPTIBLE(1UL << (0)),
545 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
546 if (ret)
547 goto out;
548
549 ret = i915_gem_object_pin_pages(obj);
550 if (ret)
551 goto out;
552
553 ret = i915_gem_shmem_pread(obj, args);
554 if (ret == -EFAULT14 || ret == -ENODEV19)
555 ret = i915_gem_gtt_pread(obj, args);
556
557 i915_gem_object_unpin_pages(obj);
558out:
559 i915_gem_object_put(obj);
560 return ret;
561}
562
563/* This is the fast write path which cannot handle
564 * page faults in the source data
565 */
566#ifdef __linux__
567static inline bool_Bool
568ggtt_write(struct io_mapping *mapping,
569 loff_t base, int offset,
570 char __user *user_data, int length)
571{
572 void __iomem *vaddr;
573 unsigned long unwritten;
574
575 /* We can use the cpu mem copy function because this is X86. */
576 vaddr = io_mapping_map_atomic_wc(mapping, base);
577 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
578 user_data, length);
579 io_mapping_unmap_atomic(vaddr);
580 if (unwritten) {
581 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE(1 << 12));
582 unwritten = copy_from_user((void __force *)vaddr + offset,
583 user_data, length);
584 io_mapping_unmap(vaddr);
585 }
586
587 return unwritten;
588}
589#else
590static inline bool_Bool
591ggtt_write(struct drm_i915_privateinteldrm_softc *dev_priv,
592 loff_t base, int offset,
593 char __user *user_data, int length)
594{
595 bus_space_handle_t bsh;
596 void __iomem *vaddr;
597 unsigned long unwritten;
598
599 /* We can use the cpu mem copy function because this is X86. */
600 agp_map_atomic(dev_priv->agph, base, &bsh);
601 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
602 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
603 user_data, length);
604 agp_unmap_atomic(dev_priv->agph, bsh);
605 if (unwritten) {
606 agp_map_subregion(dev_priv->agph, base, PAGE_SIZE(1 << 12), &bsh);
607 vaddr = bus_space_vaddr(dev_priv->bst, bsh)((dev_priv->bst)->vaddr((bsh)));
608 unwritten = copy_from_user((void __force *)vaddr + offset,
609 user_data, length);
610 agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE(1 << 12));
611 }
612
613 return unwritten;
614}
615#endif
616
617/**
618 * This is the fast pwrite path, where we copy the data directly from the
619 * user into the GTT, uncached.
620 * @obj: i915 GEM object
621 * @args: pwrite arguments structure
622 */
623static int
624i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
625 const struct drm_i915_gem_pwrite *args)
626{
627 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
628 struct i915_ggtt *ggtt = &i915->ggtt;
629 struct intel_runtime_pm *rpm = &i915->runtime_pm;
630 intel_wakeref_t wakeref;
631 struct drm_mm_node node;
632 struct dma_fence *fence;
633 struct i915_vma *vma;
634 u64 remain, offset;
635 void __user *user_data;
636 int ret;
637
638 if (i915_gem_object_has_struct_page(obj)) {
639 /*
640 * Avoid waking the device up if we can fallback, as
641 * waking/resuming is very slow (worst-case 10-100 ms
642 * depending on PCI sleeps and our own resume time).
643 * This easily dwarfs any performance advantage from
644 * using the cache bypass of indirect GGTT access.
645 */
646 wakeref = intel_runtime_pm_get_if_in_use(rpm);
647 if (!wakeref)
648 return -EFAULT14;
649 } else {
650 /* No backing pages, no fallback, we must force GGTT access */
651 wakeref = intel_runtime_pm_get(rpm);
652 }
653
654 vma = ERR_PTR(-ENODEV19);
655 if (!i915_gem_object_is_tiled(obj))
656 vma = i915_gem_object_ggtt_pin(obj, NULL((void *)0), 0, 0,
657 PIN_MAPPABLE(1ULL << (3)) |
658 PIN_NONBLOCK(1ULL << (2)) /* NOWARN */ |
659 PIN_NOEVICT(1ULL << (0)));
660 if (!IS_ERR(vma)) {
661 node.start = i915_ggtt_offset(vma);
662 node.flags = 0;
663 } else {
664 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE(1 << 12));
665 if (ret)
666 goto out_rpm;
667 GEM_BUG_ON(!drm_mm_node_allocated(&node))((void)0);
668 }
669
670 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
671 if (ret)
672 goto out_unpin;
673
674 ret = i915_gem_object_set_to_gtt_domain(obj, true1);
675 if (ret) {
676 i915_gem_object_unlock(obj);
677 goto out_unpin;
678 }
679
680 fence = i915_gem_object_lock_fence(obj);
681 i915_gem_object_unlock(obj);
682 if (!fence) {
683 ret = -ENOMEM12;
684 goto out_unpin;
685 }
686
687 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
688
689 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
690 offset = args->offset;
691 remain = args->size;
692 while (remain) {
693 /* Operation in this page
694 *
695 * page_base = page offset within aperture
696 * page_offset = offset within page
697 * page_length = bytes to copy for this page
698 */
699 u32 page_base = node.start;
700 unsigned int page_offset = offset_in_page(offset)((vaddr_t)(offset) & ((1 << 12) - 1));
701 unsigned int page_length = PAGE_SIZE(1 << 12) - page_offset;
702 page_length = remain < page_length ? remain : page_length;
703 if (drm_mm_node_allocated(&node)) {
704 /* flush the write before we modify the GGTT */
705 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
706 ggtt->vm.insert_page(&ggtt->vm,
707 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT12),
708 node.start, I915_CACHE_NONE, 0);
709 wmb()do { __asm volatile("sfence" ::: "memory"); } while (0); /* flush modifications to the GGTT (insert_page) */
710 } else {
711 page_base += offset & LINUX_PAGE_MASK(~((1 << 12) - 1));
712 }
713 /* If we get a fault while copying data, then (presumably) our
714 * source page isn't available. Return the error and we'll
715 * retry in the slow path.
716 * If the object is non-shmem backed, we retry again with the
717 * path that handles page fault.
718 */
719 if (ggtt_write(i915, page_base, page_offset,
720 user_data, page_length)) {
721 ret = -EFAULT14;
722 break;
723 }
724
725 remain -= page_length;
726 user_data += page_length;
727 offset += page_length;
728 }
729
730 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
731 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
732
733 i915_gem_object_unlock_fence(obj, fence);
734out_unpin:
735 if (drm_mm_node_allocated(&node)) {
736 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
737 remove_mappable_node(ggtt, &node);
738 } else {
739 i915_vma_unpin(vma);
740 }
741out_rpm:
742 intel_runtime_pm_put(rpm, wakeref);
743 return ret;
744}
745
746/* Per-page copy function for the shmem pwrite fastpath.
747 * Flushes invalid cachelines before writing to the target if
748 * needs_clflush_before is set and flushes out any written cachelines after
749 * writing if needs_clflush is set.
750 */
751static int
752shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
753 bool_Bool needs_clflush_before,
754 bool_Bool needs_clflush_after)
755{
756 char *vaddr;
757 int ret;
758
759 vaddr = kmap(page);
760
761 if (needs_clflush_before)
762 drm_clflush_virt_range(vaddr + offset, len);
763
764 ret = __copy_from_user(vaddr + offset, user_data, len);
765 if (!ret && needs_clflush_after)
766 drm_clflush_virt_range(vaddr + offset, len);
767
768 kunmap_va(vaddr);
769
770 return ret ? -EFAULT14 : 0;
771}
772
773static int
774i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
775 const struct drm_i915_gem_pwrite *args)
776{
777 unsigned int partial_cacheline_write;
778 unsigned int needs_clflush;
779 unsigned int offset, idx;
780 struct dma_fence *fence;
781 void __user *user_data;
782 u64 remain;
783 int ret;
784
785 ret = i915_gem_object_lock_interruptible(obj, NULL((void *)0));
786 if (ret)
787 return ret;
788
789 ret = i915_gem_object_prepare_write(obj, &needs_clflush);
790 if (ret) {
791 i915_gem_object_unlock(obj);
792 return ret;
793 }
794
795 fence = i915_gem_object_lock_fence(obj);
796 i915_gem_object_finish_access(obj);
797 i915_gem_object_unlock(obj);
798
799 if (!fence)
800 return -ENOMEM12;
801
802 /* If we don't overwrite a cacheline completely we need to be
803 * careful to have up-to-date data by first clflushing. Don't
804 * overcomplicate things and flush the entire patch.
805 */
806 partial_cacheline_write = 0;
807 if (needs_clflush & CLFLUSH_BEFORE(1UL << (0)))
808 partial_cacheline_write = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})
->ci_cflushsz - 1;
809
810 user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
811 remain = args->size;
812 offset = offset_in_page(args->offset)((vaddr_t)(args->offset) & ((1 << 12) - 1));
813 for (idx = args->offset >> PAGE_SHIFT12; remain; idx++) {
814 struct vm_page *page = i915_gem_object_get_page(obj, idx);
815 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset)({ u64 __min_a = (remain); u64 __min_b = ((1 << 12) - offset
); __min_a < __min_b ? __min_a : __min_b; })
;
816
817 ret = shmem_pwrite(page, offset, length, user_data,
818 (offset | length) & partial_cacheline_write,
819 needs_clflush & CLFLUSH_AFTER(1UL << (1)));
820 if (ret)
821 break;
822
823 remain -= length;
824 user_data += length;
825 offset = 0;
826 }
827
828 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
829 i915_gem_object_unlock_fence(obj, fence);
830
831 return ret;
832}
833
834/**
835 * Writes data to the object referenced by handle.
836 * @dev: drm device
837 * @data: ioctl data blob
838 * @file: drm file
839 *
840 * On error, the contents of the buffer that were to be modified are undefined.
841 */
842int
843i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
844 struct drm_file *file)
845{
846 struct drm_i915_gem_pwrite *args = data;
847 struct drm_i915_gem_object *obj;
848 int ret;
849
850 if (args->size == 0)
851 return 0;
852
853 if (!access_ok(u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr)), args->size))
854 return -EFAULT14;
855
856 obj = i915_gem_object_lookup(file, args->handle);
857 if (!obj)
858 return -ENOENT2;
859
860 /* Bounds check destination. */
861 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)({ typeof((u64)(args->offset)) start__ = ((u64)(args->offset
)); typeof((u64)(args->size)) size__ = ((u64)(args->size
)); typeof((u64)(obj->base.size)) max__ = ((u64)(obj->base
.size)); (void)(&start__ == &size__); (void)(&start__
== &max__); start__ >= max__ || size__ > max__ - start__
; })
) {
862 ret = -EINVAL22;
863 goto err;
864 }
865
866 /* Writes not allowed into this read-only object */
867 if (i915_gem_object_is_readonly(obj)) {
868 ret = -EINVAL22;
869 goto err;
870 }
871
872 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
873
874 ret = -ENODEV19;
875 if (obj->ops->pwrite)
876 ret = obj->ops->pwrite(obj, args);
877 if (ret != -ENODEV19)
878 goto err;
879
880 ret = i915_gem_object_wait(obj,
881 I915_WAIT_INTERRUPTIBLE(1UL << (0)) |
882 I915_WAIT_ALL(1UL << (2)),
883 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
884 if (ret)
885 goto err;
886
887 ret = i915_gem_object_pin_pages(obj);
888 if (ret)
889 goto err;
890
891 ret = -EFAULT14;
892 /* We can only do the GTT pwrite on untiled buffers, as otherwise
893 * it would end up going through the fenced access, and we'll get
894 * different detiling behavior between reading and writing.
895 * pread/pwrite currently are reading and writing from the CPU
896 * perspective, requiring manual detiling by the client.
897 */
898 if (!i915_gem_object_has_struct_page(obj) ||
899 cpu_write_needs_clflush(obj))
900 /* Note that the gtt paths might fail with non-page-backed user
901 * pointers (e.g. gtt mappings when moving data between
902 * textures). Fallback to the shmem path in that case.
903 */
904 ret = i915_gem_gtt_pwrite_fast(obj, args);
905
906 if (ret == -EFAULT14 || ret == -ENOSPC28) {
907 if (i915_gem_object_has_struct_page(obj))
908 ret = i915_gem_shmem_pwrite(obj, args);
909 }
910
911 i915_gem_object_unpin_pages(obj);
912err:
913 i915_gem_object_put(obj);
914 return ret;
915}
916
917/**
918 * Called when user space has done writes to this buffer
919 * @dev: drm device
920 * @data: ioctl data blob
921 * @file: drm file
922 */
923int
924i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
925 struct drm_file *file)
926{
927 struct drm_i915_gem_sw_finish *args = data;
928 struct drm_i915_gem_object *obj;
929
930 obj = i915_gem_object_lookup(file, args->handle);
931 if (!obj)
932 return -ENOENT2;
933
934 /*
935 * Proxy objects are barred from CPU access, so there is no
936 * need to ban sw_finish as it is a nop.
937 */
938
939 /* Pinned buffers may be scanout, so flush the cache */
940 i915_gem_object_flush_if_display(obj);
941 i915_gem_object_put(obj);
942
943 return 0;
944}
945
946void i915_gem_runtime_suspend(struct drm_i915_privateinteldrm_softc *i915)
947{
948 struct drm_i915_gem_object *obj, *on;
949 int i;
950
951 /*
952 * Only called during RPM suspend. All users of the userfault_list
953 * must be holding an RPM wakeref to ensure that this can not
954 * run concurrently with themselves (and use the struct_mutex for
955 * protection between themselves).
956 */
957
958 list_for_each_entry_safe(obj, on,for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->userfault_link
) *__mptr = ((&i915->ggtt.userfault_list)->next); (
__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*obj), userfault_link) );}), on = ({ const __typeof( ((__typeof
(*obj) *)0)->userfault_link ) *__mptr = (obj->userfault_link
.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*obj), userfault_link) );}); &obj->userfault_link
!= (&i915->ggtt.userfault_list); obj = on, on = ({ const
__typeof( ((__typeof(*on) *)0)->userfault_link ) *__mptr =
(on->userfault_link.next); (__typeof(*on) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*on), userfault_link) );}))
959 &i915->ggtt.userfault_list, userfault_link)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->userfault_link
) *__mptr = ((&i915->ggtt.userfault_list)->next); (
__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*obj), userfault_link) );}), on = ({ const __typeof( ((__typeof
(*obj) *)0)->userfault_link ) *__mptr = (obj->userfault_link
.next); (__typeof(*obj) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*obj), userfault_link) );}); &obj->userfault_link
!= (&i915->ggtt.userfault_list); obj = on, on = ({ const
__typeof( ((__typeof(*on) *)0)->userfault_link ) *__mptr =
(on->userfault_link.next); (__typeof(*on) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*on), userfault_link) );}))
960 __i915_gem_object_release_mmap_gtt(obj);
961
962 /*
963 * The fence will be lost when the device powers down. If any were
964 * in use by hardware (i.e. they are pinned), we should not be powering
965 * down! All other fences will be reacquired by the user upon waking.
966 */
967 for (i = 0; i < i915->ggtt.num_fences; i++) {
968 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
969
970 /*
971 * Ideally we want to assert that the fence register is not
972 * live at this point (i.e. that no piece of code will be
973 * trying to write through fence + GTT, as that both violates
974 * our tracking of activity and associated locking/barriers,
975 * but also is illegal given that the hw is powered down).
976 *
977 * Previously we used reg->pin_count as a "liveness" indicator.
978 * That is not sufficient, and we need a more fine-grained
979 * tool if we want to have a sanity check here.
980 */
981
982 if (!reg->vma)
983 continue;
984
985 GEM_BUG_ON(i915_vma_has_userfault(reg->vma))((void)0);
986 reg->dirty = true1;
987 }
988}
989
990static void discard_ggtt_vma(struct i915_vma *vma)
991{
992 struct drm_i915_gem_object *obj = vma->obj;
993
994 spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock);
995 if (!RB_EMPTY_NODE(&vma->obj_node)((&vma->obj_node)->__entry.rbe_parent == &vma->
obj_node)
) {
996 rb_erase(&vma->obj_node, &obj->vma.tree)linux_root_RB_REMOVE((struct linux_root *)(&obj->vma.tree
), (&vma->obj_node))
;
997 RB_CLEAR_NODE(&vma->obj_node)(((&vma->obj_node))->__entry.rbe_parent = (&vma
->obj_node))
;
998 }
999 spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock);
1000}
1001
1002struct i915_vma *
1003i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1004 struct i915_gem_ww_ctx *ww,
1005 const struct i915_ggtt_view *view,
1006 u64 size, u64 alignment, u64 flags)
1007{
1008 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
1009 struct i915_ggtt *ggtt = &i915->ggtt;
1010 struct i915_vma *vma;
1011 int ret;
1012
1013 if (flags & PIN_MAPPABLE(1ULL << (3)) &&
1014 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1015 /*
1016 * If the required space is larger than the available
1018 * aperture, we will not be able to find a slot for the
1018 * object and unbinding the object now will be in
1019 * vain. Worse, doing so may cause us to ping-pong
1020 * the object in and out of the Global GTT and
1021 * waste a lot of cycles under the mutex.
1022 */
1023 if (obj->base.size > ggtt->mappable_end)
1024 return ERR_PTR(-E2BIG7);
1025
1026 /*
1027 * If NONBLOCK is set the caller is optimistically
1028 * trying to cache the full object within the mappable
1029 * aperture, and *must* have a fallback in place for
1030 * situations where we cannot bind the object. We
1031 * can be a little more lax here and use the fallback
1032 * more often to avoid costly migrations of ourselves
1033 * and other objects within the aperture.
1034 *
1035 * Half-the-aperture is used as a simple heuristic.
1036 * More interesting would be to do a search for a free
1037 * block prior to making the commitment to unbind.
1038 * That caters for the self-harm case, and with a
1039 * little more heuristics (e.g. NOFAULT, NOEVICT)
1040 * we could try to minimise harm to others.
1041 */
1042 if (flags & PIN_NONBLOCK(1ULL << (2)) &&
1043 obj->base.size > ggtt->mappable_end / 2)
1044 return ERR_PTR(-ENOSPC28);
1045 }
1046
1047new_vma:
1048 vma = i915_vma_instance(obj, &ggtt->vm, view);
1049 if (IS_ERR(vma))
1050 return vma;
1051
1052 if (i915_vma_misplaced(vma, size, alignment, flags)) {
1053 if (flags & PIN_NONBLOCK(1ULL << (2))) {
1054 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1055 return ERR_PTR(-ENOSPC28);
1056
1057 if (flags & PIN_MAPPABLE(1ULL << (3)) &&
1058 vma->fence_size > ggtt->mappable_end / 2)
1059 return ERR_PTR(-ENOSPC28);
1060 }
1061
1062 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
1063 discard_ggtt_vma(vma);
1064 goto new_vma;
1065 }
1066
1067 ret = i915_vma_unbind(vma);
1068 if (ret)
1069 return ERR_PTR(ret);
1070 }
1071
1072 ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL(1ULL << (10)));
1073 if (ret)
1074 return ERR_PTR(ret);
1075
1076 if (vma->fence && !i915_gem_object_is_tiled(obj)) {
1077 mutex_lock(&ggtt->vm.mutex)rw_enter_write(&ggtt->vm.mutex);
1078 i915_vma_revoke_fence(vma);
1079 mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex);
1080 }
1081
1082 ret = i915_vma_wait_for_bind(vma);
1083 if (ret) {
1084 i915_vma_unpin(vma);
1085 return ERR_PTR(ret);
1086 }
1087
1088 return vma;
1089}
1090
1091int
1092i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1093 struct drm_file *file_priv)
1094{
1095 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
1096 struct drm_i915_gem_madvise *args = data;
1097 struct drm_i915_gem_object *obj;
1098 int err;
1099
1100 switch (args->madv) {
1101 case I915_MADV_DONTNEED1:
1102 case I915_MADV_WILLNEED0:
1103 break;
1104 default:
1105 return -EINVAL22;
1106 }
1107
1108 obj = i915_gem_object_lookup(file_priv, args->handle);
1109 if (!obj)
1110 return -ENOENT2;
1111
1112 err = mutex_lock_interruptible(&obj->mm.lock);
1113 if (err)
1114 goto out;
1115
1116 if (i915_gem_object_has_pages(obj) &&
1117 i915_gem_object_is_tiled(obj) &&
1118 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES(1<<5)) {
1119 if (obj->mm.madv == I915_MADV_WILLNEED0) {
1120 GEM_BUG_ON(!obj->mm.quirked)((void)0);
1121 __i915_gem_object_unpin_pages(obj);
1122 obj->mm.quirked = false0;
1123 }
1124 if (args->madv == I915_MADV_WILLNEED0) {
1125 GEM_BUG_ON(obj->mm.quirked)((void)0);
1126 __i915_gem_object_pin_pages(obj);
1127 obj->mm.quirked = true1;
1128 }
1129 }
1130
1131 if (obj->mm.madv != __I915_MADV_PURGED2)
1132 obj->mm.madv = args->madv;
1133
1134 if (i915_gem_object_has_pages(obj)) {
1135 struct list_head *list;
1136
1137 if (i915_gem_object_is_shrinkable(obj)) {
1138 unsigned long flags;
1139
1140 spin_lock_irqsave(&i915->mm.obj_lock, flags)do { flags = 0; mtx_enter(&i915->mm.obj_lock); } while
(0)
;
1141
1142 if (obj->mm.madv != I915_MADV_WILLNEED0)
1143 list = &i915->mm.purge_list;
1144 else
1145 list = &i915->mm.shrink_list;
1146 list_move_tail(&obj->mm.link, list);
1147
1148 spin_unlock_irqrestore(&i915->mm.obj_lock, flags)do { (void)(flags); mtx_leave(&i915->mm.obj_lock); } while
(0)
;
1149 }
1150 }
1151
1152 /* if the object is no longer attached, discard its backing storage */
1153 if (obj->mm.madv == I915_MADV_DONTNEED1 &&
1154 !i915_gem_object_has_pages(obj))
1155 i915_gem_object_truncate(obj);
1156
1157 args->retained = obj->mm.madv != __I915_MADV_PURGED2;
1158 mutex_unlock(&obj->mm.lock)rw_exit_write(&obj->mm.lock);
1159
1160out:
1161 i915_gem_object_put(obj);
1162 return err;
1163}
1164
1165int i915_gem_init(struct drm_i915_privateinteldrm_softc *dev_priv)
1166{
1167 int ret;
1168
1169 /* We need to fallback to 4K pages if host doesn't support huge gtt. */
1170 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1171 mkwrite_device_info(dev_priv)->page_sizes =
1172 I915_GTT_PAGE_SIZE_4K(1ULL << (12));
1173
1174 ret = i915_gem_init_userptr(dev_priv);
1175 if (ret)
1176 return ret;
1177
1178 intel_uc_fetch_firmwares(&dev_priv->gt.uc);
1179 intel_wopcm_init(&dev_priv->wopcm);
1180
1181 ret = i915_init_ggtt(dev_priv);
1182 if (ret) {
1183 GEM_BUG_ON(ret == -EIO)((void)0);
1184 goto err_unlock;
1185 }
1186
1187 /*
1188 * Despite its name intel_init_clock_gating applies both display
1189 * clock gating workarounds; GT mmio workarounds and the occasional
1190 * GT power context workaround. Worse, sometimes it includes a context
1191 * register workaround which we need to apply before we record the
1192 * default HW state for all contexts.
1193 *
1194 * FIXME: break up the workarounds and apply them at the right time!
1195 */
1196 intel_init_clock_gating(dev_priv);
1197
1198 ret = intel_gt_init(&dev_priv->gt);
1199 if (ret)
1200 goto err_unlock;
1201
1202 return 0;
1203
1204 /*
1205 * Unwinding is complicated by that we want to handle -EIO to mean
1206 * disable GPU submission but keep KMS alive. We want to mark the
1207 * HW as irreversibly wedged, but keep enough state around that the
1208 * driver doesn't explode during runtime.
1209 */
1210err_unlock:
1211 i915_gem_drain_workqueue(dev_priv);
1212
1213 if (ret != -EIO5) {
1214 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1215 i915_gem_cleanup_userptr(dev_priv);
1216 }
1217
1218 if (ret == -EIO5) {
1219 /*
1220 * Allow engines or uC initialisation to fail by marking the GPU
1221 * as wedged. But we only want to do this when the GPU is angry,
1222 * for all other failure, such as an allocation failure, bail.
1223 */
1224 if (!intel_gt_is_wedged(&dev_priv->gt)) {
1225 i915_probe_error(dev_priv,__i915_printk(dev_priv, 0 ? "\0017" : "\0013", "Failed to initialize GPU, declaring it wedged!\n"
)
1226 "Failed to initialize GPU, declaring it wedged!\n")__i915_printk(dev_priv, 0 ? "\0017" : "\0013", "Failed to initialize GPU, declaring it wedged!\n"
)
;
1227 intel_gt_set_wedged(&dev_priv->gt);
1228 }
1229
1230 /* Minimal basic recovery for KMS */
1231 ret = i915_ggtt_enable_hw(dev_priv);
1232 i915_ggtt_resume(&dev_priv->ggtt);
1233 intel_init_clock_gating(dev_priv);
1234 }
1235
1236 i915_gem_drain_freed_objects(dev_priv);
1237 return ret;
1238}
1239
1240void i915_gem_driver_register(struct drm_i915_privateinteldrm_softc *i915)
1241{
1242 i915_gem_driver_register__shrinker(i915);
1243
1244 intel_engines_driver_register(i915);
1245}
1246
1247void i915_gem_driver_unregister(struct drm_i915_privateinteldrm_softc *i915)
1248{
1249 i915_gem_driver_unregister__shrinker(i915);
1250}
1251
1252void i915_gem_driver_remove(struct drm_i915_privateinteldrm_softc *dev_priv)
1253{
1254 intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
1255
1256 i915_gem_suspend_late(dev_priv);
1257 intel_gt_driver_remove(&dev_priv->gt);
1258 dev_priv->uabi_engines = RB_ROOT(struct rb_root) { ((void *)0) };
1259
1260 /* Flush any outstanding unpin_work. */
1261 i915_gem_drain_workqueue(dev_priv);
1262
1263 i915_gem_drain_freed_objects(dev_priv);
1264}
1265
1266void i915_gem_driver_release(struct drm_i915_privateinteldrm_softc *dev_priv)
1267{
1268 i915_gem_driver_release__contexts(dev_priv);
1269
1270 intel_gt_driver_release(&dev_priv->gt);
1271
1272 intel_wa_list_free(&dev_priv->gt_wa_list);
1273
1274 intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
1275 i915_gem_cleanup_userptr(dev_priv);
1276
1277 i915_gem_drain_freed_objects(dev_priv);
1278
1279 drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list))({ int __ret = !!((!list_empty(&dev_priv->gem.contexts
.list))); if (__ret) printf("%s %s: " "%s", dev_driver_string
(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "!list_empty(&dev_priv->gem.contexts.list)"
")"); __builtin_expect(!!(__ret), 0); })
;
1280}
1281
1282static void i915_gem_init__mm(struct drm_i915_privateinteldrm_softc *i915)
1283{
1284 mtx_init(&i915->mm.obj_lock, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&i915->
mm.obj_lock), ((((0x0)) > 0x0 && ((0x0)) < 0x9)
? 0x9 : ((0x0)))); } while (0)
;
1285
1286 init_llist_head(&i915->mm.free_list);
1287
1288 INIT_LIST_HEAD(&i915->mm.purge_list);
1289 INIT_LIST_HEAD(&i915->mm.shrink_list);
1290
1291 i915_gem_init__objects(i915);
1292}
1293
1294void i915_gem_init_early(struct drm_i915_privateinteldrm_softc *dev_priv)
1295{
1296 i915_gem_init__mm(dev_priv);
1297 i915_gem_init__contexts(dev_priv);
1298
1299 mtx_init(&dev_priv->fb_tracking.lock, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev_priv
->fb_tracking.lock), ((((0x0)) > 0x0 && ((0x0))
< 0x9) ? 0x9 : ((0x0)))); } while (0)
;
1300}
1301
1302void i915_gem_cleanup_early(struct drm_i915_privateinteldrm_softc *dev_priv)
1303{
1304 i915_gem_drain_freed_objects(dev_priv);
1305 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list))((void)0);
1306 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count))((void)0);
1307 drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count)({ int __ret = !!((dev_priv->mm.shrink_count)); if (__ret)
printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "dev_priv->mm.shrink_count"
")"); __builtin_expect(!!(__ret), 0); })
;
1308}
1309
1310int i915_gem_freeze(struct drm_i915_privateinteldrm_softc *dev_priv)
1311{
1312 /* Discard all purgeable objects, let userspace recover those as
1313 * required after resuming.
1314 */
1315 i915_gem_shrink_all(dev_priv);
1316
1317 return 0;
1318}
1319
1320int i915_gem_freeze_late(struct drm_i915_privateinteldrm_softc *i915)
1321{
1322 struct drm_i915_gem_object *obj;
1323 intel_wakeref_t wakeref;
1324
1325 /*
1326 * Called just before we write the hibernation image.
1327 *
1328 * We need to update the domain tracking to reflect that the CPU
1329 * will be accessing all the pages to create and restore from the
1330 * hibernation, and so upon restoration those pages will be in the
1331 * CPU domain.
1332 *
1333 * To make sure the hibernation image contains the latest state,
1334 * we update that state just before writing out the image.
1335 *
1336 * To try and reduce the hibernation image, we manually shrink
1337 * the objects as well, see i915_gem_freeze()
1338 */
1339
1340 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1341
1342 i915_gem_shrink(i915, -1UL, NULL((void *)0), ~0);
1343 i915_gem_drain_freed_objects(i915);
1344
1345 list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)for (obj = ({ const __typeof( ((__typeof(*obj) *)0)->mm.link
) *__mptr = ((&i915->mm.shrink_list)->next); (__typeof
(*obj) *)( (char *)__mptr - __builtin_offsetof(__typeof(*obj)
, mm.link) );}); &obj->mm.link != (&i915->mm.shrink_list
); obj = ({ const __typeof( ((__typeof(*obj) *)0)->mm.link
) *__mptr = (obj->mm.link.next); (__typeof(*obj) *)( (char
*)__mptr - __builtin_offsetof(__typeof(*obj), mm.link) );}))
{
1: Loop condition is true. Entering loop body
1346 i915_gem_object_lock(obj, NULL((void *)0));
2: Passing null pointer value via 2nd parameter 'ww'
3: Calling 'i915_gem_object_lock'
1347 drm_WARN_ON(&i915->drm,({ int __ret = !!((i915_gem_object_set_to_cpu_domain(obj, 1))
); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "i915_gem_object_set_to_cpu_domain(obj, 1)"
")"); __builtin_expect(!!(__ret), 0); })
1348 i915_gem_object_set_to_cpu_domain(obj, true))({ int __ret = !!((i915_gem_object_set_to_cpu_domain(obj, 1))
); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "i915_gem_object_set_to_cpu_domain(obj, 1)"
")"); __builtin_expect(!!(__ret), 0); })
;
1349 i915_gem_object_unlock(obj);
1350 }
1351
1352 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1353
1354 return 0;
1355}
1356
1357int i915_gem_open(struct drm_i915_privateinteldrm_softc *i915, struct drm_file *file)
1358{
1359 struct drm_i915_file_private *file_priv;
1360 int ret;
1361
1362 DRM_DEBUG("\n")__drm_dbg(DRM_UT_CORE, "\n");
1363
1364 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL(0x0001 | 0x0004));
1365 if (!file_priv)
1366 return -ENOMEM12;
1367
1368 file->driver_priv = file_priv;
1369 file_priv->dev_priv = i915;
1370 file_priv->file = file;
1371
1372 file_priv->bsd_engine = -1;
1373 file_priv->hang_timestamp = jiffies;
1374
1375 ret = i915_gem_context_open(i915, file);
1376 if (ret)
1377 kfree(file_priv);
1378
1379 return ret;
1380}
1381
1382void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool_Bool intr)
1383{
1384 ww_acquire_init(&ww->ctx, &reservation_ww_class);
1385 INIT_LIST_HEAD(&ww->obj_list);
1386 ww->intr = intr;
1387 ww->contended = NULL((void *)0);
1388}
1389
1390static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
1391{
1392 struct drm_i915_gem_object *obj;
1393
1394 while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link)(list_empty(&ww->obj_list) ? ((void *)0) : ({ const __typeof
( ((struct drm_i915_gem_object *)0)->obj_link ) *__mptr = (
(&ww->obj_list)->next); (struct drm_i915_gem_object
*)( (char *)__mptr - __builtin_offsetof(struct drm_i915_gem_object
, obj_link) );}))
)) {
1395 list_del(&obj->obj_link);
1396 i915_gem_object_unlock(obj);
1397 }
1398}
1399
1400void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
1401{
1402 list_del(&obj->obj_link);
1403 i915_gem_object_unlock(obj);
1404}
1405
1406void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
1407{
1408 i915_gem_ww_ctx_unlock_all(ww);
1409 WARN_ON(ww->contended)({ int __ret = !!((ww->contended)); if (__ret) printf("%s"
, "WARN_ON(" "ww->contended" ")"); __builtin_expect(!!(__ret
), 0); })
;
1410 ww_acquire_fini(&ww->ctx);
1411}
1412
1413int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
1414{
1415 int ret = 0;
1416
1417 if (WARN_ON(!ww->contended)({ int __ret = !!((!ww->contended)); if (__ret) printf("%s"
, "WARN_ON(" "!ww->contended" ")"); __builtin_expect(!!(__ret
), 0); })
)
1418 return -EINVAL22;
1419
1420 i915_gem_ww_ctx_unlock_all(ww);
1421 if (ww->intr)
1422 ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
1423 else
1424 dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
1425
1426 if (!ret)
1427 list_add_tail(&ww->contended->obj_link, &ww->obj_list);
1428
1429 ww->contended = NULL((void *)0);
1430
1431 return ret;
1432}
1433
1434#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0
1435#include "selftests/mock_gem_device.c"
1436#include "selftests/i915_gem.c"
1437#endif
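
The ww helpers defined above (i915_gem_ww_ctx_init(), i915_gem_ww_ctx_backoff(), i915_gem_ww_ctx_fini()) are intended to be used together with i915_gem_object_lock() in a backoff-and-retry loop, in which case ww is non-NULL and the -EDEADLK bookkeeping in the header below is well defined. A sketch of that usage, assuming a valid object pointer obtained elsewhere (illustrative only, not code from this report):

/* Sketch of the intended ww locking pattern; obj is assumed to be a valid object. */
static int example_ww_locked_op(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);            /* intr == true: interruptible waits */
retry:
        err = i915_gem_object_lock(obj, &ww);       /* records obj in ww.contended on -EDEADLK */
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww); /* drop all held locks, wait on the contended object */
                if (!err)
                        goto retry;
        }
        if (!err) {
                /* ... operate on obj while its dma-resv lock is held ... */
        }
        i915_gem_ww_ctx_fini(&ww);                  /* unlocks anything still on ww.obj_list */
        return err;
}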

/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_object.h

1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2016 Intel Corporation
5 */
6
7#ifndef __I915_GEM_OBJECT_H__
8#define __I915_GEM_OBJECT_H__
9
10#include <drm/drm_gem.h>
11#include <drm/drm_file.h>
12#include <drm/drm_device.h>
13
14#include "display/intel_frontbuffer.h"
15#include "i915_gem_object_types.h"
16#include "i915_gem_gtt.h"
17#include "i915_vma_types.h"
18
19void i915_gem_init__objects(struct drm_i915_privateinteldrm_softc *i915);
20
21struct drm_i915_gem_object *i915_gem_object_alloc(void);
22void i915_gem_object_free(struct drm_i915_gem_object *obj);
23
24void i915_gem_object_init(struct drm_i915_gem_object *obj,
25 const struct drm_i915_gem_object_ops *ops,
26 struct lock_class_key *key);
27struct drm_i915_gem_object *
28i915_gem_object_create_shmem(struct drm_i915_privateinteldrm_softc *i915,
29 resource_size_t size);
30struct drm_i915_gem_object *
31i915_gem_object_create_shmem_from_data(struct drm_i915_privateinteldrm_softc *i915,
32 const void *data, resource_size_t size);
33
34extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
35void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
36 struct sg_table *pages,
37 bool_Bool needs_clflush);
38
39int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
40
41void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
42void i915_gem_free_object(struct drm_gem_object *obj);
43
44void i915_gem_flush_free_objects(struct drm_i915_privateinteldrm_softc *i915);
45
46struct sg_table *
47__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
48void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
49
50/**
51 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
52 * @filp: DRM file private date
53 * @handle: userspace handle
54 *
55 * Returns:
56 *
57 * A pointer to the object named by the handle if such exists on @filp, NULL
58 * otherwise. This object is only valid whilst under the RCU read lock, and
59 * note carefully the object may be in the process of being destroyed.
60 */
61static inline struct drm_i915_gem_object *
62i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
63{
64#ifdef CONFIG_LOCKDEP
65 WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map))({ int __ret = !!((debug_locks && !lock_is_held(&
rcu_lock_map))); if (__ret) printf("%s", "WARN_ON(" "debug_locks && !lock_is_held(&rcu_lock_map)"
")"); __builtin_expect(!!(__ret), 0); })
;
66#endif
67 return idr_find(&file->object_idr, handle);
68}
69
70static inline struct drm_i915_gem_object *
71i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
72{
73 if (obj && !kref_get_unless_zero(&obj->base.refcount))
74 obj = NULL((void *)0);
75
76 return obj;
77}
78
79static inline struct drm_i915_gem_object *
80i915_gem_object_lookup(struct drm_file *file, u32 handle)
81{
82 struct drm_i915_gem_object *obj;
83
84 rcu_read_lock();
85 obj = i915_gem_object_lookup_rcu(file, handle);
86 obj = i915_gem_object_get_rcu(obj);
87 rcu_read_unlock();
88
89 return obj;
90}
91
92__deprecated
93struct drm_gem_object *
94drm_gem_object_lookup(struct drm_file *file, u32 handle);
95
96__attribute__((nonnull))
97static inline struct drm_i915_gem_object *
98i915_gem_object_get(struct drm_i915_gem_object *obj)
99{
100 drm_gem_object_get(&obj->base);
101 return obj;
102}
103
104__attribute__((nonnull))
105static inline void
106i915_gem_object_put(struct drm_i915_gem_object *obj)
107{
108 __drm_gem_object_put(&obj->base);
109}
110
111#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
112
113static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
114 struct i915_gem_ww_ctx *ww,
115 bool intr)
116{
117 int ret;
118
119 if (intr)
  5.1: 'intr' is false
  6: Taking false branch
120 ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
121 else
122 ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
  6.1: 'ww' is null
  7: '?' condition is false
123
124 if (!ret && ww)
  8: Assuming 'ret' is not equal to 0
125 list_add_tail(&obj->obj_link, &ww->obj_list);
126 if (ret == -EALREADY)
  9: Assuming the condition is false
  10: Taking false branch
127 ret = 0;
128
129 if (ret == -EDEADLK)
  11: Assuming the condition is true
  12: Taking true branch
130 ww->contended = obj;
  13: Access to field 'contended' results in a dereference of a null pointer (loaded from variable 'ww')
131
132 return ret;
133}
134
135static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
136 struct i915_gem_ww_ctx *ww)
137{
138 return __i915_gem_object_lock(obj, ww, ww && ww->intr);
  3.1: 'ww' is null
  4: Passing null pointer value via 2nd parameter 'ww'
  5: Calling '__i915_gem_object_lock'
139}
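The reported path enters this wrapper with ww == NULL (steps 3.1 and 4), takes the non-interruptible dma_resv_lock() branch in __i915_gem_object_lock() (steps 6 and 7), then assumes the lock call returned -EDEADLK (step 11), at which point line 130 dereferences the NULL ww (step 13). ww_mutex deadlock detection only reports -EDEADLK when an acquire context is supplied, so the NULL case should be unreachable at runtime, but nothing in the code states that invariant. A sketch of a guard that would make it explicit to the analyzer (illustrative only, not the change applied in this tree):

	/* -EDEADLK can only come from a contended ww acquire context, so
	 * ww is non-NULL on this branch; the extra check is free at
	 * runtime and documents the invariant for static analysis. */
	if (ret == -EDEADLK && ww)
		ww->contended = obj;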
140
141static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
142 struct i915_gem_ww_ctx *ww)
143{
144 WARN_ON(ww && !ww->intr);
145 return __i915_gem_object_lock(obj, ww, true);
146}
147
148static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
149{
150 return dma_resv_trylock(obj->base.resv);
151}
152
153static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
154{
155 dma_resv_unlock(obj->base.resv);
156}
157
158struct dma_fence *
159i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
160void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
161 struct dma_fence *fence);
162
163static inline void
164i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
165{
166 obj->flags |= I915_BO_READONLY;
167}
168
169static inline bool
170i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
171{
172 return obj->flags & I915_BO_READONLY;
173}
174
175static inline bool
176i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
177{
178 return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
179}
180
181static inline bool
182i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
183{
184 return obj->flags & I915_BO_ALLOC_VOLATILE;
185}
186
187static inline void
188i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
189{
190 obj->flags |= I915_BO_ALLOC_VOLATILE;
191}
192
193static inline bool
194i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
195 unsigned long flags)
196{
197 return obj->ops->flags & flags;
198}
199
200static inline bool
201i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
202{
203 return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
204}
205
206static inline bool
207i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
208{
209 return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
210}
211
212static inline bool
213i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
214{
215 return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
216}
217
218static inline bool
219i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
220{
221 return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
222}
223
224static inline bool
225i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
226{
227 return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
228}
229
230static inline bool
231i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
232{
233 return READ_ONCE(obj->frontbuffer);
234}
235
236static inline unsigned int
237i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
238{
239 return obj->tiling_and_stride & TILING_MASK;
240}
241
242static inline bool
243i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
244{
245 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
246}
247
248static inline unsigned int
249i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
250{
251 return obj->tiling_and_stride & STRIDE_MASK;
252}
253
254static inline unsigned int
255i915_gem_tile_height(unsigned int tiling)
256{
257 GEM_BUG_ON(!tiling);
258 return tiling == I915_TILING_Y ? 32 : 8;
259}
260
261static inline unsigned int
262i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
263{
264 return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
265}
266
267static inline unsigned int
268i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
269{
270 return (i915_gem_object_get_stride(obj) *
271 i915_gem_object_get_tile_height(obj));
272}
273
274int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
275 unsigned int tiling, unsigned int stride);
276
277struct scatterlist *
278i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
279 unsigned int n, unsigned int *offset);
280
281struct vm_page *
282i915_gem_object_get_page(struct drm_i915_gem_object *obj,
283 unsigned int n);
284
285struct vm_page *
286i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
287 unsigned int n);
288
289dma_addr_t
290i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
291 unsigned long n,
292 unsigned int *len);
293
294dma_addr_t
295i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
296 unsigned long n);
297
298void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
299 struct sg_table *pages,
300 unsigned int sg_page_sizes);
301
302int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
303int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
304
305enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
306 I915_MM_NORMAL = 0,
307 /*
308 * Only used by struct_mutex, when called "recursively" from
309 * direct-reclaim-esque. Safe because there is only ever one
310 * struct_mutex in the entire system.
311 */
312 I915_MM_SHRINKER = 1,
313 /*
314 * Used for obj->mm.lock when allocating pages. Safe because the object
315 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
316 * it. As soon as the object has pages, obj->mm.lock nests within
317 * fs_reclaim.
318 */
319 I915_MM_GET_PAGES = 1,
320};
321
322static inline int __must_check
323i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
324{
325 might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
326
327 if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
328 return 0;
329
330 return __i915_gem_object_get_pages(obj);
331}
332
333static inline bool
334i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
335{
336 return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
337}
338
339static inline void
340__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
341{
342 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
343
344 atomic_inc(&obj->mm.pages_pin_count);
345}
346
347static inline bool
348i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
349{
350 return atomic_read(&obj->mm.pages_pin_count);
351}
352
353static inline void
354__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
355{
356 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
357 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
358
359 atomic_dec(&obj->mm.pages_pin_count);
360}
361
362static inline void
363i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
364{
365 __i915_gem_object_unpin_pages(obj);
366}
367
368int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
369void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
370void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
371
372enum i915_map_type {
373 I915_MAP_WB = 0,
374 I915_MAP_WC,
375#define I915_MAP_OVERRIDE BIT(31)
376 I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
377 I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
378};
379
380/**
381 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
382 * @obj: the object to map into kernel address space
383 * @type: the type of mapping, used to select pgprot_t
384 *
385 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
386 * pages and then returns a contiguous mapping of the backing storage into
387 * the kernel address space. Based on the @type of mapping, the PTE will be
388 * set to either WriteBack or WriteCombine (via pgprot_t).
389 *
390 * The caller is responsible for calling i915_gem_object_unpin_map() when the
391 * mapping is no longer required.
392 *
393 * Returns the pointer through which to access the mapped object, or an
394 * ERR_PTR() on error.
395 */
396void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
397 enum i915_map_type type);
398
399void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
400 unsigned long offset,
401 unsigned long size);
402static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
403{
404 __i915_gem_object_flush_map(obj, 0, obj->base.size);
405}
406
407/**
408 * i915_gem_object_unpin_map - releases an earlier mapping
409 * @obj: the object to unmap
410 *
411 * After pinning the object and mapping its pages, once you are finished
412 * with your access, call i915_gem_object_unpin_map() to release the pin
413 * upon the mapping. Once the pin count reaches zero, that mapping may be
414 * removed.
415 */
416static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
417{
418 i915_gem_object_unpin_pages(obj);
419}
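The two kernel-doc comments above describe a pin, access, unpin sequence around a kernel mapping of the object. A short sketch of that contract (the function name, the write-back mapping type, and the unconditional flush are illustrative assumptions, not part of this header):

static int example_fill_object(struct drm_i915_gem_object *obj, u8 value)
{
	void *vaddr;

	/* Pins the backing pages and returns a contiguous WB mapping. */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, value, obj->base.size);

	/* Push CPU writes out of any caches the device cannot snoop. */
	i915_gem_object_flush_map(obj);

	/* Drop the pin taken by pin_map; the mapping may now be reaped. */
	i915_gem_object_unpin_map(obj);

	return 0;
}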
420
421void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
422
423void
424i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
425 unsigned int flush_domains);
426
427int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
428 unsigned int *needs_clflush);
429int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
430 unsigned int *needs_clflush);
431#define CLFLUSH_BEFORE BIT(0)
432#define CLFLUSH_AFTER BIT(1)
433#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
434
435static inline void
436i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
437{
438 i915_gem_object_unpin_pages(obj);
439}
440
441static inline struct intel_engine_cs *
442i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
443{
444 struct intel_engine_cs *engine = NULL;
445 struct dma_fence *fence;
446
447 rcu_read_lock();
448 fence = dma_resv_get_excl_rcu(obj->base.resv);
449 rcu_read_unlock();
450
451 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
452 engine = to_request(fence)->engine;
453 dma_fence_put(fence);
454
455 return engine;
456}
457
458void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
459 unsigned int cache_level);
460void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
461void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
462
463int __must_check
464i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
465int __must_check
466i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
467int __must_check
468i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
469struct i915_vma * __must_check
470i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
471 u32 alignment,
472 const struct i915_ggtt_view *view,
473 unsigned int flags);
474
475void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
476void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
477void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
478
479static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
480{
481 if (obj->cache_dirty)
482 return false;
483
484 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
485 return true;
486
487 /* Currently in use by HW (display engine)? Keep flushed. */
488 return i915_gem_object_is_framebuffer(obj);
489}
490
491static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
492{
493 obj->read_domains = I915_GEM_DOMAIN_CPU;
494 obj->write_domain = I915_GEM_DOMAIN_CPU;
495 if (cpu_write_needs_clflush(obj))
496 obj->cache_dirty = true;
497}
498
499int i915_gem_object_wait(struct drm_i915_gem_object *obj,
500 unsigned int flags,
501 long timeout);
502int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
503 unsigned int flags,
504 const struct i915_sched_attr *attr);
505
506void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
507 enum fb_op_origin origin);
508void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
509 enum fb_op_origin origin);
510
511static inline void
512i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
513 enum fb_op_origin origin)
514{
515 if (unlikely(rcu_access_pointer(obj->frontbuffer)))
516 __i915_gem_object_flush_frontbuffer(obj, origin);
517}
518
519static inline void
520i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
521 enum fb_op_origin origin)
522{
523 if (unlikely(rcu_access_pointer(obj->frontbuffer)))
524 __i915_gem_object_invalidate_frontbuffer(obj, origin);
525}
526
527#endif