Bug Summary

File: dev/pci/drm/drm_gem.c
Warning: line 1426, column 2
Value stored to 'ret' is never read
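
The deadcode.DeadStores checker flags an assignment whose stored value cannot be observed on any path that follows it. A minimal sketch of the pattern (illustrative only, not taken from this file):

    static int g(void) { return -1; }

    int f(void)
    {
            int ret;

            ret = g();      /* value stored to 'ret'... */
            return 0;       /* ...is never read: the store is dead */
    }

In this report the flagged store is the drm_gem_mmap_obj() return value in the OpenBSD variant of drm_gem_mmap(); see the inline annotation at source line 1426 below, and the sketch of one possible fix after that function.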

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name drm_gem.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/drm_gem.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <linux/dma-buf.h>
29#include <linux/file.h>
30#include <linux/fs.h>
31#include <linux/iosys-map.h>
32#include <linux/mem_encrypt.h>
33#include <linux/mm.h>
34#include <linux/mman.h>
35#include <linux/module.h>
36#include <linux/pagemap.h>
37#include <linux/pagevec.h>
38#include <linux/shmem_fs.h>
39#include <linux/slab.h>
40#include <linux/string_helpers.h>
41#include <linux/types.h>
42#include <linux/uaccess.h>
43
44#include <drm/drm.h>
45#include <drm/drm_device.h>
46#include <drm/drm_drv.h>
47#include <drm/drm_file.h>
48#include <drm/drm_gem.h>
49#include <drm/drm_managed.h>
50#include <drm/drm_print.h>
51#include <drm/drm_vma_manager.h>
52
53#include "drm_internal.h"
54
55#include <sys/conf.h>
56#include <uvm/uvm.h>
57
58void drm_unref(struct uvm_object *);
59void drm_ref(struct uvm_object *);
60boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
61int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
62 vm_fault_t, vm_prot_t, int);
63
64const struct uvm_pagerops drm_pgops = {
65 .pgo_reference = drm_ref,
66 .pgo_detach = drm_unref,
67 .pgo_fault = drm_fault,
68 .pgo_flush = drm_flush,
69};
70
71void
72drm_ref(struct uvm_object *uobj)
73{
74 struct drm_gem_object *obj =
75 container_of(uobj, struct drm_gem_object, uobj);
76
77 drm_gem_object_get(obj);
78}
79
80void
81drm_unref(struct uvm_object *uobj)
82{
83 struct drm_gem_object *obj =
84 container_of(uobj, struct drm_gem_object, uobj);
85
86 drm_gem_object_put(obj);
87}
88
89int
90drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
91 int npages, int centeridx, vm_fault_t fault_type,
92 vm_prot_t access_type, int flags)
93{
94 struct vm_map_entry *entry = ufi->entry;
95 struct uvm_object *uobj = entry->object.uvm_obj;
96 struct drm_gem_object *obj =
97 container_of(uobj, struct drm_gem_object, uobj);
98 struct drm_device *dev = obj->dev;
99 int ret;
100
101 /*
102 * we do not allow device mappings to be mapped copy-on-write
103 * so we kill any attempt to do so here.
104 */
105
106 if (UVM_ET_ISCOPYONWRITE(entry)) {
107 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
108 return(VM_PAGER_ERROR);
109 }
110
111 /*
112 * We could end up here as the result of a copyin(9) or
113 * copyout(9) while handling an ioctl. So we must be careful
114 * not to deadlock. Therefore we only block if the quiesce
115 * count is zero, which guarantees we didn't enter from within
116 * an ioctl code path.
117 */
118 mtx_enter(&dev->quiesce_mtx);
119 if (dev->quiesce && dev->quiesce_count == 0) {
120 mtx_leave(&dev->quiesce_mtx);
121 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
122 mtx_enter(&dev->quiesce_mtx);
123 while (dev->quiesce) {
124 msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
125 PZERO, "drmflt", INFSLP);
126 }
127 mtx_leave(&dev->quiesce_mtx);
128 return(VM_PAGER_REFAULT);
129 }
130 dev->quiesce_count++;
131 mtx_leave(&dev->quiesce_mtx);
132
133 /* Call down into driver to do the magic */
134 ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
135 entry->start), vaddr, pps, npages, centeridx,
136 access_type, flags);
137
138 mtx_enter(&dev->quiesce_mtx);
139 dev->quiesce_count--;
140 if (dev->quiesce)
141 wakeup(&dev->quiesce_count);
142 mtx_leave(&dev->quiesce_mtx);
143
144 return (ret);
145}
146
147boolean_t
148drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
149{
150 return (TRUE);
151}
152
153struct uvm_object *
154udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
155{
156 struct drm_device *dev = drm_get_device_from_kdev(device);
157 struct drm_gem_object *obj = NULL;
158 struct drm_vma_offset_node *node;
159 struct drm_file *priv;
160 struct file *filp;
161
162 if (cdevsw[major(device)].d_mmap != drmmmap)
163 return NULL;
164
165 if (dev == NULL)
166 return NULL;
167
168 mutex_lock(&dev->filelist_mutex);
169 priv = drm_find_file_by_minor(dev, minor(device));
170 if (priv == NULL) {
171 mutex_unlock(&dev->filelist_mutex);
172 return NULL;
173 }
174 filp = priv->filp;
175 mutex_unlock(&dev->filelist_mutex);
176
177 if (dev->driver->mmap)
178 return dev->driver->mmap(filp, accessprot, off, size);
179
180 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
181 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
182 off >> PAGE_SHIFT,
183 atop(round_page(size)));
184 if (likely(node)) {
185 obj = container_of(node, struct drm_gem_object, vma_node);
186 /*
187 * When the object is being freed, after it hits 0-refcnt it
188 * proceeds to tear down the object. In the process it will
189 * attempt to remove the VMA offset and so acquire this
190 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
191 * that matches our range, we know it is in the process of being
192 * destroyed and will be freed as soon as we release the lock -
193 * so we have to check for the 0-refcnted object and treat it as
194 * invalid.
195 */
196 if (!kref_get_unless_zero(&obj->refcount))
197 obj = NULL;
198 }
199 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
200
201 if (!obj)
202 return NULL;
203
204 if (!drm_vma_node_is_allowed(node, priv)) {
205 drm_gem_object_put(obj);
206 return NULL;
207 }
208
209 return &obj->uobj;
210}
211
212/** @file drm_gem.c
213 *
214 * This file provides some of the base ioctls and library routines for
215 * the graphics memory manager implemented by each device driver.
216 *
217 * Because various devices have different requirements in terms of
218 * synchronization and migration strategies, implementing that is left up to
219 * the driver, and all that the general API provides should be generic --
220 * allocating objects, reading/writing data with the cpu, freeing objects.
221 * Even there, platform-dependent optimizations for reading/writing data with
222 * the CPU mean we'll likely hook those out to driver-specific calls. However,
223 * the DRI2 implementation wants to have at least allocate/mmap be generic.
224 *
225 * The goal was to have swap-backed object allocation managed through
226 * struct file. However, file descriptors as handles to a struct file have
227 * two major failings:
228 * - Process limits prevent more than 1024 or so being used at a time by
229 * default.
230 * - Inability to allocate high fds will aggravate the X Server's select()
231 * handling, and likely that of many GL client applications as well.
232 *
233 * This led to a plan of using our own integer IDs (called handles, following
234 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
235 * ioctls. The objects themselves will still include the struct file so
236 * that we can transition to fds if the required kernel infrastructure shows
237 * up at a later date, and as our interface with shmfs for memory allocation.
238 */
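
As a concrete illustration of the handle scheme described above, userspace publishes and opens objects through the generic GEM ioctls roughly as follows. This is a hedged sketch: the include path varies by system, and the initial handle comes from a driver-specific creation ioctl that is only hinted at here.

    #include <sys/ioctl.h>
    #include <drm.h>        /* include path varies by system */

    /* 'fd' and 'fd2' are open DRM device descriptors; 'handle' was
     * returned by a driver-specific object-creation ioctl. */
    struct drm_gem_flink flink = { .handle = handle };
    ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);         /* publish a global name */

    struct drm_gem_open oargs = { .name = flink.name };
    ioctl(fd2, DRM_IOCTL_GEM_OPEN, &oargs);         /* fd2 gets its own handle */

    struct drm_gem_close cargs = { .handle = handle };
    ioctl(fd, DRM_IOCTL_GEM_CLOSE, &cargs);         /* drop fd's handle */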
239
240static void
241drm_gem_init_release(struct drm_device *dev, void *ptr)
242{
243 drm_vma_offset_manager_destroy(dev->vma_offset_manager);
244}
245
246/**
247 * drm_gem_init - Initialize the GEM device fields
248 * @dev: drm_device structure to initialize
249 */
250int
251drm_gem_init(struct drm_device *dev)
252{
253 struct drm_vma_offset_manager *vma_offset_manager;
254
255 rw_init(&dev->object_name_lock, "drmonl");
256 idr_init_base(&dev->object_name_idr, 1);
257
258 vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
259 GFP_KERNEL);
260 if (!vma_offset_manager) {
261 DRM_ERROR("out of memory\n");
262 return -ENOMEM;
263 }
264
265 dev->vma_offset_manager = vma_offset_manager;
266 drm_vma_offset_manager_init(vma_offset_manager,
267 DRM_FILE_PAGE_OFFSET_START,
268 DRM_FILE_PAGE_OFFSET_SIZE);
269
270 return drmm_add_action(dev, drm_gem_init_release, NULL);
271}
272
273#ifdef __linux__
274
275/**
276 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
277 * @dev: drm_device the object should be initialized for
278 * @obj: drm_gem_object to initialize
279 * @size: object size
280 *
281 * Initialize an already allocated GEM object of the specified size with
282 * shmfs backing store.
283 */
284int drm_gem_object_init(struct drm_device *dev,
285 struct drm_gem_object *obj, size_t size)
286{
287 struct file *filp;
288
289 drm_gem_private_object_init(dev, obj, size);
290
291 filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
292 if (IS_ERR(filp))
293 return PTR_ERR(filp);
294
295 obj->filp = filp;
296
297 return 0;
298}
299EXPORT_SYMBOL(drm_gem_object_init);
300
301#else
302
303int drm_gem_object_init(struct drm_device *dev,
304 struct drm_gem_object *obj, size_t size)
305{
306 drm_gem_private_object_init(dev, obj, size);
307
308 if (size > (512 * 1024 * 1024)) {
309 printf("%s size too big %lu\n", __func__, size);
310 return -ENOMEM;
311 }
312
313 obj->uao = uao_create(size, 0);
314 uvm_obj_init(&obj->uobj, &drm_pgops, 1);
315
316 return 0;
317}
318
319#endif
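
Drivers normally embed struct drm_gem_object at the head of their own buffer structure and pass a page-aligned size to the initializer. A sketch under that convention (struct my_bo and the calling context are hypothetical):

    struct my_bo {
            struct drm_gem_object base;     /* conventionally placed first */
            /* driver-private state ... */
    };

    struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (bo == NULL)
            return ERR_PTR(-ENOMEM);
    if (drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE))) {
            kfree(bo);
            return ERR_PTR(-ENOMEM);
    }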
320
321/**
322 * drm_gem_private_object_init - initialize an allocated private GEM object
323 * @dev: drm_device the object should be initialized for
324 * @obj: drm_gem_object to initialize
325 * @size: object size
326 *
327 * Initialize an already allocated GEM object of the specified size with
328 * no GEM provided backing store. Instead the caller is responsible for
329 * backing the object and handling it.
330 */
331void drm_gem_private_object_init(struct drm_device *dev,
332 struct drm_gem_object *obj, size_t size)
333{
334 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
335
336 obj->dev = dev;
337#ifdef __linux__
338 obj->filp = NULL;
339#else
340 obj->uao = NULL;
341 obj->uobj.pgops = NULL;
342#endif
343
344 kref_init(&obj->refcount);
345 obj->handle_count = 0;
346 obj->size = size;
347 dma_resv_init(&obj->_resv);
348 if (!obj->resv)
349 obj->resv = &obj->_resv;
350
351 drm_vma_node_reset(&obj->vma_node);
352 INIT_LIST_HEAD(&obj->lru_node);
353}
354EXPORT_SYMBOL(drm_gem_private_object_init);
355
356/**
357 * drm_gem_object_handle_free - release resources bound to userspace handles
358 * @obj: GEM object to clean up.
359 *
360 * Called after the last handle to the object has been closed
361 *
362 * Removes any name for the object. Note that this must be
363 * called before drm_gem_object_free or we'll be touching
364 * freed memory
365 */
366static void drm_gem_object_handle_free(struct drm_gem_object *obj)
367{
368 struct drm_device *dev = obj->dev;
369
370 /* Remove any name for this object */
371 if (obj->name) {
372 idr_remove(&dev->object_name_idr, obj->name);
373 obj->name = 0;
374 }
375}
376
377static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
378{
379 /* Unbreak the reference cycle if we have an exported dma_buf. */
380 if (obj->dma_buf) {
381 dma_buf_put(obj->dma_buf);
382 obj->dma_buf = NULL;
383 }
384}
385
386static void
387drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
388{
389 struct drm_device *dev = obj->dev;
390 bool final = false;
391
392 if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
393 return;
394
395 /*
396 * Must bump handle count first as this may be the last
397 * ref, in which case the object would disappear before we
398 * checked for a name
399 */
400
401 mutex_lock(&dev->object_name_lock);
402 if (--obj->handle_count == 0) {
403 drm_gem_object_handle_free(obj);
404 drm_gem_object_exported_dma_buf_free(obj);
405 final = true;
406 }
407 mutex_unlock(&dev->object_name_lock);
408
409 if (final)
410 drm_gem_object_put(obj);
411}
412
413/*
414 * Called at device or object close to release the file's
415 * handle references on objects.
416 */
417static int
418drm_gem_object_release_handle(int id, void *ptr, void *data)
419{
420 struct drm_file *file_priv = data;
421 struct drm_gem_object *obj = ptr;
422
423 if (obj->funcs->close)
424 obj->funcs->close(obj, file_priv);
425
426 drm_prime_remove_buf_handle(&file_priv->prime, id);
427 drm_vma_node_revoke(&obj->vma_node, file_priv);
428
429 drm_gem_object_handle_put_unlocked(obj);
430
431 return 0;
432}
433
434/**
435 * drm_gem_handle_delete - deletes the given file-private handle
436 * @filp: drm file-private structure to use for the handle look up
437 * @handle: userspace handle to delete
438 *
439 * Removes the GEM handle from the @filp lookup table which has been added with
440 * drm_gem_handle_create(). If this is the last handle also cleans up linked
441 * resources like GEM names.
442 */
443int
444drm_gem_handle_delete(struct drm_file *filp, u32 handle)
445{
446 struct drm_gem_object *obj;
447
448 spin_lock(&filp->table_lock);
449
450 /* Check if we currently have a reference on the object */
451 obj = idr_replace(&filp->object_idr, NULL, handle);
452 spin_unlock(&filp->table_lock);
453 if (IS_ERR_OR_NULL(obj))
454 return -EINVAL;
455
456 /* Release driver's reference and decrement refcount. */
457 drm_gem_object_release_handle(handle, obj, filp);
458
459 /* And finally make the handle available for future allocations. */
460 spin_lock(&filp->table_lock);
461 idr_remove(&filp->object_idr, handle);
462 spin_unlock(&filp->table_lock);
463
464 return 0;
465}
466EXPORT_SYMBOL(drm_gem_handle_delete);
467
468/**
469 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
470 * @file: drm file-private structure containing the gem object
471 * @dev: corresponding drm_device
472 * @handle: gem object handle
473 * @offset: return location for the fake mmap offset
474 *
475 * This implements the &drm_driver.dumb_map_offset kms driver callback for
476 * drivers which use gem to manage their backing storage.
477 *
478 * Returns:
479 * 0 on success or a negative error code on failure.
480 */
481int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
482 u32 handle, u64 *offset)
483{
484 struct drm_gem_object *obj;
485 int ret;
486
487 obj = drm_gem_object_lookup(file, handle);
488 if (!obj)
489 return -ENOENT;
490
491 /* Don't allow imported objects to be mapped */
492 if (obj->import_attach) {
493 ret = -EINVAL;
494 goto out;
495 }
496
497 ret = drm_gem_create_mmap_offset(obj);
498 if (ret)
499 goto out;
500
501 *offset = drm_vma_node_offset_addr(&obj->vma_node);
502out:
503 drm_gem_object_put(obj);
504
505 return ret;
506}
507EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
508
509int drm_gem_dumb_destroy(struct drm_file *file,
510 struct drm_device *dev,
511 u32 handle)
512{
513 return drm_gem_handle_delete(file, handle);
514}
515
516/**
517 * drm_gem_handle_create_tail - internal functions to create a handle
518 * @file_priv: drm file-private structure to register the handle for
519 * @obj: object to register
520 * @handlep: pointer to return the created handle to the caller
521 *
522 * This expects the &drm_device.object_name_lock to be held already and will
523 * drop it before returning. Used to avoid races in establishing new handles
524 * when importing an object from either an flink name or a dma-buf.
525 *
526 * Handles must be released again through drm_gem_handle_delete(). This is done
527 * when userspace closes @file_priv for all attached handles, or through the
528 * GEM_CLOSE ioctl for individual handles.
529 */
530int
531drm_gem_handle_create_tail(struct drm_file *file_priv,
532 struct drm_gem_object *obj,
533 u32 *handlep)
534{
535 struct drm_device *dev = obj->dev;
536 u32 handle;
537 int ret;
538
539 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
540 if (obj->handle_count++ == 0)
541 drm_gem_object_get(obj);
542
543 /*
544 * Get the user-visible handle using idr. Preload and perform
545 * allocation under our spinlock.
546 */
547 idr_preload(GFP_KERNEL);
548 spin_lock(&file_priv->table_lock);
549
550 ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
551
552 spin_unlock(&file_priv->table_lock);
553 idr_preload_end();
554
555 mutex_unlock(&dev->object_name_lock);
556 if (ret < 0)
557 goto err_unref;
558
559 handle = ret;
560
561 ret = drm_vma_node_allow(&obj->vma_node, file_priv);
562 if (ret)
563 goto err_remove;
564
565 if (obj->funcs->open) {
566 ret = obj->funcs->open(obj, file_priv);
567 if (ret)
568 goto err_revoke;
569 }
570
571 *handlep = handle;
572 return 0;
573
574err_revoke:
575 drm_vma_node_revoke(&obj->vma_node, file_priv);
576err_remove:
577 spin_lock(&file_priv->table_lock);
578 idr_remove(&file_priv->object_idr, handle);
579 spin_unlock(&file_priv->table_lock);
580err_unref:
581 drm_gem_object_handle_put_unlocked(obj);
582 return ret;
583}
584
585/**
586 * drm_gem_handle_create - create a gem handle for an object
587 * @file_priv: drm file-private structure to register the handle for
588 * @obj: object to register
589 * @handlep: pointer to return the created handle to the caller
590 *
591 * Create a handle for this object. This adds a handle reference to the object,
592 * which includes a regular reference count. Callers will likely want to
593 * dereference the object afterwards.
594 *
595 * Since this publishes @obj to userspace it must be fully set up by this point,
596 * drivers must call this last in their buffer object creation callbacks.
597 */
598int drm_gem_handle_create(struct drm_file *file_priv,
599 struct drm_gem_object *obj,
600 u32 *handlep)
601{
602 mutex_lock(&obj->dev->object_name_lock);
603
604 return drm_gem_handle_create_tail(file_priv, obj, handlep);
605}
606EXPORT_SYMBOL(drm_gem_handle_create);
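
A typical driver creation ioctl therefore initializes the object completely, publishes it as its last step, and drops its local reference once the handle owns one. A sketch (struct my_args and my_driver_alloc_object() are hypothetical):

    int my_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file_priv)
    {
            struct my_args *args = data;
            struct drm_gem_object *obj;
            u32 handle;
            int ret;

            obj = my_driver_alloc_object(dev, args->size);
            if (obj == NULL)
                    return -ENOMEM;

            /* Must be last: this makes the object visible to userspace. */
            ret = drm_gem_handle_create(file_priv, obj, &handle);
            drm_gem_object_put(obj);        /* the handle holds its own reference */
            if (ret)
                    return ret;

            args->handle = handle;
            return 0;
    }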
607
608
609/**
610 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
611 * @obj: obj in question
612 *
613 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
614 *
615 * Note that drm_gem_object_release() already calls this function, so drivers
616 * don't have to take care of releasing the mmap offset themselves when freeing
617 * the GEM object.
618 */
619void
620drm_gem_free_mmap_offset(struct drm_gem_object *obj)
621{
622 struct drm_device *dev = obj->dev;
623
624 drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
625}
626EXPORT_SYMBOL(drm_gem_free_mmap_offset);
627
628/**
629 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
630 * @obj: obj in question
631 * @size: the virtual size
632 *
633 * GEM memory mapping works by handing back to userspace a fake mmap offset
634 * it can use in a subsequent mmap(2) call. The DRM core code then looks
635 * up the object based on the offset and sets up the various memory mapping
636 * structures.
637 *
638 * This routine allocates and attaches a fake offset for @obj, in cases where
639 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
640 * Otherwise just use drm_gem_create_mmap_offset().
641 *
642 * This function is idempotent and handles an already allocated mmap offset
643 * transparently. Drivers do not need to check for this case.
644 */
645int
646drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
647{
648 struct drm_device *dev = obj->dev;
649
650 return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
651 size / PAGE_SIZE);
652}
653EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
654
655/**
656 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
657 * @obj: obj in question
658 *
659 * GEM memory mapping works by handing back to userspace a fake mmap offset
660 * it can use in a subsequent mmap(2) call. The DRM core code then looks
661 * up the object based on the offset and sets up the various memory mapping
662 * structures.
663 *
664 * This routine allocates and attaches a fake offset for @obj.
665 *
666 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
667 * the fake offset again.
668 */
669int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
670{
671 return drm_gem_create_mmap_offset_size(obj, obj->size);
672}
673EXPORT_SYMBOL(drm_gem_create_mmap_offset);
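
On the userspace side, the fake offset obtained from the driver's map-offset ioctl is passed unchanged to mmap(2) on the DRM file descriptor (sketch; bo_size and fake_offset are assumed to come from earlier ioctls):

    void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
        drm_fd, (off_t)fake_offset);

drm_gem_mmap() then resolves the offset back to the object through the vma offset manager, as shown later in this file.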
674
675#ifdef notyet
676/*
677 * Move pages to appropriate lru and release the pagevec, decrementing the
678 * ref count of those pages.
679 */
680static void drm_gem_check_release_pagevec(struct pagevec *pvec)
681{
682 check_move_unevictable_pages(pvec);
683 __pagevec_release(pvec);
684 cond_resched();
685}
686#endif
687
688/**
689 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
690 * from shmem
691 * @obj: obj in question
692 *
693 * This reads the page-array of the shmem-backing storage of the given gem
694 * object. An array of pages is returned. If a page is not allocated or
695 * swapped-out, this will allocate/swap-in the required pages. Note that the
696 * whole object is covered by the page-array and pinned in memory.
697 *
698 * Use drm_gem_put_pages() to release the array and unpin all pages.
699 *
700 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
701 * If you require other GFP-masks, you have to do those allocations yourself.
702 *
703 * Note that you are not allowed to change gfp-zones during runtime. That is,
704 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
705 * set during initialization. If you have special zone constraints, set them
706 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
707 * to keep pages in the required zone during swap-in.
708 *
709 * This function is only valid on objects initialized with
710 * drm_gem_object_init(), but not for those initialized with
711 * drm_gem_private_object_init() only.
712 */
713struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
714{
715 STUB();
716 return ERR_PTR(-ENOSYS);
717#ifdef notyet
718 struct address_space *mapping;
719 struct vm_page *p, **pages;
720 struct pagevec pvec;
721 int i, npages;
722
723
724 if (WARN_ON(!obj->filp))
725 return ERR_PTR(-EINVAL);
726
727 /* This is the shared memory object that backs the GEM resource */
728 mapping = obj->filp->f_mapping;
729
730 /* We already BUG_ON() for non-page-aligned sizes in
731 * drm_gem_object_init(), so we should never hit this unless
732 * driver author is doing something really wrong:
733 */
734 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
735
736 npages = obj->size >> PAGE_SHIFT;
737
738 pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
739 if (pages == NULL)
740 return ERR_PTR(-ENOMEM);
741
742 mapping_set_unevictable(mapping);
743
744 for (i = 0; i < npages; i++) {
745 p = shmem_read_mapping_page(mapping, i);
746 if (IS_ERR(p))
747 goto fail;
748 pages[i] = p;
749
750 /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
751 * correct region during swapin. Note that this requires
752 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
753 * so shmem can relocate pages during swapin if required.
754 */
755 BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
756 (page_to_pfn(p) >= 0x00100000UL));
757 }
758
759 return pages;
760
761fail:
762 mapping_clear_unevictable(mapping);
763 pagevec_init(&pvec);
764 while (i--) {
765 if (!pagevec_add(&pvec, pages[i]))
766 drm_gem_check_release_pagevec(&pvec);
767 }
768 if (pagevec_count(&pvec))
769 drm_gem_check_release_pagevec(&pvec);
770
771 kvfree(pages);
772 return ERR_CAST(p);
773#endif
774}
775EXPORT_SYMBOL(drm_gem_get_pages);
776
777/**
778 * drm_gem_put_pages - helper to free backing pages for a GEM object
779 * @obj: obj in question
780 * @pages: pages to free
781 * @dirty: if true, pages will be marked as dirty
782 * @accessed: if true, the pages will be marked as accessed
783 */
784void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
785 bool dirty, bool accessed)
786{
787 STUB();
788#ifdef notyet
789 int i, npages;
790 struct address_space *mapping;
791 struct pagevec pvec;
792
793 mapping = file_inode(obj->filp)->i_mapping;
794 mapping_clear_unevictable(mapping);
795
796 /* We already BUG_ON() for non-page-aligned sizes in
797 * drm_gem_object_init(), so we should never hit this unless
798 * driver author is doing something really wrong:
799 */
800 WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
801
802 npages = obj->size >> PAGE_SHIFT;
803
804 pagevec_init(&pvec);
805 for (i = 0; i < npages; i++) {
806 if (!pages[i])
807 continue;
808
809 if (dirty)
810 set_page_dirty(pages[i]);
811
812 if (accessed)
813 mark_page_accessed(pages[i]);
814
815 /* Undo the reference we took when populating the table */
816 if (!pagevec_add(&pvec, pages[i]))
817 drm_gem_check_release_pagevec(&pvec);
818 }
819 if (pagevec_count(&pvec))
820 drm_gem_check_release_pagevec(&pvec);
821
822 kvfree(pages);
823#endif
824}
825EXPORT_SYMBOL(drm_gem_put_pages);
826
827static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
828 struct drm_gem_object **objs)
829{
830 int i, ret = 0;
831 struct drm_gem_object *obj;
832
833 spin_lock(&filp->table_lock);
834
835 for (i = 0; i < count; i++) {
836 /* Check if we currently have a reference on the object */
837 obj = idr_find(&filp->object_idr, handle[i]);
838 if (!obj) {
839 ret = -ENOENT;
840 break;
841 }
842 drm_gem_object_get(obj);
843 objs[i] = obj;
844 }
845 spin_unlock(&filp->table_lock);
846
847 return ret;
848}
849
850/**
851 * drm_gem_objects_lookup - look up GEM objects from an array of handles
852 * @filp: DRM file private data
853 * @bo_handles: user pointer to array of userspace handles
854 * @count: size of handle array
855 * @objs_out: returned pointer to array of drm_gem_object pointers
856 *
857 * Takes an array of userspace handles and returns a newly allocated array of
858 * GEM objects.
859 *
860 * For a single handle lookup, use drm_gem_object_lookup().
861 *
862 * Returns:
863 *
864 * @objs filled in with GEM object pointers. Returned GEM objects need to be
865 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
866 * failure. 0 is returned on success.
867 *
868 */
869int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
870 int count, struct drm_gem_object ***objs_out)
871{
872 int ret;
873 u32 *handles;
874 struct drm_gem_object **objs;
875
876 if (!count)
877 return 0;
878
879 objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
880 GFP_KERNEL | __GFP_ZERO);
881 if (!objs)
882 return -ENOMEM;
883
884 *objs_out = objs;
885
886 handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
887 if (!handles) {
888 ret = -ENOMEM;
889 goto out;
890 }
891
892 if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
893 ret = -EFAULT;
894 DRM_DEBUG("Failed to copy in GEM handles\n");
895 goto out;
896 }
897
898 ret = objects_lookup(filp, handles, count, objs);
899out:
900 kvfree(handles);
901 return ret;
902
903}
904EXPORT_SYMBOL(drm_gem_objects_lookup);
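
A caller must release both the per-object references and the returned array. Note that *objs_out is allocated (zero-filled) before the handles are copied in, so it must be freed even when the function fails; the sketch below also drops references left behind by a partially successful lookup (args is a hypothetical ioctl argument struct):

    struct drm_gem_object **objs = NULL;
    int i, ret;

    ret = drm_gem_objects_lookup(file_priv,
        u64_to_user_ptr(args->bo_handles), args->bo_count, &objs);
    if (ret == 0) {
            /* ... use objs[0 .. bo_count-1] ... */
    }
    if (objs) {
            /* zero-initialized array: slots never filled stay NULL */
            for (i = 0; i < args->bo_count; i++)
                    if (objs[i])
                            drm_gem_object_put(objs[i]);
            kvfree(objs);
    }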
905
906/**
907 * drm_gem_object_lookup - look up a GEM object from its handle
908 * @filp: DRM file private data
909 * @handle: userspace handle
910 *
911 * Returns:
912 *
913 * A reference to the object named by the handle if such exists on @filp, NULL
914 * otherwise.
915 *
916 * If looking up an array of handles, use drm_gem_objects_lookup().
917 */
918struct drm_gem_object *
919drm_gem_object_lookup(struct drm_file *filp, u32 handle)
920{
921 struct drm_gem_object *obj = NULL;
922
923 objects_lookup(filp, &handle, 1, &obj);
924 return obj;
925}
926EXPORT_SYMBOL(drm_gem_object_lookup);
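
The single-handle helper is used with the usual lookup/put pattern, matching the ioctl implementations later in this file (sketch):

    struct drm_gem_object *obj;

    obj = drm_gem_object_lookup(file_priv, handle);
    if (obj == NULL)
            return -ENOENT;
    /* ... the reference taken by the lookup keeps obj alive ... */
    drm_gem_object_put(obj);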
927
928/**
929 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
930 * shared and/or exclusive fences.
931 * @filep: DRM file private data
932 * @handle: userspace handle
933 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
934 * @timeout: timeout value in jiffies or zero to return immediately
935 *
936 * Returns:
937 *
938 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out,
939 * or 0 on success.
940 */
941long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
942 bool wait_all, unsigned long timeout)
943{
944 long ret;
945 struct drm_gem_object *obj;
946
947 obj = drm_gem_object_lookup(filep, handle);
948 if (!obj) {
949 DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
950 return -EINVAL;
951 }
952
953 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
954 true, timeout);
955 if (ret == 0)
956 ret = -ETIME;
957 else if (ret > 0)
958 ret = 0;
959
960 drm_gem_object_put(obj);
961
962 return ret;
963}
964EXPORT_SYMBOL(drm_gem_dma_resv_wait);
965
966/**
967 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
968 * @dev: drm_device
969 * @data: ioctl data
970 * @file_priv: drm file-private structure
971 *
972 * Releases the handle to an mm object.
973 */
974int
975drm_gem_close_ioctl(struct drm_device *dev, void *data,
976 struct drm_file *file_priv)
977{
978 struct drm_gem_close *args = data;
979 int ret;
980
981 if (!drm_core_check_feature(dev, DRIVER_GEM))
982 return -EOPNOTSUPP;
983
984 ret = drm_gem_handle_delete(file_priv, args->handle);
985
986 return ret;
987}
988
989/**
990 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
991 * @dev: drm_device
992 * @data: ioctl data
993 * @file_priv: drm file-private structure
994 *
995 * Create a global name for an object, returning the name.
996 *
997 * Note that the name does not hold a reference; when the object
998 * is freed, the name goes away.
999 */
1000int
1001drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1002 struct drm_file *file_priv)
1003{
1004 struct drm_gem_flink *args = data;
1005 struct drm_gem_object *obj;
1006 int ret;
1007
1008 if (!drm_core_check_feature(dev, DRIVER_GEM))
1009 return -EOPNOTSUPP;
1010
1011 obj = drm_gem_object_lookup(file_priv, args->handle);
1012 if (obj == NULL)
1013 return -ENOENT;
1014
1015 mutex_lock(&dev->object_name_lock);
1016 /* prevent races with concurrent gem_close. */
1017 if (obj->handle_count == 0) {
1018 ret = -ENOENT;
1019 goto err;
1020 }
1021
1022 if (!obj->name) {
1023 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
1024 if (ret < 0)
1025 goto err;
1026
1027 obj->name = ret;
1028 }
1029
1030 args->name = (uint64_t) obj->name;
1031 ret = 0;
1032
1033err:
1034 mutex_unlock(&dev->object_name_lock);
1035 drm_gem_object_put(obj);
1036 return ret;
1037}
1038
1039/**
1040 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
1041 * @dev: drm_device
1042 * @data: ioctl data
1043 * @file_priv: drm file-private structure
1044 *
1045 * Open an object using the global name, returning a handle and the size.
1046 *
1047 * This handle (of course) holds a reference to the object, so the object
1048 * will not go away until the handle is deleted.
1049 */
1050int
1051drm_gem_open_ioctl(struct drm_device *dev, void *data,
1052 struct drm_file *file_priv)
1053{
1054 struct drm_gem_open *args = data;
1055 struct drm_gem_object *obj;
1056 int ret;
1057 u32 handle;
1058
1059 if (!drm_core_check_feature(dev, DRIVER_GEM))
1060 return -EOPNOTSUPP;
1061
1062 mutex_lock(&dev->object_name_lock);
1063 obj = idr_find(&dev->object_name_idr, (int) args->name);
1064 if (obj) {
1065 drm_gem_object_get(obj);
1066 } else {
1067 mutex_unlock(&dev->object_name_lock);
1068 return -ENOENT;
1069 }
1070
1071 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
1072 ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
1073 if (ret)
1074 goto err;
1075
1076 args->handle = handle;
1077 args->size = obj->size;
1078
1079err:
1080 drm_gem_object_put(obj);
1081 return ret;
1082}
1083
1084/**
1085 * drm_gem_open - initializes GEM file-private structures at devnode open time
1086 * @dev: drm_device which is being opened by userspace
1087 * @file_private: drm file-private structure to set up
1088 *
1089 * Called at device open time, sets up the structure for handling refcounting
1090 * of mm objects.
1091 */
1092void
1093drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
1094{
1095 idr_init_base(&file_private->object_idr, 1);
1096 mtx_init(&file_private->table_lock, IPL_NONE);
1097}
1098
1099/**
1100 * drm_gem_release - release file-private GEM resources
1101 * @dev: drm_device which is being closed by userspace
1102 * @file_private: drm file-private structure to clean up
1103 *
1104 * Called at close time when the filp is going away.
1105 *
1106 * Releases any remaining references on objects by this filp.
1107 */
1108void
1109drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
1110{
1111 idr_for_each(&file_private->object_idr,
1112 &drm_gem_object_release_handle, file_private);
1113 idr_destroy(&file_private->object_idr);
1114}
1115
1116/**
1117 * drm_gem_object_release - release GEM buffer object resources
1118 * @obj: GEM buffer object
1119 *
1120 * This releases any structures and resources used by @obj and is the inverse of
1121 * drm_gem_object_init().
1122 */
1123void
1124drm_gem_object_release(struct drm_gem_object *obj)
1125{
1126 WARN_ON(obj->dma_buf);
1127
1128#ifdef __linux__
1129 if (obj->filp)
1130 fput(obj->filp);
1131#else
1132 if (obj->uao)
1133 uao_detach(obj->uao);
1134 if (obj->uobj.pgops)
1135 uvm_obj_destroy(&obj->uobj);
1136#endif
1137
1138 dma_resv_fini(&obj->_resv);
1139 drm_gem_free_mmap_offset(obj);
1140 drm_gem_lru_remove(obj);
1141}
1142EXPORT_SYMBOL(drm_gem_object_release);
1143
1144/**
1145 * drm_gem_object_free - free a GEM object
1146 * @kref: kref of the object to free
1147 *
1148 * Called after the last reference to the object has been lost.
1149 *
1150 * Frees the object
1151 */
1152void
1153drm_gem_object_free(struct kref *kref)
1154{
1155 struct drm_gem_object *obj =
1156 container_of(kref, struct drm_gem_object, refcount);
1157
1158 if (WARN_ON(!obj->funcs->free))
1159 return;
1160
1161 obj->funcs->free(obj);
1162}
1163EXPORT_SYMBOL(drm_gem_object_free);
1164
1165#ifdef __linux__
1166/**
1167 * drm_gem_vm_open - vma->ops->open implementation for GEM
1168 * @vma: VM area structure
1169 *
1170 * This function implements the #vm_operations_struct open() callback for GEM
1171 * drivers. This must be used together with drm_gem_vm_close().
1172 */
1173void drm_gem_vm_open(struct vm_area_struct *vma)
1174{
1175 struct drm_gem_object *obj = vma->vm_private_data;
1176
1177 drm_gem_object_get(obj);
1178}
1179EXPORT_SYMBOL(drm_gem_vm_open);
1180
1181/**
1182 * drm_gem_vm_close - vma->ops->close implementation for GEM
1183 * @vma: VM area structure
1184 *
1185 * This function implements the #vm_operations_struct close() callback for GEM
1186 * drivers. This must be used together with drm_gem_vm_open().
1187 */
1188void drm_gem_vm_close(struct vm_area_struct *vma)
1189{
1190 struct drm_gem_object *obj = vma->vm_private_data;
1191
1192 drm_gem_object_put(obj);
1193}
1194EXPORT_SYMBOL(drm_gem_vm_close);
1195
1196/**
1197 * drm_gem_mmap_obj - memory map a GEM object
1198 * @obj: the GEM object to map
1199 * @obj_size: the object size to be mapped, in bytes
1200 * @vma: VMA for the area to be mapped
1201 *
1202 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
1203 * vm_ops. Depending on their requirements, GEM objects can either
1204 * provide a fault handler in their vm_ops (in which case any accesses to
1205 * the object will be trapped, to perform migration, GTT binding, surface
1206 * register allocation, or performance monitoring), or mmap the buffer memory
1207 * synchronously after calling drm_gem_mmap_obj.
1208 *
1209 * This function is mainly intended to implement the DMABUF mmap operation, when
1210 * the GEM object is not looked up based on its fake offset. To implement the
1211 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
1212 *
1213 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
1214 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
1215 * callers must verify access restrictions before calling this helper.
1216 *
1217 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
1218 * size, or if no vm_ops are provided.
1219 */
1220int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1221 struct vm_area_struct *vma)
1222{
1223 int ret;
1224
1225 /* Check for valid size. */
1226 if (obj_size < vma->vm_end - vma->vm_start)
1227 return -EINVAL;
1228
1229 /* Take a ref for this mapping of the object, so that the fault
1230 * handler can dereference the mmap offset's pointer to the object.
1231 * This reference is cleaned up by the corresponding vm_close
1232 * (which should happen whether the vma was created by this call, or
1233 * by a vm_open due to mremap or partial unmap or whatever).
1234 */
1235 drm_gem_object_get(obj);
1236
1237 vma->vm_private_data = obj;
1238 vma->vm_ops = obj->funcs->vm_ops;
1239
1240 if (obj->funcs->mmap) {
1241 ret = obj->funcs->mmap(obj, vma);
1242 if (ret)
1243 goto err_drm_gem_object_put;
1244 WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1245 } else {
1246 if (!vma->vm_ops) {
1247 ret = -EINVAL;
1248 goto err_drm_gem_object_put;
1249 }
1250
1251 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1252 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1253 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1254 }
1255
1256 return 0;
1257
1258err_drm_gem_object_put:
1259 drm_gem_object_put(obj);
1260 return ret;
1261}
1262EXPORT_SYMBOL(drm_gem_mmap_obj);
1263
1264/**
1265 * drm_gem_mmap - memory map routine for GEM objects
1266 * @filp: DRM file pointer
1267 * @vma: VMA for the area to be mapped
1268 *
1269 * If a driver supports GEM object mapping, mmap calls on the DRM file
1270 * descriptor will end up here.
1271 *
1272 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1273 * contain the fake offset we created when the GTT map ioctl was called on
1274 * the object) and map it with a call to drm_gem_mmap_obj().
1275 *
1276 * If the caller is not granted access to the buffer object, the mmap will fail
1277 * with EACCES. Please see the vma manager for more information.
1278 */
1279int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1280{
1281 struct drm_file *priv = filp->private_data;
1282 struct drm_device *dev = priv->minor->dev;
1283 struct drm_gem_object *obj = NULL;
1284 struct drm_vma_offset_node *node;
1285 int ret;
1286
1287 if (drm_dev_is_unplugged(dev))
1288 return -ENODEV;
1289
1290 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1291 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1292 vma->vm_pgoff,
1293 vma_pages(vma));
1294 if (likely(node)) {
1295 obj = container_of(node, struct drm_gem_object, vma_node);
1296 /*
1297 * When the object is being freed, after it hits 0-refcnt it
1298 * proceeds to tear down the object. In the process it will
1299 * attempt to remove the VMA offset and so acquire this
1300 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
1301 * that matches our range, we know it is in the process of being
1302 * destroyed and will be freed as soon as we release the lock -
1303 * so we have to check for the 0-refcnted object and treat it as
1304 * invalid.
1305 */
1306 if (!kref_get_unless_zero(&obj->refcount))
1307 obj = NULL;
1308 }
1309 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1310
1311 if (!obj)
1312 return -EINVAL;
1313
1314 if (!drm_vma_node_is_allowed(node, priv)) {
1315 drm_gem_object_put(obj);
1316 return -EACCES;
1317 }
1318
1319 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1320 vma);
1321
1322 drm_gem_object_put(obj);
1323
1324 return ret;
1325}
1326EXPORT_SYMBOL(drm_gem_mmap);
1327#else /* ! __linux__ */
1328
1329int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
1330 vm_prot_t accessprot, voff_t off, vsize_t size)
1331{
1332 int ret;
1333
1334 /* Check for valid size. */
1335 if (obj_size < size)
1336 return -EINVAL;
1337
1338 /* Take a ref for this mapping of the object, so that the fault
1339 * handler can dereference the mmap offset's pointer to the object.
1340 * This reference is cleaned up by the corresponding vm_close
1341 * (which should happen whether the vma was created by this call, or
1342 * by a vm_open due to mremap or partial unmap or whatever).
1343 */
1344 drm_gem_object_get(obj);
1345
1346#ifdef __linux__
1347 vma->vm_private_data = obj;
1348 vma->vm_ops = obj->funcs->vm_ops;
1349#else
1350 if (obj->uobj.pgops == NULL)
1351 uvm_obj_init(&obj->uobj, obj->funcs->vm_ops, 1);
1352#endif
1353
1354 if (obj->funcs->mmap) {
1355 ret = obj->funcs->mmap(obj, accessprot, off, size);
1356 if (ret)
1357 goto err_drm_gem_object_put;
1358#ifdef notyet
1359 WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
1360#endif
1361 } else {
1362#ifdef notyet
1363 if (!vma->vm_ops) {
1364 ret = -EINVAL;
1365 goto err_drm_gem_object_put;
1366 }
1367
1368 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1369 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1370 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1371#else
1372 ret = -EINVAL;
1373 goto err_drm_gem_object_put;
1374#endif
1375 }
1376
1377 return 0;
1378
1379err_drm_gem_object_put:
1380 drm_gem_object_put(obj);
1381 return ret;
1382}
1383
1384struct uvm_object *
1385drm_gem_mmap(struct file *filp, vm_prot_t accessprot, voff_t off,
1386 vsize_t size)
1387{
1388 struct drm_file *priv = (void *)filp;
1389 struct drm_device *dev = priv->minor->dev;
1390 struct drm_gem_object *obj = NULL;
1391 struct drm_vma_offset_node *node;
1392 int ret;
1393
1394 if (drm_dev_is_unplugged(dev))
1395 return NULL;
1396
1397 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1398 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1399 off >> PAGE_SHIFT,
1400 atop(round_page(size)));
1401 if (likely(node)) {
1402 obj = container_of(node, struct drm_gem_object, vma_node);
1403 /*
1404 * When the object is being freed, after it hits 0-refcnt it
1405 * proceeds to tear down the object. In the process it will
1406 * attempt to remove the VMA offset and so acquire this
1407 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
1408 * that matches our range, we know it is in the process of being
1409 * destroyed and will be freed as soon as we release the lock -
1410 * so we have to check for the 0-refcnted object and treat it as
1411 * invalid.
1412 */
1413 if (!kref_get_unless_zero(&obj->refcount))
1414 obj = NULL;
1415 }
1416 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1417
1418 if (!obj)
1419 return NULL;
1420
1421 if (!drm_vma_node_is_allowed(node, priv)) {
1422 drm_gem_object_put(obj);
1423 return NULL;
1424 }
1425
1426 ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
Value stored to 'ret' is never read
1427 accessprot, off, size);
1428
1429 drm_gem_object_put(obj);
1430
1431 return &obj->uobj;
1432}
1433
1434#endif /* __linux__ */
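
Regarding the warning at source line 1426 above: the OpenBSD drm_gem_mmap() stores the drm_gem_mmap_obj() status into ret but then unconditionally returns &obj->uobj, so the store is dead and a mapping failure is silently ignored. One possible shape of a fix is sketched below; this is an editorial illustration, not necessarily how the issue was resolved upstream:

    ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
        accessprot, off, size);
    if (ret) {
            drm_gem_object_put(obj);        /* drop the lookup reference */
            return NULL;
    }

    drm_gem_object_put(obj);                /* mapping ref keeps obj alive */
    return &obj->uobj;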
1435
1436void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1437 const struct drm_gem_object *obj)
1438{
1439 drm_printf_indent(p, indent, "name=%d\n", obj->name)drm_printf((p), "%.*s" "name=%d\n", (indent), "\t\t\t\t\tX", obj
->name)
;
1440 drm_printf_indent(p, indent, "refcount=%u\n",drm_printf((p), "%.*s" "refcount=%u\n", (indent), "\t\t\t\t\tX"
, kref_read(&obj->refcount))
1441 kref_read(&obj->refcount))drm_printf((p), "%.*s" "refcount=%u\n", (indent), "\t\t\t\t\tX"
, kref_read(&obj->refcount))
;
1442 drm_printf_indent(p, indent, "start=%08lx\n",drm_printf((p), "%.*s" "start=%08lx\n", (indent), "\t\t\t\t\tX"
, drm_vma_node_start(&obj->vma_node))
1443 drm_vma_node_start(&obj->vma_node))drm_printf((p), "%.*s" "start=%08lx\n", (indent), "\t\t\t\t\tX"
, drm_vma_node_start(&obj->vma_node))
;
1444 drm_printf_indent(p, indent, "size=%zu\n", obj->size)drm_printf((p), "%.*s" "size=%zu\n", (indent), "\t\t\t\t\tX",
obj->size)
;
1445 drm_printf_indent(p, indent, "imported=%s\n",drm_printf((p), "%.*s" "imported=%s\n", (indent), "\t\t\t\t\tX"
, str_yes_no(obj->import_attach))
1446 str_yes_no(obj->import_attach))drm_printf((p), "%.*s" "imported=%s\n", (indent), "\t\t\t\t\tX"
, str_yes_no(obj->import_attach))
;
1447
1448 if (obj->funcs->print_info)
1449 obj->funcs->print_info(p, indent, obj);
1450}
1451
1452int drm_gem_pin(struct drm_gem_object *obj)
1453{
1454 if (obj->funcs->pin)
1455 return obj->funcs->pin(obj);
1456 else
1457 return 0;
1458}
1459
1460void drm_gem_unpin(struct drm_gem_object *obj)
1461{
1462 if (obj->funcs->unpin)
1463 obj->funcs->unpin(obj);
1464}
1465
1466int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
1467{
1468 int ret;
1469
1470 if (!obj->funcs->vmap)
1471 return -EOPNOTSUPP;
1472
1473 ret = obj->funcs->vmap(obj, map);
1474 if (ret)
1475 return ret;
1476 else if (iosys_map_is_null(map))
1477 return -ENOMEM;
1478
1479 return 0;
1480}
1481EXPORT_SYMBOL(drm_gem_vmap);
1482
1483void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
1484{
1485 if (iosys_map_is_null(map))
1486 return;
1487
1488 if (obj->funcs->vunmap)
1489 obj->funcs->vunmap(obj, map);
1490
1491 /* Always set the mapping to NULL. Callers may rely on this. */
1492 iosys_map_clear(map);
1493}
1494EXPORT_SYMBOL(drm_gem_vunmap);
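
Callers bracket a CPU access window with the two helpers (sketch):

    struct iosys_map map;
    int ret;

    ret = drm_gem_vmap(obj, &map);
    if (ret)
            return ret;
    /* access the buffer through map.vaddr (or map.vaddr_iomem) */
    drm_gem_vunmap(obj, &map);      /* also clears the map, as noted above */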
1495
1496/**
1497 * drm_gem_lock_reservations - Sets up the ww context and acquires
1498 * the lock on an array of GEM objects.
1499 *
1500 * Once you've locked your reservations, you'll want to set up space
1501 * for your shared fences (if applicable), submit your job, then
1502 * drm_gem_unlock_reservations().
1503 *
1504 * @objs: drm_gem_objects to lock
1505 * @count: Number of objects in @objs
1506 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
1507 * part of tracking this set of locked reservations.
1508 */
1509int
1510drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
1511 struct ww_acquire_ctx *acquire_ctx)
1512{
1513 int contended = -1;
1514 int i, ret;
1515
1516 ww_acquire_init(acquire_ctx, &reservation_ww_class);
1517
1518retry:
1519 if (contended != -1) {
1520 struct drm_gem_object *obj = objs[contended];
1521
1522 ret = dma_resv_lock_slow_interruptible(obj->resv,
1523 acquire_ctx);
1524 if (ret) {
1525 ww_acquire_fini(acquire_ctx);
1526 return ret;
1527 }
1528 }
1529
1530 for (i = 0; i < count; i++) {
1531 if (i == contended)
1532 continue;
1533
1534 ret = dma_resv_lock_interruptible(objs[i]->resv,
1535 acquire_ctx);
1536 if (ret) {
1537 int j;
1538
1539 for (j = 0; j < i; j++)
1540 dma_resv_unlock(objs[j]->resv);
1541
1542 if (contended != -1 && contended >= i)
1543 dma_resv_unlock(objs[contended]->resv);
1544
1545			if (ret == -EDEADLK) {
1546 contended = i;
1547 goto retry;
1548 }
1549
1550 ww_acquire_fini(acquire_ctx);
1551 return ret;
1552 }
1553 }
1554
1555 ww_acquire_done(acquire_ctx);
1556
1557 return 0;
1558}
1559EXPORT_SYMBOL(drm_gem_lock_reservations);
1560
1561void
1562drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
1563 struct ww_acquire_ctx *acquire_ctx)
1564{
1565 int i;
1566
1567 for (i = 0; i < count; i++)
1568 dma_resv_unlock(objs[i]->resv);
1569
1570 ww_acquire_fini(acquire_ctx);
1571}
1572EXPORT_SYMBOL(drm_gem_unlock_reservations);
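The kernel-doc above describes the intended sequence: lock, reserve fence slots, submit, unlock. A compressed sketch of that pattern, with the submit and fence bookkeeping elided since they are driver-specific (everything named example_* is hypothetical):

	static int
	example_submit(struct drm_gem_object **objs, int count)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		/* Handles -EDEADLK backoff/retry internally. */
		ret = drm_gem_lock_reservations(objs, count, &ctx);
		if (ret)
			return ret;

		/* All reservations held: reserve fence slots, queue job. */

		drm_gem_unlock_reservations(objs, count, &ctx);
		return 0;
	}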
1573
1574/**
1575 * drm_gem_lru_init - initialize a LRU
1576 *
1577 * @lru: The LRU to initialize
1578 * @lock: The lock protecting the LRU
1579 */
1580void
1581drm_gem_lru_init(struct drm_gem_lru *lru, struct rwlock *lock)
1582{
1583 lru->lock = lock;
1584 lru->count = 0;
1585 INIT_LIST_HEAD(&lru->list);
1586}
1587EXPORT_SYMBOL(drm_gem_lru_init);
1588
1589static void
1590drm_gem_lru_remove_locked(struct drm_gem_object *obj)
1591{
1592	obj->lru->count -= obj->size >> PAGE_SHIFT;
1593	WARN_ON(obj->lru->count < 0);
1594 list_del(&obj->lru_node);
1595	obj->lru = NULL;
1596}
1597
1598/**
1599 * drm_gem_lru_remove - remove object from whatever LRU it is in
1600 *
1601 * If the object is currently in any LRU, remove it.
1602 *
1603 * @obj: The GEM object to remove from current LRU
1604 */
1605void
1606drm_gem_lru_remove(struct drm_gem_object *obj)
1607{
1608 struct drm_gem_lru *lru = obj->lru;
1609
1610 if (!lru)
1611 return;
1612
1613	mutex_lock(lru->lock);
1614 drm_gem_lru_remove_locked(obj);
1615	mutex_unlock(lru->lock);
1616}
1617EXPORT_SYMBOL(drm_gem_lru_remove);
1618
1619static void
1620drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1621{
1622	lockdep_assert_held_once(lru->lock);
1623
1624 if (obj->lru)
1625 drm_gem_lru_remove_locked(obj);
1626
1627	lru->count += obj->size >> PAGE_SHIFT;
1628 list_add_tail(&obj->lru_node, &lru->list);
1629 obj->lru = lru;
1630}
1631
1632/**
1633 * drm_gem_lru_move_tail - move the object to the tail of the LRU
1634 *
1635 * If the object is already in this LRU it will be moved to the
1636 * tail. Otherwise it will be removed from whichever other LRU
1637 * it is in (if any) and moved into this LRU.
1638 *
1639 * @lru: The LRU to move the object into.
1640 * @obj: The GEM object to move into this LRU
1641 */
1642void
1643drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
1644{
1645	mutex_lock(lru->lock);
1646 drm_gem_lru_move_tail_locked(lru, obj);
1647	mutex_unlock(lru->lock);
1648}
1649EXPORT_SYMBOL(drm_gem_lru_move_tail);
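These helpers account lru->count in pages and keep each object on at most one LRU at a time. A small driver-side sketch of how they combine (all example_* names are invented; struct rwlock matches this port's drm_gem_lru_init() signature):

	struct example_drv {
		struct rwlock lock;
		struct drm_gem_lru lru;
	};

	static void
	example_drv_init(struct example_drv *drv)
	{
		rw_init(&drv->lock, "exlru");
		drm_gem_lru_init(&drv->lru, &drv->lock);
	}

	static void
	example_mark_used(struct example_drv *drv, struct drm_gem_object *obj)
	{
		/* Moves obj to the tail, removing it from any previous LRU. */
		drm_gem_lru_move_tail(&drv->lru, obj);
	}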
1650
1651/**
1652 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
1653 *
1654 * If the shrink callback succeeds, it is expected that the driver
1655 * move the object out of this LRU.
1656 *
1657 * If the LRU possibly contains active buffers, it is the responsibility
1658 * of the shrink callback to check for this (i.e. dma_resv_test_signaled())
1659 * or, if necessary, to block until the buffer becomes idle.
1660 *
1661 * @lru: The LRU to scan
1662 * @nr_to_scan: The number of pages to try to reclaim
1663 * @remaining: The number of pages left to reclaim; should be initialized by the caller
1664 * @shrink: Callback to try to shrink/reclaim the object.
1665 */
1666unsigned long
1667drm_gem_lru_scan(struct drm_gem_lru *lru,
1668 unsigned int nr_to_scan,
1669 unsigned long *remaining,
1670		 bool (*shrink)(struct drm_gem_object *obj))
1671{
1672 struct drm_gem_lru still_in_lru;
1673 struct drm_gem_object *obj;
1674 unsigned freed = 0;
1675
1676 drm_gem_lru_init(&still_in_lru, lru->lock);
1677
1678	mutex_lock(lru->lock);
1679
1680 while (freed < nr_to_scan) {
1681		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
1682
1683 if (!obj)
1684 break;
1685
1686 drm_gem_lru_move_tail_locked(&still_in_lru, obj);
1687
1688 /*
1689 * If it's in the process of being freed, gem_object->free()
1690		 * may be blocked on the lock, waiting to remove it. So just
1691 * skip it.
1692 */
1693 if (!kref_get_unless_zero(&obj->refcount))
1694 continue;
1695
1696 /*
1697 * Now that we own a reference, we can drop the lock for the
1698 * rest of the loop body, to reduce contention with other
1699 * code paths that need the LRU lock
1700 */
1701		mutex_unlock(lru->lock);
1702
1703 /*
1704		 * Note that this still needs to be a trylock, since we can
1705		 * hit the shrinker in response to trying to get backing pages
1706		 * for this obj (i.e. while its lock is already held).
1707 */
1708 if (!dma_resv_trylock(obj->resv)) {
1709			*remaining += obj->size >> PAGE_SHIFT;
1710 goto tail;
1711 }
1712
1713 if (shrink(obj)) {
1714			freed += obj->size >> PAGE_SHIFT;
1715
1716 /*
1717 * If we succeeded in releasing the object's backing
1718 * pages, we expect the driver to have moved the object
1719 * out of this LRU
1720 */
1721			WARN_ON(obj->lru == &still_in_lru);
1722			WARN_ON(obj->lru == lru);
1723 }
1724
1725 dma_resv_unlock(obj->resv);
1726
1727tail:
1728 drm_gem_object_put(obj);
1729		mutex_lock(lru->lock);
1730 }
1731
1732 /*
1733 * Move objects we've skipped over out of the temporary still_in_lru
1734 * back into this LRU
1735 */
1736	list_for_each_entry(obj, &still_in_lru.list, lru_node)
1737 obj->lru = lru;
1738 list_splice_tail(&still_in_lru.list, &lru->list);
1739 lru->count += still_in_lru.count;
1740
1741	mutex_unlock(lru->lock);
1742
1743 return freed;
1744}
1745EXPORT_SYMBOL(drm_gem_lru_scan);
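A shrink callback for this helper runs with obj->resv trylocked by the scan loop, and on success must move the object off the scanned LRU (otherwise the WARN_ONs above fire). A sketch reusing the hypothetical example_drv from the earlier LRU sketch; example_purge() stands in for the driver's real reclaim routine, and the idle check follows the kernel-doc's dma_resv_test_signaled() suggestion:

	static bool
	example_shrink(struct drm_gem_object *obj)
	{
		/* Leave busy buffers alone; the GPU may still use them. */
		if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
			return false;

		example_purge(obj);		/* release backing pages */
		drm_gem_lru_remove(obj);	/* leave the scanned LRU */
		return true;
	}

	static unsigned long
	example_scan(struct example_drv *drv, unsigned int nr_to_scan)
	{
		unsigned long remaining = 0;

		return drm_gem_lru_scan(&drv->lru, nr_to_scan, &remaining,
		    example_shrink);
	}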