Bug Summary

File: dev/pci/drm/drm_prime.c
Warning: line 992, column 2
Value stored to 'ret' is never read
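
The flagged store sits in the non-Linux (#else) branch of
drm_gem_prime_import_dev(), where "ret = 0;" is immediately followed by
panic(), so the assigned value can never be read. A minimal sketch of one
possible fix, assuming the surrounding #ifdef structure stays as-is, is to
drop the dead assignment ('ret' is then only written in the "#ifdef notyet"
branch, and this build already passes -Wno-unused-but-set-variable):

#else
	/* 'ret' is never read past this point: panic() does not return */
	panic(__func__);
#endif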

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name drm_prime.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/drm_prime.c
1/*
2 * Copyright © 2012 Red Hat
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie <airlied@redhat.com>
25 * Rob Clark <rob.clark@linaro.org>
26 *
27 */
28
29#include <linux/export.h>
30#include <linux/dma-buf.h>
31#include <linux/rbtree.h>
32
33#include <drm/drm.h>
34#include <drm/drm_drv.h>
35#include <drm/drm_file.h>
36#include <drm/drm_framebuffer.h>
37#include <drm/drm_gem.h>
38#include <drm/drm_prime.h>
39
40#include "drm_internal.h"
41
42/**
43 * DOC: overview and lifetime rules
44 *
45 * Similar to GEM global names, PRIME file descriptors are also used to share
46 * buffer objects across processes. They offer additional security: as file
47 * descriptors must be explicitly sent over UNIX domain sockets to be shared
48 * between applications, they can't be guessed like the globally unique GEM
49 * names.
50 *
51 * Drivers that support the PRIME API implement the
52 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
53 * GEM based drivers must use drm_gem_prime_handle_to_fd() and
54 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
55 * actual driver interface is provided through the &drm_gem_object_funcs.export
56 * and &drm_driver.gem_prime_import hooks.
57 *
58 * &dma_buf_ops implementations for GEM drivers are all individually exported
59 * for drivers which need to overwrite or reimplement some of them.
60 *
61 * Reference Counting for GEM Drivers
62 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
63 *
64 * On the export the &dma_buf holds a reference to the exported buffer object,
65 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
66 * IOCTL, when it first calls &drm_gem_object_funcs.export
67 * and stores the exporting GEM object in the &dma_buf.priv field. This
68 * reference needs to be released when the final reference to the &dma_buf
69 * itself is dropped and its &dma_buf_ops.release function is called. For
70 * GEM-based drivers, the &dma_buf should be exported using
71 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
72 *
73 * Thus the chain of references always flows in one direction, avoiding loops:
74 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
75 * is the lookup caches for import and export. These are required to guarantee
76 * that any given object will always have only one unique userspace handle. This
77 * is required to allow userspace to detect duplicated imports, since some GEM
78 * drivers do fail command submissions if a given buffer object is listed more
79 * than once. These import and export caches in &drm_prime_file_private only
80 * retain a weak reference, which is cleaned up when the corresponding object is
81 * released.
82 *
83 * Self-importing: If userspace is using PRIME as a replacement for flink then
84 * it will get a fd->handle request for a GEM object that it created. Drivers
85 * should detect this situation and return back the underlying object from the
86 * dma-buf private. For GEM based drivers this is handled in
87 * drm_gem_prime_import() already.
88 */
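
To make the lifetime rules above concrete, here is a hedged userspace sketch
of sharing a buffer via PRIME, using the libdrm wrappers around the two
ioctls implemented below; drm_fd, handle and the error handling are
illustrative only, not part of this file:

	#include <err.h>
	#include <stdint.h>
	#include <xf86drm.h>

	int prime_fd;
	uint32_t imported;

	/* exporter: GEM handle -> dma-buf fd (DRM_IOCTL_PRIME_HANDLE_TO_FD) */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd))
		err(1, "drmPrimeHandleToFD");

	/* ... pass prime_fd to another process over a UNIX socket (SCM_RIGHTS) ... */

	/* importer: dma-buf fd -> GEM handle (DRM_IOCTL_PRIME_FD_TO_HANDLE) */
	if (drmPrimeFDToHandle(drm_fd, prime_fd, &imported))
		err(1, "drmPrimeFDToHandle");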
89
90struct drm_prime_member {
91 struct dma_buf *dma_buf;
92 uint32_t handle;
93
94 struct rb_node dmabuf_rb;
95 struct rb_node handle_rb;
96};
97
98static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
99 struct dma_buf *dma_buf, uint32_t handle)
100{
101 struct drm_prime_member *member;
102 struct rb_node **p, *rb;
103
104 member = kmalloc(sizeof(*member), GFP_KERNEL);
105 if (!member)
106 return -ENOMEM;
107
108 get_dma_buf(dma_buf);
109 member->dma_buf = dma_buf;
110 member->handle = handle;
111
112 rb = NULL;
113 p = &prime_fpriv->dmabufs.rb_node;
114 while (*p) {
115 struct drm_prime_member *pos;
116
117 rb = *p;
118 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
119 if (dma_buf > pos->dma_buf)
120 p = &rb->rb_right;
121 else
122 p = &rb->rb_left;
123 }
124 rb_link_node(&member->dmabuf_rb, rb, p);
125 rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
126
127 rb = NULL;
128 p = &prime_fpriv->handles.rb_node;
129 while (*p) {
130 struct drm_prime_member *pos;
131
132 rb = *p;
133 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
134 if (handle > pos->handle)
135 p = &rb->rb_right;
136 else
137 p = &rb->rb_left;
138 }
139 rb_link_node(&member->handle_rb, rb, p);
140 rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
141
142 return 0;
143}
144
145static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
146 uint32_t handle)
147{
148 struct rb_node *rb;
149
150 rb = prime_fpriv->handles.rb_node;
151 while (rb) {
152 struct drm_prime_member *member;
153
154 member = rb_entry(rb, struct drm_prime_member, handle_rb);
155 if (member->handle == handle)
156 return member->dma_buf;
157 else if (member->handle < handle)
158 rb = rb->rb_right;
159 else
160 rb = rb->rb_left;
161 }
162
163 return NULL;
164}
165
166static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
167 struct dma_buf *dma_buf,
168 uint32_t *handle)
169{
170 struct rb_node *rb;
171
172 rb = prime_fpriv->dmabufs.rb_node;
173 while (rb) {
174 struct drm_prime_member *member;
175
176 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
177 if (member->dma_buf == dma_buf) {
178 *handle = member->handle;
179 return 0;
180 } else if (member->dma_buf < dma_buf) {
181 rb = rb->rb_right;
182 } else {
183 rb = rb->rb_left;
184 }
185 }
186
187 return -ENOENT;
188}
189
190void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
191 struct dma_buf *dma_buf)
192{
193 struct rb_node *rb;
194
195 rb = prime_fpriv->dmabufs.rb_node;
196 while (rb) {
197 struct drm_prime_member *member;
198
199 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
200 if (member->dma_buf == dma_buf) {
201 rb_erase(&member->handle_rb, &prime_fpriv->handles);
202 rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
203
204 dma_buf_put(dma_buf);
205 kfree(member);
206 return;
207 } else if (member->dma_buf < dma_buf) {
208 rb = rb->rb_right;
209 } else {
210 rb = rb->rb_left;
211 }
212 }
213}
214
215void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
216{
217 rw_init(&prime_fpriv->lock, "primlk");
218 prime_fpriv->dmabufs = RB_ROOT;
219 prime_fpriv->handles = RB_ROOT;
220}
221
222void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
223{
224 /* by now drm_gem_release should've made sure the list is empty */
225 WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
226}
227
228/**
229 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
230 * @dev: parent device for the exported dmabuf
231 * @exp_info: the export information used by dma_buf_export()
232 *
233 * This wraps dma_buf_export() for use by generic GEM drivers that are using
234 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
235 * a reference to the &drm_device and the exported &drm_gem_object (stored in
236 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
237 *
238 * Returns the new dmabuf.
239 */
240struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
241 struct dma_buf_export_info *exp_info)
242{
243 struct drm_gem_object *obj = exp_info->priv;
244 struct dma_buf *dma_buf;
245
246 dma_buf = dma_buf_export(exp_info);
247 if (IS_ERR(dma_buf))
248 return dma_buf;
249
250 drm_dev_get(dev);
251 drm_gem_object_get(obj);
252#ifdef __linux__
253 dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
254#endif
255
256 return dma_buf;
257}
258EXPORT_SYMBOL(drm_gem_dmabuf_export);
259
260/**
261 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
262 * @dma_buf: buffer to be released
263 *
264 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
265 * must use this in their &dma_buf_ops structure as the release callback.
266 * drm_gem_dmabuf_release() should be used in conjunction with
267 * drm_gem_dmabuf_export().
268 */
269void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
270{
271 struct drm_gem_object *obj = dma_buf->priv;
272 struct drm_device *dev = obj->dev;
273
274 /* drop the reference on the export fd holds */
275 drm_gem_object_put(obj);
276
277 drm_dev_put(dev);
278}
279EXPORT_SYMBOL(drm_gem_dmabuf_release);
280
281/**
282 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
283 * @dev: dev to export the buffer from
284 * @file_priv: drm file-private structure
285 * @prime_fd: fd id of the dma-buf which should be imported
286 * @handle: pointer to storage for the handle of the imported buffer object
287 *
288 * This is the PRIME import function which GEM drivers must use to ensure
289 * correct lifetime management of the underlying GEM object.
290 * The actual importing of GEM object from the dma-buf is done through the
291 * &drm_driver.gem_prime_import driver callback.
292 *
293 * Returns 0 on success or a negative error code on failure.
294 */
295int drm_gem_prime_fd_to_handle(struct drm_device *dev,
296 struct drm_file *file_priv, int prime_fd,
297 uint32_t *handle)
298{
299 struct dma_buf *dma_buf;
300 struct drm_gem_object *obj;
301 int ret;
302
303 dma_buf = dma_buf_get(prime_fd);
304 if (IS_ERR(dma_buf))
305 return PTR_ERR(dma_buf);
306
307 mutex_lock(&file_priv->prime.lock);
308
309 ret = drm_prime_lookup_buf_handle(&file_priv->prime,
310 dma_buf, handle);
311 if (ret == 0)
312 goto out_put;
313
314 /* never seen this one, need to import */
315 mutex_lock(&dev->object_name_lock);
316 if (dev->driver->gem_prime_import)
317 obj = dev->driver->gem_prime_import(dev, dma_buf);
318 else
319 obj = drm_gem_prime_import(dev, dma_buf);
320 if (IS_ERR(obj)) {
321 ret = PTR_ERR(obj);
322 goto out_unlock;
323 }
324
325 if (obj->dma_buf) {
326 WARN_ON(obj->dma_buf != dma_buf);
327 } else {
328 obj->dma_buf = dma_buf;
329 get_dma_buf(dma_buf);
330 }
331
332 /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
333 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
334 drm_gem_object_put(obj);
335 if (ret)
336 goto out_put;
337
338 ret = drm_prime_add_buf_handle(&file_priv->prime,
339 dma_buf, *handle);
340 mutex_unlock(&file_priv->prime.lock);
341 if (ret)
342 goto fail;
343
344 dma_buf_put(dma_buf);
345
346 return 0;
347
348fail:
349 /* hmm, if driver attached, we are relying on the free-object path
350 * to detach.. which seems ok..
351 */
352 drm_gem_handle_delete(file_priv, *handle);
353 dma_buf_put(dma_buf);
354 return ret;
355
356out_unlock:
357 mutex_unlock(&dev->object_name_lock);
358out_put:
359 mutex_unlock(&file_priv->prime.lock);
360 dma_buf_put(dma_buf);
361 return ret;
362}
363EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
364
365int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
366 struct drm_file *file_priv)
367{
368 struct drm_prime_handle *args = data;
369
370 if (!dev->driver->prime_fd_to_handle)
371 return -ENOSYS;
372
373 return dev->driver->prime_fd_to_handle(dev, file_priv,
374 args->fd, &args->handle);
375}
376
377static struct dma_buf *export_and_register_object(struct drm_device *dev,
378 struct drm_gem_object *obj,
379 uint32_t flags)
380{
381 struct dma_buf *dmabuf;
382
383 /* prevent races with concurrent gem_close. */
384 if (obj->handle_count == 0) {
385 dmabuf = ERR_PTR(-ENOENT);
386 return dmabuf;
387 }
388
389 if (obj->funcs && obj->funcs->export)
390 dmabuf = obj->funcs->export(obj, flags);
391 else if (dev->driver->gem_prime_export)
392 dmabuf = dev->driver->gem_prime_export(obj, flags);
393 else
394 dmabuf = drm_gem_prime_export(obj, flags);
395 if (IS_ERR(dmabuf)) {
396 /* normally the created dma-buf takes ownership of the ref,
397 * but if that fails then drop the ref
398 */
399 return dmabuf;
400 }
401
402 /*
403 * Note that callers do not need to clean up the export cache
404 * since the check for obj->handle_count guarantees that someone
405 * will clean it up.
406 */
407 obj->dma_buf = dmabuf;
408 get_dma_buf(obj->dma_buf);
409
410 return dmabuf;
411}
412
413/**
414 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
415 * @dev: dev to export the buffer from
416 * @file_priv: drm file-private structure
417 * @handle: buffer handle to export
418 * @flags: flags like DRM_CLOEXEC
419 * @prime_fd: pointer to storage for the fd id of the created dma-buf
420 *
421 * This is the PRIME export function which GEM drivers must use to ensure
422 * correct lifetime management of the underlying GEM object.
423 * The actual exporting from GEM object to a dma-buf is done through the
424 * &drm_driver.gem_prime_export driver callback.
425 */
426int drm_gem_prime_handle_to_fd(struct drm_device *dev,
427 struct drm_file *file_priv, uint32_t handle,
428 uint32_t flags,
429 int *prime_fd)
430{
431 struct drm_gem_object *obj;
432 int ret = 0;
433 struct dma_buf *dmabuf;
434
435 mutex_lock(&file_priv->prime.lock);
436 obj = drm_gem_object_lookup(file_priv, handle);
437 if (!obj) {
438 ret = -ENOENT;
439 goto out_unlock;
440 }
441
442 dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
443 if (dmabuf) {
444 get_dma_buf(dmabuf);
445 goto out_have_handle;
446 }
447
448 mutex_lock(&dev->object_name_lock);
449#ifdef notyet
450 /* re-export the original imported object */
451 if (obj->import_attach) {
452 dmabuf = obj->import_attach->dmabuf;
453 get_dma_buf(dmabuf);
454 goto out_have_obj;
455 }
456#endif
457
458 if (obj->dma_buf) {
459 get_dma_buf(obj->dma_buf);
460 dmabuf = obj->dma_buf;
461 goto out_have_obj;
462 }
463
464 dmabuf = export_and_register_object(dev, obj, flags);
465 if (IS_ERR(dmabuf)) {
466 /* normally the created dma-buf takes ownership of the ref,
467 * but if that fails then drop the ref
468 */
469 ret = PTR_ERR(dmabuf);
470 mutex_unlock(&dev->object_name_lock);
471 goto out;
472 }
473
474out_have_obj:
475 /*
476 * If we've exported this buffer then cheat and add it to the import list
477 * so we get the correct handle back. We must do this under the
478 * protection of dev->object_name_lock to ensure that a racing gem close
479 * ioctl doesn't fail to remove this buffer handle from the cache.
480 */
481 ret = drm_prime_add_buf_handle(&file_priv->prime,
482 dmabuf, handle);
483 mutex_unlock(&dev->object_name_lock);
484 if (ret)
485 goto fail_put_dmabuf;
486
487out_have_handle:
488 ret = dma_buf_fd(dmabuf, flags);
489 /*
490 * We must _not_ remove the buffer from the handle cache since the newly
491 * created dma buf is already linked in the global obj->dma_buf pointer,
492 * and that is invariant as long as a userspace gem handle exists.
493 * Closing the handle will clean out the cache anyway, so we don't leak.
494 */
495 if (ret < 0) {
496 goto fail_put_dmabuf;
497 } else {
498 *prime_fd = ret;
499 ret = 0;
500 }
501
502 goto out;
503
504fail_put_dmabuf:
505 dma_buf_put(dmabuf);
506out:
507 drm_gem_object_put(obj);
508out_unlock:
509 mutex_unlock(&file_priv->prime.lock);
510
511 return ret;
512}
513EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
514
515int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
516 struct drm_file *file_priv)
517{
518 struct drm_prime_handle *args = data;
519
520 if (!dev->driver->prime_handle_to_fd)
521 return -ENOSYS;
522
523 /* check flags are valid */
524 if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
525 return -EINVAL;
526
527 return dev->driver->prime_handle_to_fd(dev, file_priv,
528 args->handle, args->flags, &args->fd);
529}
530
531/**
532 * DOC: PRIME Helpers
533 *
534 * Drivers can implement &drm_gem_object_funcs.export and
535 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
536 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
537 * implement dma-buf support in terms of some lower-level helpers, which are
538 * again exported for drivers to use individually:
539 *
540 * Exporting buffers
541 * ~~~~~~~~~~~~~~~~~
542 *
543 * Optional pinning of buffers is handled at dma-buf attach and detach time in
544 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
545 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
546 * &drm_gem_object_funcs.get_sg_table.
547 *
548 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
549 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
550 * drm_gem_dmabuf_mmap().
551 *
552 * Note that these export helpers can only be used if the underlying backing
553 * storage is fully coherent and either permanently pinned, or it is safe to pin
554 * it indefinitely.
555 *
556 * FIXME: The underlying helper functions are named rather inconsistently.
557 *
558 * Importing buffers
559 * ~~~~~~~~~~~~~~~~~
560 *
561 * Importing dma-bufs using drm_gem_prime_import() relies on
562 * &drm_driver.gem_prime_import_sg_table.
563 *
564 * Note that similarly to the export helpers this permanently pins the
565 * underlying backing storage. Which is ok for scanout, but is not the best
566 * option for sharing lots of buffers for rendering.
567 */
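
As a concrete illustration of this split, a minimal sketch of how a GEM
driver might wire up the helpers described above; the my_gem_* hooks and
structure names are hypothetical, only the drm_gem_prime_* helpers come from
this file:

	static const struct drm_gem_object_funcs my_gem_funcs = {
		.export		= drm_gem_prime_export,	/* the default, spelled out */
		.pin		= my_gem_pin,		/* hypothetical driver hooks */
		.unpin		= my_gem_unpin,
		.get_sg_table	= my_gem_get_sg_table,
		.vmap		= my_gem_vmap,
		.vunmap		= my_gem_vunmap,
	};

	static struct drm_driver my_driver = {
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_import	= drm_gem_prime_import,
		/* ... */
	};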
568
569/**
570 * drm_gem_map_attach - dma_buf attach implementation for GEM
571 * @dma_buf: buffer to attach device to
572 * @attach: buffer attachment data
573 *
574 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
575 * used as the &dma_buf_ops.attach callback. Must be used together with
576 * drm_gem_map_detach().
577 *
578 * Returns 0 on success, negative error code on failure.
579 */
580int drm_gem_map_attach(struct dma_buf *dma_buf,
581 struct dma_buf_attachment *attach)
582{
583 struct drm_gem_object *obj = dma_buf->priv;
584
585 return drm_gem_pin(obj);
586}
587EXPORT_SYMBOL(drm_gem_map_attach);
588
589/**
590 * drm_gem_map_detach - dma_buf detach implementation for GEM
591 * @dma_buf: buffer to detach from
592 * @attach: attachment to be detached
593 *
594 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
595 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
596 * &dma_buf_ops.detach callback.
597 */
598void drm_gem_map_detach(struct dma_buf *dma_buf,
599 struct dma_buf_attachment *attach)
600{
601 struct drm_gem_object *obj = dma_buf->priv;
602
603 drm_gem_unpin(obj);
604}
605EXPORT_SYMBOL(drm_gem_map_detach);
606
607#ifdef notyet
608
609/**
610 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
611 * @attach: attachment whose scatterlist is to be returned
612 * @dir: direction of DMA transfer
613 *
614 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
615 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
616 * with drm_gem_unmap_dma_buf().
617 *
618 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
619 * on error. May return -EINTR if it is interrupted by a signal.
620 */
621struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
622 enum dma_data_direction dir)
623{
624 struct drm_gem_object *obj = attach->dmabuf->priv;
625 struct sg_table *sgt;
626 int ret;
627
628 if (WARN_ON(dir == DMA_NONE))
629 return ERR_PTR(-EINVAL);
630
631 if (obj->funcs)
632 sgt = obj->funcs->get_sg_table(obj);
633 else
634 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
635
636 ret = dma_map_sgtable(attach->dev, sgt, dir,
637 DMA_ATTR_SKIP_CPU_SYNC);
638 if (ret) {
639 sg_free_table(sgt);
640 kfree(sgt);
641 sgt = ERR_PTR(ret);
642 }
643
644 return sgt;
645}
646EXPORT_SYMBOL(drm_gem_map_dma_buf);
647
648/**
649 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
650 * @attach: attachment to unmap buffer from
651 * @sgt: scatterlist info of the buffer to unmap
652 * @dir: direction of DMA transfer
653 *
654 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
655 */
656void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
657 struct sg_table *sgt,
658 enum dma_data_direction dir)
659{
660 if (!sgt)
661 return;
662
663 dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
664 sg_free_table(sgt);
665 kfree(sgt);
666}
667EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
668
669#endif /* notyet */
670
671/**
672 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
673 * @dma_buf: buffer to be mapped
674 *
675 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
676 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
677 *
678 * Returns the kernel virtual address or NULL on failure.
679 */
680void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
681{
682 struct drm_gem_object *obj = dma_buf->priv;
683 void *vaddr;
684
685 vaddr = drm_gem_vmap(obj);
686 if (IS_ERR(vaddr))
687 vaddr = NULL;
688
689 return vaddr;
690}
691EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
692
693/**
694 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
695 * @dma_buf: buffer to be unmapped
696 * @vaddr: the virtual address of the buffer
697 *
698 * Releases a kernel virtual mapping. This can be used as the
699 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
700 */
701void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
702{
703 struct drm_gem_object *obj = dma_buf->priv;
704
705 drm_gem_vunmap(obj, vaddr);
706}
707EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
708
709#ifdef notyet
710
711/**
712 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
713 * @obj: GEM object
714 * @vma: Virtual address range
715 *
716 * This function sets up a userspace mapping for PRIME exported buffers using
717 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
718 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
719 * called to set up the mapping.
720 *
721 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
722 */
723int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
724{
725 struct drm_file *priv;
726 struct file *fil;
727 int ret;
728
729 /* Add the fake offset */
730 vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
731
732 if (obj->funcs && obj->funcs->mmap) {
733 ret = obj->funcs->mmap(obj, vma);
734 if (ret)
735 return ret;
736 vma->vm_private_data = obj;
737 drm_gem_object_get(obj);
738 return 0;
739 }
740
741 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
742 fil = kzalloc(sizeof(*fil), GFP_KERNEL);
743 if (!priv || !fil) {
744 ret = -ENOMEM;
745 goto out;
746 }
747
748 /* Used by drm_gem_mmap() to lookup the GEM object */
749 priv->minor = obj->dev->primary;
750 fil->private_data = priv;
751
752 ret = drm_vma_node_allow(&obj->vma_node, priv);
753 if (ret)
754 goto out;
755
756 ret = obj->dev->driver->fops->mmap(fil, vma);
757
758 drm_vma_node_revoke(&obj->vma_node, priv);
759out:
760 kfree(priv);
761 kfree(fil);
762
763 return ret;
764}
765EXPORT_SYMBOL(drm_gem_prime_mmap);
766
767/**
768 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
769 * @dma_buf: buffer to be mapped
770 * @vma: virtual address range
771 *
772 * Provides memory mapping for the buffer. This can be used as the
773 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
774 * which should be set to drm_gem_prime_mmap().
775 *
776 * FIXME: There's really no point to this wrapper, drivers which need anything
777 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
778 *
779 * Returns 0 on success or a negative error code on failure.
780 */
781int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
782{
783 struct drm_gem_object *obj = dma_buf->priv;
784 struct drm_device *dev = obj->dev;
785
786 if (!dev->driver->gem_prime_mmap)
787 return -ENOSYS;
788
789 return dev->driver->gem_prime_mmap(obj, vma);
790}
791EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
792
793#endif /* notyet */
794
795static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
796#ifdef notyet
797 .cache_sgt_mapping = true,
798 .attach = drm_gem_map_attach,
799 .detach = drm_gem_map_detach,
800 .map_dma_buf = drm_gem_map_dma_buf,
801 .unmap_dma_buf = drm_gem_unmap_dma_buf,
802#endif
803 .release = drm_gem_dmabuf_release,
804#ifdef notyet
805 .mmap = drm_gem_dmabuf_mmap,
806 .vmap = drm_gem_dmabuf_vmap,
807 .vunmap = drm_gem_dmabuf_vunmap,
808#endif
809};
810
811/**
812 * drm_prime_pages_to_sg - converts a page array into an sg list
813 * @dev: DRM device
814 * @pages: pointer to the array of page pointers to convert
815 * @nr_pages: length of the page vector
816 *
817 * This helper creates an sg table object from a set of pages;
818 * the driver is responsible for mapping the pages into the
819 * importer's address space for use with dma_buf itself.
820 *
821 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
822 */
823struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
824 struct vm_page **pages, unsigned int nr_pages)
825{
826 STUB();
827 return NULL;
828#ifdef notyet
829 struct sg_table *sg;
830 struct scatterlist *sge;
831 size_t max_segment = 0;
832
833 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
834 if (!sg)
835 return ERR_PTR(-ENOMEM);
836
837 if (dev)
838 max_segment = dma_max_mapping_size(dev->dev);
839 if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
840 max_segment = SCATTERLIST_MAX_SEGMENT;
841 sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
842 nr_pages << PAGE_SHIFT,
843 max_segment,
844 NULL, 0, GFP_KERNEL);
845 if (IS_ERR(sge)) {
846 kfree(sg);
847 sg = ERR_CAST(sge);
848 }
849 return sg;
850#endif
851}
852EXPORT_SYMBOL(drm_prime_pages_to_sg);
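
For illustration, a hedged sketch of a &drm_gem_object_funcs.get_sg_table
hook built on drm_prime_pages_to_sg(); "struct my_bo" and its pages/num_pages
members are hypothetical driver state, not part of this file:

	static struct sg_table *my_gem_get_sg_table(struct drm_gem_object *obj)
	{
		struct my_bo *bo = container_of(obj, struct my_bo, base);

		return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->num_pages);
	}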
853
854/**
855 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
856 * @sgt: sg_table describing the buffer to check
857 *
858 * This helper calculates the contiguous size in the DMA address space
859 * of the buffer described by the provided sg_table.
860 *
861 * This is useful for implementing
862 * &drm_gem_object_funcs.gem_prime_import_sg_table.
863 */
864unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
865{
866 STUB();
867 return 0;
868#ifdef notyet
869 dma_addr_t expected = sg_dma_address(sgt->sgl);
870 struct scatterlist *sg;
871 unsigned long size = 0;
872 int i;
873
874 for_each_sgtable_dma_sg(sgt, sg, i) {
875 unsigned int len = sg_dma_len(sg);
876
877 if (!len)
878 break;
879 if (sg_dma_address(sg) != expected)
880 break;
881 expected += len;
882 size += len;
883 }
884 return size;
885#endif
886}
887EXPORT_SYMBOL(drm_prime_get_contiguous_size);
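
A typical use, sketched under the assumption of a hypothetical driver that
can only handle physically contiguous buffers in its
&drm_driver.gem_prime_import_sg_table hook; my_gem_object_create() is
illustrative only:

	static struct drm_gem_object *
	my_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
	{
		/* reject imports that are fragmented in the DMA address space */
		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
			return ERR_PTR(-EINVAL);

		/* wrap the buffer in a driver-specific GEM object */
		return my_gem_object_create(dev, attach, sgt);
	}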
888
889/**
890 * drm_gem_prime_export - helper library implementation of the export callback
891 * @obj: GEM object to export
892 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
893 *
894 * This is the implementation of the &drm_gem_object_funcs.export function for GEM drivers
895 * using the PRIME helpers. It is used as the default in
896 * drm_gem_prime_handle_to_fd().
897 */
898struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
899 int flags)
900{
901 struct drm_device *dev = obj->dev;
902 struct dma_buf_export_info exp_info = {
903#ifdef __linux__
904 .exp_name = KBUILD_MODNAME, /* white lie for debug */
905 .owner = dev->driver->fops->owner,
906#endif
907 .ops = &drm_gem_prime_dmabuf_ops,
908 .size = obj->size,
909 .flags = flags,
910 .priv = obj,
911 .resv = obj->resv,
912 };
913
914 return drm_gem_dmabuf_export(dev, &exp_info);
915}
916EXPORT_SYMBOL(drm_gem_prime_export);
917
918/**
919 * drm_gem_prime_import_dev - core implementation of the import callback
920 * @dev: drm_device to import into
921 * @dma_buf: dma-buf object to import
922 * @attach_dev: struct device to dma_buf attach
923 *
924 * This is the core of drm_gem_prime_import(). It's designed to be called by
925 * drivers who want to use a different device structure than &drm_device.dev for
926 * attaching via dma_buf. This function calls
927 * &drm_driver.gem_prime_import_sg_table internally.
928 *
929 * Drivers must arrange to call drm_prime_gem_destroy() from their
930 * &drm_gem_object_funcs.free hook when using this function.
931 */
932struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
933 struct dma_buf *dma_buf,
934 struct device *attach_dev)
935{
936 struct dma_buf_attachment *attach;
937#ifdef notyet
938 struct sg_table *sgt;
939#endif
940 struct drm_gem_object *obj;
941 int ret;
942
943 if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
944 obj = dma_buf->priv;
945 if (obj->dev == dev) {
946 /*
947 * Importing a dmabuf exported from our own gem increases
948 * the refcount on the gem itself instead of the f_count of the dmabuf.
949 */
950 drm_gem_object_get(obj);
951 return obj;
952 }
953 }
954
955#ifdef notyet
956 if (!dev->driver->gem_prime_import_sg_table)
957 return ERR_PTR(-EINVAL);
958#endif
959
960 attach = dma_buf_attach(dma_buf, attach_dev);
961 if (IS_ERR(attach))
962 return ERR_CAST(attach);
963
964#ifdef notyet
965 get_dma_buf(dma_buf);
966
967 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
968 if (IS_ERR(sgt)) {
969 ret = PTR_ERR(sgt);
970 goto fail_detach;
971 }
972
973 obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
974 if (IS_ERR(obj)) {
975 ret = PTR_ERR(obj);
976 goto fail_unmap;
977 }
978
979 obj->import_attach = attach;
980 obj->resv = dma_buf->resv;
981
982 return obj;
983
984fail_unmap:
985 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
986fail_detach:
987 dma_buf_detach(dma_buf, attach);
988 dma_buf_put(dma_buf);
989
990 return ERR_PTR(ret);
991#else
992 ret = 0;
Value stored to 'ret' is never read
993 panic(__func__);
994#endif
995}
996EXPORT_SYMBOL(drm_gem_prime_import_dev);
997
998/**
999 * drm_gem_prime_import - helper library implementation of the import callback
1000 * @dev: drm_device to import into
1001 * @dma_buf: dma-buf object to import
1002 *
1003 * This is the implementation of the gem_prime_import functions for GEM drivers
1004 * using the PRIME helpers. Drivers can use this as their
1005 * &drm_driver.gem_prime_import implementation. It is used as the default
1006 * implementation in drm_gem_prime_fd_to_handle().
1007 *
1008 * Drivers must arrange to call drm_prime_gem_destroy() from their
1009 * &drm_gem_object_funcs.free hook when using this function.
1010 */
1011struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1012 struct dma_buf *dma_buf)
1013{
1014 return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1015}
1016EXPORT_SYMBOL(drm_gem_prime_import);
1017
1018/**
1019 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
1020 * @sgt: scatter-gather table to convert
1021 * @pages: optional array of page pointers to store the page array in
1022 * @addrs: optional array to store the dma bus address of each page
1023 * @max_entries: size of both the passed-in arrays
1024 *
1025 * Exports an sg table into an array of pages and addresses. This is currently
1026 * required by the TTM driver in order to do correct fault handling.
1027 *
1028 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
1029 * implementation.
1030 */
1031int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct vm_page **pages,
1032 dma_addr_t *addrs, int max_entries)
1033{
1034 STUB();
1035 return -ENOSYS;
1036#ifdef notyet
1037 struct sg_dma_page_iter dma_iter;
1038 struct sg_page_iter page_iter;
1039 struct vm_page **p = pages;
1040 dma_addr_t *a = addrs;
1041
1042 if (pages) {
1043 for_each_sgtable_page(sgt, &page_iter, 0) {
1044 if (WARN_ON(p - pages >= max_entries))
1045 return -1;
1046 *p++ = sg_page_iter_page(&page_iter);
1047 }
1048 }
1049 if (addrs) {
1050 for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
1051 if (WARN_ON(a - addrs >= max_entries))
1052 return -1;
1053 *a++ = sg_page_iter_dma_address(&dma_iter);
1054 }
1055 }
1056
1057 return 0;
1058#endif
1059}
1060EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
1061
1062/**
1063 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
1064 * @obj: GEM object which was created from a dma-buf
1065 * @sg: the sg-table which was pinned at import time
1066 *
1067 * This is the cleanup function which GEM drivers need to call when they use
1068 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
1069 */
1070void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
1071{
1072 STUB();
1073#ifdef notyet
1074 struct dma_buf_attachment *attach;
1075 struct dma_buf *dma_buf;
1076
1077 attach = obj->import_attach;
1078 if (sg)
1079 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
1080 dma_buf = attach->dmabuf;
1081 dma_buf_detach(attach->dmabuf, attach);
1082 /* remove the reference */
1083 dma_buf_put(dma_buf);
1084#endif
1085}
1086EXPORT_SYMBOL(drm_prime_gem_destroy);
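
A hedged sketch of the corresponding cleanup in a driver's
&drm_gem_object_funcs.free hook; "struct my_bo" and its "sgt" member are
hypothetical, and on this port the import path is still stubbed out as noted
above:

	static void my_gem_free_object(struct drm_gem_object *obj)
	{
		struct my_bo *bo = container_of(obj, struct my_bo, base);

		if (obj->import_attach)
			drm_prime_gem_destroy(obj, bo->sgt);
		drm_gem_object_release(obj);
		kfree(bo);
	}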