Bug Summary

File:dev/pci/drm/include/drm/drm_drv.h
Warning:line 563, column 3
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name drm_drv.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/drm_drv.c

/usr/src/sys/dev/pci/drm/drm_drv.c

1/*
2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3 *
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
29#include <sys/param.h>
30#include <sys/fcntl.h>
31#include <sys/specdev.h>
32#include <sys/vnode.h>
33
34#include <machine/bus.h>
35
36#ifdef __HAVE_ACPI
37#include <dev/acpi/acpidev.h>
38#include <dev/acpi/acpivar.h>
39#include <dev/acpi/dsdt.h>
40#endif
41
42#include <linux/debugfs.h>
43#include <linux/fs.h>
44#include <linux/module.h>
45#include <linux/moduleparam.h>
46#include <linux/mount.h>
47#include <linux/pseudo_fs.h>
48#include <linux/slab.h>
49#include <linux/srcu.h>
50
51#include <drm/drm_cache.h>
52#include <drm/drm_client.h>
53#include <drm/drm_color_mgmt.h>
54#include <drm/drm_drv.h>
55#include <drm/drm_file.h>
56#include <drm/drm_managed.h>
57#include <drm/drm_mode_object.h>
58#include <drm/drm_print.h>
59#include <drm/drm_privacy_screen_machine.h>
60
61#include <drm/drm_gem.h>
62
63#include "drm_crtc_internal.h"
64#include "drm_internal.h"
65#include "drm_legacy.h"
66
67MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
68MODULE_DESCRIPTION("DRM shared core routines");
69MODULE_LICENSE("GPL and additional rights");
70
71static DEFINE_SPINLOCK(drm_minor_lock)struct mutex drm_minor_lock = { ((void *)0), ((((0x9)) > 0x0
&& ((0x9)) < 0x9) ? 0x9 : ((0x9))), 0x0 }
;
72static struct idr drm_minors_idr;
73
74/*
75 * If the drm core fails to init for whatever reason,
76 * we should prevent any drivers from registering with it.
77 * It's best to check this at drm_dev_init(), as some drivers
78 * prefer to embed struct drm_device into their own device
79 * structure and call drm_dev_init() themselves.
80 */
81static bool_Bool drm_core_init_complete;
82
83static struct dentry *drm_debugfs_root;
84
85#ifdef notyet
86DEFINE_STATIC_SRCU(drm_unplug_srcu);
87#endif
88
89/*
90 * Some functions are only called once on init regardless of how many times
91 * drm attaches. In linux this is handled via module_init()/module_exit()
92 */
93int drm_refcnt;
94
95struct drm_softc {
96 struct device sc_dev;
97 struct drm_device *sc_drm;
98 int sc_allocated;
99};
100
101struct drm_attach_args {
102 struct drm_device *drm;
103 const struct drm_driver *driver;
104 char *busid;
105 bus_dma_tag_t dmat;
106 bus_space_tag_t bst;
107 size_t busid_len;
108 int is_agp;
109 struct pci_attach_args *pa;
110 int primary;
111};
112
113void drm_linux_init(void);
114void drm_linux_exit(void);
115int drm_linux_acpi_notify(struct aml_node *, int, void *);
116
117int drm_dequeue_event(struct drm_device *, struct drm_file *, size_t,
118 struct drm_pending_event **);
119
120int drmprint(void *, const char *);
121int drmsubmatch(struct device *, void *, void *);
122const struct pci_device_id *
123 drm_find_description(int, int, const struct pci_device_id *);
124
125int drm_file_cmp(struct drm_file *, struct drm_file *);
126SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp)void drm_file_tree_SPLAY(struct drm_file_tree *, struct drm_file
*); void drm_file_tree_SPLAY_MINMAX(struct drm_file_tree *, int
); struct drm_file *drm_file_tree_SPLAY_INSERT(struct drm_file_tree
*, struct drm_file *); struct drm_file *drm_file_tree_SPLAY_REMOVE
(struct drm_file_tree *, struct drm_file *); static __attribute__
((__unused__)) __inline struct drm_file * drm_file_tree_SPLAY_FIND
(struct drm_file_tree *head, struct drm_file *elm) { if (((head
)->sph_root == ((void *)0))) return(((void *)0)); drm_file_tree_SPLAY
(head, elm); if ((drm_file_cmp)(elm, (head)->sph_root) == 0
) return (head->sph_root); return (((void *)0)); } static __attribute__
((__unused__)) __inline struct drm_file * drm_file_tree_SPLAY_NEXT
(struct drm_file_tree *head, struct drm_file *elm) { drm_file_tree_SPLAY
(head, elm); if ((elm)->link.spe_right != ((void *)0)) { elm
= (elm)->link.spe_right; while ((elm)->link.spe_left !=
((void *)0)) { elm = (elm)->link.spe_left; } } else elm =
((void *)0); return (elm); } static __attribute__((__unused__
)) __inline struct drm_file * drm_file_tree_SPLAY_MIN_MAX(struct
drm_file_tree *head, int val) { drm_file_tree_SPLAY_MINMAX(head
, val); return ((head)->sph_root); }
;
127
128#define DRMDEVCF_PRIMARY0 0
129#define drmdevcf_primarycf_loc[0] cf_loc[DRMDEVCF_PRIMARY0] /* spec'd as primary? */
130#define DRMDEVCF_PRIMARY_UNK-1 -1
131
132/*
133 * DRM Minors
134 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
135 * of them is represented by a drm_minor object. Depending on the capabilities
136 * of the device-driver, different interfaces are registered.
137 *
138 * Minors can be accessed via dev->$minor_name. This pointer is either
139 * NULL or a valid drm_minor pointer and stays valid as long as the device is
140 * valid. This means, DRM minors have the same life-time as the underlying
141 * device. However, this doesn't mean that the minor is active. Minors are
142 * registered and unregistered dynamically according to device-state.
143 */
144
145static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
146 unsigned int type)
147{
148 switch (type) {
149 case DRM_MINOR_PRIMARY:
150 return &dev->primary;
151 case DRM_MINOR_RENDER:
152 return &dev->render;
153 default:
154 BUG()do { panic("BUG at %s:%d", "/usr/src/sys/dev/pci/drm/drm_drv.c"
, 154); } while (0)
;
155 }
156}
157
158static void drm_minor_alloc_release(struct drm_device *dev, void *data)
159{
160 struct drm_minor *minor = data;
161 unsigned long flags;
162
163 WARN_ON(dev != minor->dev)({ int __ret = !!(dev != minor->dev); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "dev != minor->dev", "/usr/src/sys/dev/pci/drm/drm_drv.c"
, 163); __builtin_expect(!!(__ret), 0); })
;
164
165#ifdef __linux__
166 put_device(minor->kdev);
167#endif
168
169 spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0);
170 idr_remove(&drm_minors_idr, minor->index);
171 spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while (
0)
;
172}
173
174static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
175{
176 struct drm_minor *minor;
177 unsigned long flags;
178 int r;
179
180 minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL(0x0001 | 0x0004));
181 if (!minor)
182 return -ENOMEM12;
183
184 minor->type = type;
185 minor->dev = dev;
186
187 idr_preload(GFP_KERNEL(0x0001 | 0x0004));
188 spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0);
189 r = idr_alloc(&drm_minors_idr,
190 NULL((void *)0),
191 64 * type,
192 64 * (type + 1),
193 GFP_NOWAIT0x0002);
194 spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while (
0)
;
195 idr_preload_end();
196
197 if (r < 0)
198 return r;
199
200 minor->index = r;
201
202 r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
203 if (r)
204 return r;
205
206#ifdef __linux__
207 minor->kdev = drm_sysfs_minor_alloc(minor);
208 if (IS_ERR(minor->kdev))
209 return PTR_ERR(minor->kdev);
210#endif
211
212 *drm_minor_get_slot(dev, type) = minor;
213 return 0;
214}
215
216static int drm_minor_register(struct drm_device *dev, unsigned int type)
217{
218 struct drm_minor *minor;
219 unsigned long flags;
220#ifdef __linux__
221 int ret;
222#endif
223
224 DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n");
225
226 minor = *drm_minor_get_slot(dev, type);
227 if (!minor)
228 return 0;
229
230#ifdef __linux__
231 ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
232 if (ret) {
233 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n")__drm_err("DRM: Failed to initialize /sys/kernel/debug/dri.\n"
)
;
234 goto err_debugfs;
235 }
236
237 ret = device_add(minor->kdev);
238 if (ret)
239 goto err_debugfs;
240#else
241 drm_debugfs_root = NULL((void *)0);
242#endif
243
244 /* replace NULL with @minor so lookups will succeed from now on */
245 spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0);
246 idr_replace(&drm_minors_idr, minor, minor->index);
247 spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while (
0)
;
248
249 DRM_DEBUG("new minor registered %d\n", minor->index)___drm_dbg(((void *)0), DRM_UT_CORE, "new minor registered %d\n"
, minor->index)
;
250 return 0;
251
252#ifdef __linux__
253err_debugfs:
254 drm_debugfs_cleanup(minor);
255 return ret;
256#endif
257}
258
259static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
260{
261 struct drm_minor *minor;
262 unsigned long flags;
263
264 minor = *drm_minor_get_slot(dev, type);
265#ifdef __linux__
266 if (!minor || !device_is_registered(minor->kdev))
267#else
268 if (!minor)
269#endif
270 return;
271
272 /* replace @minor with NULL so lookups will fail from now on */
273 spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0);
274 idr_replace(&drm_minors_idr, NULL((void *)0), minor->index);
275 spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while (
0)
;
276
277#ifdef __linux__
278 device_del(minor->kdev);
279#endif
280 dev_set_drvdata(minor->kdev, NULL((void *)0)); /* safety belt */
281 drm_debugfs_cleanup(minor);
282}
283
284/*
285 * Looks up the given minor-ID and returns the respective DRM-minor object. The
286 * refence-count of the underlying device is increased so you must release this
287 * object with drm_minor_release().
288 *
289 * As long as you hold this minor, it is guaranteed that the object and the
290 * minor->dev pointer will stay valid! However, the device may get unplugged and
291 * unregistered while you hold the minor.
292 */
293struct drm_minor *drm_minor_acquire(unsigned int minor_id)
294{
295 struct drm_minor *minor;
296 unsigned long flags;
297
298 spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0);
1
Loop condition is false. Exiting loop
299 minor = idr_find(&drm_minors_idr, minor_id);
300 if (minor)
2
Assuming 'minor' is non-null
3
Taking true branch
301 drm_dev_get(minor->dev);
302 spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while (
0)
;
4
Loop condition is false. Exiting loop
303
304 if (!minor
4.1
'minor' is non-null
4.1
'minor' is non-null
) {
5
Taking false branch
305 return ERR_PTR(-ENODEV19);
306 } else if (drm_dev_is_unplugged(minor->dev)) {
6
Calling 'drm_dev_is_unplugged'
307 drm_dev_put(minor->dev);
308 return ERR_PTR(-ENODEV19);
309 }
310
311 return minor;
312}
313
314void drm_minor_release(struct drm_minor *minor)
315{
316 drm_dev_put(minor->dev);
317}
318
319/**
320 * DOC: driver instance overview
321 *
322 * A device instance for a drm driver is represented by &struct drm_device. This
323 * is allocated and initialized with devm_drm_dev_alloc(), usually from
324 * bus-specific ->probe() callbacks implemented by the driver. The driver then
325 * needs to initialize all the various subsystems for the drm device like memory
326 * management, vblank handling, modesetting support and initial output
327 * configuration plus obviously initialize all the corresponding hardware bits.
328 * Finally when everything is up and running and ready for userspace the device
329 * instance can be published using drm_dev_register().
330 *
331 * There is also deprecated support for initializing device instances using
332 * bus-specific helpers and the &drm_driver.load callback. But due to
333 * backwards-compatibility needs the device instance have to be published too
334 * early, which requires unpretty global locking to make safe and is therefore
335 * only support for existing drivers not yet converted to the new scheme.
336 *
337 * When cleaning up a device instance everything needs to be done in reverse:
338 * First unpublish the device instance with drm_dev_unregister(). Then clean up
339 * any other resources allocated at device initialization and drop the driver's
340 * reference to &drm_device using drm_dev_put().
341 *
342 * Note that any allocation or resource which is visible to userspace must be
343 * released only when the final drm_dev_put() is called, and not when the
344 * driver is unbound from the underlying physical struct &device. Best to use
345 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
346 * related functions.
347 *
348 * devres managed resources like devm_kmalloc() can only be used for resources
349 * directly related to the underlying hardware device, and only used in code
350 * paths fully protected by drm_dev_enter() and drm_dev_exit().
351 *
352 * Display driver example
353 * ~~~~~~~~~~~~~~~~~~~~~~
354 *
355 * The following example shows a typical structure of a DRM display driver.
356 * The example focus on the probe() function and the other functions that is
357 * almost always present and serves as a demonstration of devm_drm_dev_alloc().
358 *
359 * .. code-block:: c
360 *
361 * struct driver_device {
362 * struct drm_device drm;
363 * void *userspace_facing;
364 * struct clk *pclk;
365 * };
366 *
367 * static const struct drm_driver driver_drm_driver = {
368 * [...]
369 * };
370 *
371 * static int driver_probe(struct platform_device *pdev)
372 * {
373 * struct driver_device *priv;
374 * struct drm_device *drm;
375 * int ret;
376 *
377 * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
378 * struct driver_device, drm);
379 * if (IS_ERR(priv))
380 * return PTR_ERR(priv);
381 * drm = &priv->drm;
382 *
383 * ret = drmm_mode_config_init(drm);
384 * if (ret)
385 * return ret;
386 *
387 * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
388 * if (!priv->userspace_facing)
389 * return -ENOMEM;
390 *
391 * priv->pclk = devm_clk_get(dev, "PCLK");
392 * if (IS_ERR(priv->pclk))
393 * return PTR_ERR(priv->pclk);
394 *
395 * // Further setup, display pipeline etc
396 *
397 * platform_set_drvdata(pdev, drm);
398 *
399 * drm_mode_config_reset(drm);
400 *
401 * ret = drm_dev_register(drm);
402 * if (ret)
403 * return ret;
404 *
405 * drm_fbdev_generic_setup(drm, 32);
406 *
407 * return 0;
408 * }
409 *
410 * // This function is called before the devm_ resources are released
411 * static int driver_remove(struct platform_device *pdev)
412 * {
413 * struct drm_device *drm = platform_get_drvdata(pdev);
414 *
415 * drm_dev_unregister(drm);
416 * drm_atomic_helper_shutdown(drm)
417 *
418 * return 0;
419 * }
420 *
421 * // This function is called on kernel restart and shutdown
422 * static void driver_shutdown(struct platform_device *pdev)
423 * {
424 * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
425 * }
426 *
427 * static int __maybe_unused driver_pm_suspend(struct device *dev)
428 * {
429 * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
430 * }
431 *
432 * static int __maybe_unused driver_pm_resume(struct device *dev)
433 * {
434 * drm_mode_config_helper_resume(dev_get_drvdata(dev));
435 *
436 * return 0;
437 * }
438 *
439 * static const struct dev_pm_ops driver_pm_ops = {
440 * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
441 * };
442 *
443 * static struct platform_driver driver_driver = {
444 * .driver = {
445 * [...]
446 * .pm = &driver_pm_ops,
447 * },
448 * .probe = driver_probe,
449 * .remove = driver_remove,
450 * .shutdown = driver_shutdown,
451 * };
452 * module_platform_driver(driver_driver);
453 *
454 * Drivers that want to support device unplugging (USB, DT overlay unload) should
455 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
456 * regions that is accessing device resources to prevent use after they're
457 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
458 * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
459 * drm_atomic_helper_shutdown() is called. This means that if the disable code
460 * paths are protected, they will not run on regular driver module unload,
461 * possibly leaving the hardware enabled.
462 */
463
464/**
465 * drm_put_dev - Unregister and release a DRM device
466 * @dev: DRM device
467 *
468 * Called at module unload time or when a PCI device is unplugged.
469 *
470 * Cleans up all DRM device, calling drm_lastclose().
471 *
472 * Note: Use of this function is deprecated. It will eventually go away
473 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
474 * instead to make sure that the device isn't userspace accessible any more
475 * while teardown is in progress, ensuring that userspace can't access an
476 * inconsistent state.
477 */
478void drm_put_dev(struct drm_device *dev)
479{
480 DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n");
481
482 if (!dev) {
483 DRM_ERROR("cleanup called no dev\n")__drm_err("cleanup called no dev\n");
484 return;
485 }
486
487 drm_dev_unregister(dev);
488 drm_dev_put(dev);
489}
490EXPORT_SYMBOL(drm_put_dev);
491
492/**
493 * drm_dev_enter - Enter device critical section
494 * @dev: DRM device
495 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
496 *
497 * This function marks and protects the beginning of a section that should not
498 * be entered after the device has been unplugged. The section end is marked
499 * with drm_dev_exit(). Calls to this function can be nested.
500 *
501 * Returns:
502 * True if it is OK to enter the section, false otherwise.
503 */
504bool_Bool drm_dev_enter(struct drm_device *dev, int *idx)
505{
506#ifdef notyet
507 *idx = srcu_read_lock(&drm_unplug_srcu)0;
508
509 if (dev->unplugged) {
510 srcu_read_unlock(&drm_unplug_srcu, *idx);
511 return false0;
512 }
513#endif
514
515 return true1;
9
Returning without writing to '*idx'
516}
517EXPORT_SYMBOL(drm_dev_enter);
518
519/**
520 * drm_dev_exit - Exit device critical section
521 * @idx: index returned from drm_dev_enter()
522 *
523 * This function marks the end of a section that should not be entered after
524 * the device has been unplugged.
525 */
526void drm_dev_exit(int idx)
527{
528#ifdef notyet
529 srcu_read_unlock(&drm_unplug_srcu, idx);
530#endif
531}
532EXPORT_SYMBOL(drm_dev_exit);
533
534/**
535 * drm_dev_unplug - unplug a DRM device
536 * @dev: DRM device
537 *
538 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
539 * userspace operations. Entry-points can use drm_dev_enter() and
540 * drm_dev_exit() to protect device resources in a race free manner. This
541 * essentially unregisters the device like drm_dev_unregister(), but can be
542 * called while there are still open users of @dev.
543 */
544void drm_dev_unplug(struct drm_device *dev)
545{
546 STUB()do { printf("%s: stub\n", __func__); } while(0);
547#ifdef notyet
548 /*
549 * After synchronizing any critical read section is guaranteed to see
550 * the new value of ->unplugged, and any critical section which might
551 * still have seen the old value of ->unplugged is guaranteed to have
552 * finished.
553 */
554 dev->unplugged = true1;
555 synchronize_srcu(&drm_unplug_srcu);
556
557 drm_dev_unregister(dev);
558
559 /* Clear all CPU mappings pointing to this device */
560 unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
561#endif
562}
563EXPORT_SYMBOL(drm_dev_unplug);
564
565#ifdef __linux__
566/*
567 * DRM internal mount
568 * We want to be able to allocate our own "struct address_space" to control
569 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
570 * stand-alone address_space objects, so we need an underlying inode. As there
571 * is no way to allocate an independent inode easily, we need a fake internal
572 * VFS mount-point.
573 *
574 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
575 * frees it again. You are allowed to use iget() and iput() to get references to
576 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
577 * drm_fs_inode_free() call (which does not have to be the last iput()).
578 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
579 * between multiple inode-users. You could, technically, call
580 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
581 * iput(), but this way you'd end up with a new vfsmount for each inode.
582 */
583
584static int drm_fs_cnt;
585static struct vfsmount *drm_fs_mnt;
586
587static int drm_fs_init_fs_context(struct fs_context *fc)
588{
589 return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM12;
590}
591
592static struct file_system_type drm_fs_type = {
593 .name = "drm",
594 .owner = THIS_MODULE((void *)0),
595 .init_fs_context = drm_fs_init_fs_context,
596 .kill_sb = kill_anon_super,
597};
598
599static struct inode *drm_fs_inode_new(void)
600{
601 struct inode *inode;
602 int r;
603
604 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
605 if (r < 0) {
606 DRM_ERROR("Cannot mount pseudo fs: %d\n", r)__drm_err("Cannot mount pseudo fs: %d\n", r);
607 return ERR_PTR(r);
608 }
609
610 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
611 if (IS_ERR(inode))
612 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
613
614 return inode;
615}
616
617static void drm_fs_inode_free(struct inode *inode)
618{
619 if (inode) {
620 iput(inode);
621 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
622 }
623}
624
625#endif /* __linux__ */
626
627/**
628 * DOC: component helper usage recommendations
629 *
630 * DRM drivers that drive hardware where a logical device consists of a pile of
631 * independent hardware blocks are recommended to use the :ref:`component helper
632 * library<component>`. For consistency and better options for code reuse the
633 * following guidelines apply:
634 *
635 * - The entire device initialization procedure should be run from the
636 * &component_master_ops.master_bind callback, starting with
637 * devm_drm_dev_alloc(), then binding all components with
638 * component_bind_all() and finishing with drm_dev_register().
639 *
640 * - The opaque pointer passed to all components through component_bind_all()
641 * should point at &struct drm_device of the device instance, not some driver
642 * specific private structure.
643 *
644 * - The component helper fills the niche where further standardization of
645 * interfaces is not practical. When there already is, or will be, a
646 * standardized interface like &drm_bridge or &drm_panel, providing its own
647 * functions to find such components at driver load time, like
648 * drm_of_find_panel_or_bridge(), then the component helper should not be
649 * used.
650 */
651
652static void drm_dev_init_release(struct drm_device *dev, void *res)
653{
654 drm_legacy_ctxbitmap_cleanup(dev);
655 drm_legacy_remove_map_hash(dev);
656#ifdef __linux__
657 drm_fs_inode_free(dev->anon_inode);
658
659 put_device(dev->dev);
660#endif
661 /* Prevent use-after-free in drm_managed_release when debugging is
662 * enabled. Slightly awkward, but can't really be helped. */
663 dev->dev = NULL((void *)0);
664 mutex_destroy(&dev->master_mutex);
665 mutex_destroy(&dev->clientlist_mutex);
666 mutex_destroy(&dev->filelist_mutex);
667 mutex_destroy(&dev->struct_mutex);
668 drm_legacy_destroy_members(dev);
669}
670
671#ifdef notyet
672
673static int drm_dev_init(struct drm_device *dev,
674 const struct drm_driver *driver,
675 struct device *parent)
676{
677 struct inode *inode;
678 int ret;
679
680 if (!drm_core_init_complete) {
681 DRM_ERROR("DRM core is not initialized\n")__drm_err("DRM core is not initialized\n");
682 return -ENODEV19;
683 }
684
685 if (WARN_ON(!parent)({ int __ret = !!(!parent); if (__ret) printf("WARNING %s failed at %s:%d\n"
, "!parent", "/usr/src/sys/dev/pci/drm/drm_drv.c", 685); __builtin_expect
(!!(__ret), 0); })
)
686 return -EINVAL22;
687
688 kref_init(&dev->ref);
689 dev->dev = get_device(parent);
690 dev->driver = driver;
691
692 INIT_LIST_HEAD(&dev->managed.resources);
693 spin_lock_init(&dev->managed.lock);
694
695 /* no per-device feature limits by default */
696 dev->driver_features = ~0u;
697
698 drm_legacy_init_members(dev);
699 INIT_LIST_HEAD(&dev->filelist);
700 INIT_LIST_HEAD(&dev->filelist_internal);
701 INIT_LIST_HEAD(&dev->clientlist);
702 INIT_LIST_HEAD(&dev->vblank_event_list);
703
704 spin_lock_init(&dev->event_lock);
705 mutex_init(&dev->struct_mutex);
706 mutex_init(&dev->filelist_mutex);
707 mutex_init(&dev->clientlist_mutex);
708 mutex_init(&dev->master_mutex);
709
710 ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL((void *)0));
711 if (ret)
712 return ret;
713
714 inode = drm_fs_inode_new();
715 if (IS_ERR(inode)) {
716 ret = PTR_ERR(inode);
717 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret)__drm_err("Cannot allocate anonymous inode: %d\n", ret);
718 goto err;
719 }
720
721 dev->anon_inode = inode;
722
723 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
724 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
725 if (ret)
726 goto err;
727 }
728
729 ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
730 if (ret)
731 goto err;
732
733 ret = drm_legacy_create_map_hash(dev);
734 if (ret)
735 goto err;
736
737 drm_legacy_ctxbitmap_init(dev);
738
739 if (drm_core_check_feature(dev, DRIVER_GEM)) {
740 ret = drm_gem_init(dev);
741 if (ret) {
742 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n")__drm_err("Cannot initialize graphics execution manager (GEM)\n"
)
;
743 goto err;
744 }
745 }
746
747 ret = drm_dev_set_unique(dev, dev_name(parent)"");
748 if (ret)
749 goto err;
750
751 return 0;
752
753err:
754 drm_managed_release(dev);
755
756 return ret;
757}
758
759static void devm_drm_dev_init_release(void *data)
760{
761 drm_dev_put(data);
762}
763
764static int devm_drm_dev_init(struct device *parent,
765 struct drm_device *dev,
766 const struct drm_driver *driver)
767{
768 int ret;
769
770 ret = drm_dev_init(dev, driver, parent);
771 if (ret)
772 return ret;
773
774 return devm_add_action_or_reset(parent,
775 devm_drm_dev_init_release, dev);
776}
777
778#endif
779
780void *__devm_drm_dev_alloc(struct device *parent,
781 const struct drm_driver *driver,
782 size_t size, size_t offset)
783{
784 void *container;
785 struct drm_device *drm;
786#ifdef notyet
787 int ret;
788#endif
789
790 container = kzalloc(size, GFP_KERNEL(0x0001 | 0x0004));
791 if (!container)
792 return ERR_PTR(-ENOMEM12);
793
794 drm = container + offset;
795#ifdef notyet
796 ret = devm_drm_dev_init(parent, drm, driver);
797 if (ret) {
798 kfree(container);
799 return ERR_PTR(ret);
800 }
801 drmm_add_final_kfree(drm, container);
802#endif
803
804 return container;
805}
806EXPORT_SYMBOL(__devm_drm_dev_alloc);
807
808#ifdef notyet
809
810/**
811 * drm_dev_alloc - Allocate new DRM device
812 * @driver: DRM driver to allocate device for
813 * @parent: Parent device object
814 *
815 * This is the deprecated version of devm_drm_dev_alloc(), which does not support
816 * subclassing through embedding the struct &drm_device in a driver private
817 * structure, and which does not support automatic cleanup through devres.
818 *
819 * RETURNS:
820 * Pointer to new DRM device, or ERR_PTR on failure.
821 */
822struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
823 struct device *parent)
824{
825 struct drm_device *dev;
826 int ret;
827
828 dev = kzalloc(sizeof(*dev), GFP_KERNEL(0x0001 | 0x0004));
829 if (!dev)
830 return ERR_PTR(-ENOMEM12);
831
832 ret = drm_dev_init(dev, driver, parent);
833 if (ret) {
834 kfree(dev);
835 return ERR_PTR(ret);
836 }
837
838 drmm_add_final_kfree(dev, dev);
839
840 return dev;
841}
842EXPORT_SYMBOL(drm_dev_alloc);
843
844#endif
845
846static void drm_dev_release(struct kref *ref)
847{
848 struct drm_device *dev = container_of(ref, struct drm_device, ref)({ const __typeof( ((struct drm_device *)0)->ref ) *__mptr
= (ref); (struct drm_device *)( (char *)__mptr - __builtin_offsetof
(struct drm_device, ref) );})
;
849
850 if (dev->driver->release)
851 dev->driver->release(dev);
852
853 drm_managed_release(dev);
854
855 kfree(dev->managed.final_kfree);
856}
857
858/**
859 * drm_dev_get - Take reference of a DRM device
860 * @dev: device to take reference of or NULL
861 *
862 * This increases the ref-count of @dev by one. You *must* already own a
863 * reference when calling this. Use drm_dev_put() to drop this reference
864 * again.
865 *
866 * This function never fails. However, this function does not provide *any*
867 * guarantee whether the device is alive or running. It only provides a
868 * reference to the object and the memory associated with it.
869 */
870void drm_dev_get(struct drm_device *dev)
871{
872 if (dev)
873 kref_get(&dev->ref);
874}
875EXPORT_SYMBOL(drm_dev_get);
876
877/**
878 * drm_dev_put - Drop reference of a DRM device
879 * @dev: device to drop reference of or NULL
880 *
881 * This decreases the ref-count of @dev by one. The device is destroyed if the
882 * ref-count drops to zero.
883 */
884void drm_dev_put(struct drm_device *dev)
885{
886 if (dev)
887 kref_put(&dev->ref, drm_dev_release);
888}
889EXPORT_SYMBOL(drm_dev_put);
890
891static int create_compat_control_link(struct drm_device *dev)
892{
893 struct drm_minor *minor;
894 char *name;
895 int ret;
896
897 if (!drm_core_check_feature(dev, DRIVER_MODESET))
898 return 0;
899
900 minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
901 if (!minor)
902 return 0;
903
904 /*
905 * Some existing userspace out there uses the existing of the controlD*
906 * sysfs files to figure out whether it's a modeset driver. It only does
907 * readdir, hence a symlink is sufficient (and the least confusing
908 * option). Otherwise controlD* is entirely unused.
909 *
910 * Old controlD chardev have been allocated in the range
911 * 64-127.
912 */
913 name = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "controlD%d", minor->index + 64);
914 if (!name)
915 return -ENOMEM12;
916
917 ret = sysfs_create_link(minor->kdev->kobj.parent,0
918 &minor->kdev->kobj,0
919 name)0;
920
921 kfree(name);
922
923 return ret;
924}
925
926static void remove_compat_control_link(struct drm_device *dev)
927{
928 struct drm_minor *minor;
929 char *name;
930
931 if (!drm_core_check_feature(dev, DRIVER_MODESET))
932 return;
933
934 minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
935 if (!minor)
936 return;
937
938 name = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "controlD%d", minor->index + 64);
939 if (!name)
940 return;
941
942 sysfs_remove_link(minor->kdev->kobj.parent, name);
943
944 kfree(name);
945}
946
947/**
948 * drm_dev_register - Register DRM device
949 * @dev: Device to register
950 * @flags: Flags passed to the driver's .load() function
951 *
952 * Register the DRM device @dev with the system, advertise device to user-space
953 * and start normal device operation. @dev must be initialized via drm_dev_init()
954 * previously.
955 *
956 * Never call this twice on any device!
957 *
958 * NOTE: To ensure backward compatibility with existing drivers method this
959 * function calls the &drm_driver.load method after registering the device
960 * nodes, creating race conditions. Usage of the &drm_driver.load methods is
961 * therefore deprecated, drivers must perform all initialization before calling
962 * drm_dev_register().
963 *
964 * RETURNS:
965 * 0 on success, negative error code on failure.
966 */
967int drm_dev_register(struct drm_device *dev, unsigned long flags)
968{
969 const struct drm_driver *driver = dev->driver;
970 int ret;
971
972 if (!driver->load)
973 drm_mode_config_validate(dev);
974
975 WARN_ON(!dev->managed.final_kfree)({ int __ret = !!(!dev->managed.final_kfree); if (__ret) printf
("WARNING %s failed at %s:%d\n", "!dev->managed.final_kfree"
, "/usr/src/sys/dev/pci/drm/drm_drv.c", 975); __builtin_expect
(!!(__ret), 0); })
;
976
977 if (drm_dev_needs_global_mutex(dev))
978 mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex);
979
980 ret = drm_minor_register(dev, DRM_MINOR_RENDER);
981 if (ret)
982 goto err_minors;
983
984 ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
985 if (ret)
986 goto err_minors;
987
988 ret = create_compat_control_link(dev);
989 if (ret)
990 goto err_minors;
991
992 dev->registered = true1;
993
994 if (dev->driver->load) {
995 ret = dev->driver->load(dev, flags);
996 if (ret)
997 goto err_minors;
998 }
999
1000 if (drm_core_check_feature(dev, DRIVER_MODESET))
1001 drm_modeset_register_all(dev);
1002
1003 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n"
, driver->name, driver->major, driver->minor, driver
->patchlevel, driver->date, dev->dev ? "" : "virtual device"
, dev->primary->index)
1004 driver->name, driver->major, driver->minor,printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n"
, driver->name, driver->major, driver->minor, driver
->patchlevel, driver->date, dev->dev ? "" : "virtual device"
, dev->primary->index)
1005 driver->patchlevel, driver->date,printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n"
, driver->name, driver->major, driver->minor, driver
->patchlevel, driver->date, dev->dev ? "" : "virtual device"
, dev->primary->index)
1006 dev->dev ? dev_name(dev->dev) : "virtual device",printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n"
, driver->name, driver->major, driver->minor, driver
->patchlevel, driver->date, dev->dev ? "" : "virtual device"
, dev->primary->index)
1007 dev->primary->index)printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n"
, driver->name, driver->major, driver->minor, driver
->patchlevel, driver->date, dev->dev ? "" : "virtual device"
, dev->primary->index)
;
1008
1009 goto out_unlock;
1010
1011err_minors:
1012 remove_compat_control_link(dev);
1013 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1014 drm_minor_unregister(dev, DRM_MINOR_RENDER);
1015out_unlock:
1016 if (drm_dev_needs_global_mutex(dev))
1017 mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1018 return ret;
1019}
1020EXPORT_SYMBOL(drm_dev_register);
1021
1022/**
1023 * drm_dev_unregister - Unregister DRM device
1024 * @dev: Device to unregister
1025 *
1026 * Unregister the DRM device from the system. This does the reverse of
1027 * drm_dev_register() but does not deallocate the device. The caller must call
1028 * drm_dev_put() to drop their final reference.
1029 *
1030 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
1031 * which can be called while there are still open users of @dev.
1032 *
1033 * This should be called first in the device teardown code to make sure
1034 * userspace can't access the device instance any more.
1035 */
1036void drm_dev_unregister(struct drm_device *dev)
1037{
1038 if (drm_core_check_feature(dev, DRIVER_LEGACY))
1039 drm_lastclose(dev);
1040
1041 dev->registered = false0;
1042
1043 drm_client_dev_unregister(dev);
1044
1045 if (drm_core_check_feature(dev, DRIVER_MODESET))
1046 drm_modeset_unregister_all(dev);
1047
1048 if (dev->driver->unload)
1049 dev->driver->unload(dev);
1050
1051 drm_legacy_pci_agp_destroy(dev);
1052 drm_legacy_rmmaps(dev);
1053
1054 remove_compat_control_link(dev);
1055 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
1056 drm_minor_unregister(dev, DRM_MINOR_RENDER);
1057}
1058EXPORT_SYMBOL(drm_dev_unregister);
1059
1060/**
1061 * drm_dev_set_unique - Set the unique name of a DRM device
1062 * @dev: device of which to set the unique name
1063 * @name: unique name
1064 *
1065 * Sets the unique name of a DRM device using the specified string. This is
1066 * already done by drm_dev_init(), drivers should only override the default
1067 * unique name for backwards compatibility reasons.
1068 *
1069 * Return: 0 on success or a negative error code on failure.
1070 */
1071int drm_dev_set_unique(struct drm_device *dev, const char *name)
1072{
1073 drmm_kfree(dev, dev->unique);
1074 dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL(0x0001 | 0x0004));
1075
1076 return dev->unique ? 0 : -ENOMEM12;
1077}
1078EXPORT_SYMBOL(drm_dev_set_unique);
1079
1080/*
1081 * DRM Core
1082 * The DRM core module initializes all global DRM objects and makes them
1083 * available to drivers. Once setup, drivers can probe their respective
1084 * devices.
1085 * Currently, core management includes:
1086 * - The "DRM-Global" key/value database
1087 * - Global ID management for connectors
1088 * - DRM major number allocation
1089 * - DRM minor management
1090 * - DRM sysfs class
1091 * - DRM debugfs root
1092 *
1093 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
1094 * interface registered on a DRM device, you can request minor numbers from DRM
1095 * core. DRM core takes care of major-number management and char-dev
1096 * registration. A stub ->open() callback forwards any open() requests to the
1097 * registered minor.
1098 */
1099
1100#ifdef __linux__
1101static int drm_stub_open(struct inode *inode, struct file *filp)
1102{
1103 const struct file_operations *new_fops;
1104 struct drm_minor *minor;
1105 int err;
1106
1107 DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n");
1108
1109 minor = drm_minor_acquire(iminor(inode));
1110 if (IS_ERR(minor))
1111 return PTR_ERR(minor);
1112
1113 new_fops = fops_get(minor->dev->driver->fops);
1114 if (!new_fops) {
1115 err = -ENODEV19;
1116 goto out;
1117 }
1118
1119 replace_fops(filp, new_fops);
1120 if (filp->f_op->open)
1121 err = filp->f_op->open(inode, filp);
1122 else
1123 err = 0;
1124
1125out:
1126 drm_minor_release(minor);
1127
1128 return err;
1129}
1130
1131static const struct file_operations drm_stub_fops = {
1132 .owner = THIS_MODULE((void *)0),
1133 .open = drm_stub_open,
1134 .llseek = noop_llseek,
1135};
1136#endif /* __linux__ */
1137
1138static void drm_core_exit(void)
1139{
1140 drm_privacy_screen_lookup_exit();
1141#ifdef __linux__
1142 unregister_chrdev(DRM_MAJOR, "drm");
1143 debugfs_remove(drm_debugfs_root);
1144 drm_sysfs_destroy();
1145#endif
1146 idr_destroy(&drm_minors_idr);
1147 drm_connector_ida_destroy();
1148}
1149
1150static int __init drm_core_init(void)
1151{
1152#ifdef __linux__
1153 int ret;
1154#endif
1155
1156 drm_connector_ida_init();
1157 idr_init(&drm_minors_idr);
1158 drm_memcpy_init_early();
1159
1160#ifdef __linux__
1161 ret = drm_sysfs_init();
1162 if (ret < 0) {
1163 DRM_ERROR("Cannot create DRM class: %d\n", ret)__drm_err("Cannot create DRM class: %d\n", ret);
1164 goto error;
1165 }
1166
1167 drm_debugfs_root = debugfs_create_dir("dri", NULL)ERR_PTR(-78);
1168
1169 ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
1170 if (ret < 0)
1171 goto error;
1172#endif
1173
1174 drm_privacy_screen_lookup_init();
1175
1176 drm_core_init_complete = true1;
1177
1178 DRM_DEBUG("Initialized\n")___drm_dbg(((void *)0), DRM_UT_CORE, "Initialized\n");
1179 return 0;
1180#ifdef __linux__
1181error:
1182 drm_core_exit();
1183 return ret;
1184#endif
1185}
1186
1187#ifdef __linux__
1188module_init(drm_core_init);
1189module_exit(drm_core_exit);
1190#endif
1191
1192void
1193drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot,
1194 bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm)
1195{
1196 struct drm_attach_args arg;
1197
1198 memset(&arg, 0, sizeof(arg))__builtin_memset((&arg), (0), (sizeof(arg)));
1199 arg.driver = driver;
1200 arg.bst = iot;
1201 arg.dmat = dmat;
1202 arg.drm = drm;
1203
1204 arg.busid = dev->dv_xname;
1205 arg.busid_len = strlen(dev->dv_xname) + 1;
1206 config_found_sm(dev, &arg, drmprint, drmsubmatch);
1207}
1208
1209struct drm_device *
1210drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa,
1211 int is_agp, int primary, struct device *dev, struct drm_device *drm)
1212{
1213 struct drm_attach_args arg;
1214 struct drm_softc *sc;
1215
1216 arg.drm = drm;
1217 arg.driver = driver;
1218 arg.dmat = pa->pa_dmat;
1219 arg.bst = pa->pa_memt;
1220 arg.is_agp = is_agp;
1221 arg.primary = primary;
1222 arg.pa = pa;
1223
1224 arg.busid_len = 20;
1225 arg.busid = malloc(arg.busid_len + 1, M_DRM145, M_NOWAIT0x0002);
1226 if (arg.busid == NULL((void *)0)) {
1227 printf("%s: no memory for drm\n", dev->dv_xname);
1228 return (NULL((void *)0));
1229 }
1230 snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x",
1231 pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function);
1232
1233 sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch);
1234 if (sc == NULL((void *)0))
1235 return NULL((void *)0);
1236
1237 return sc->sc_drm;
1238}
1239
1240int
1241drmprint(void *aux, const char *pnp)
1242{
1243 if (pnp != NULL((void *)0))
1244 printf("drm at %s", pnp);
1245 return (UNCONF1);
1246}
1247
1248int
1249drmsubmatch(struct device *parent, void *match, void *aux)
1250{
1251 extern struct cfdriver drm_cd;
1252 struct cfdata *cf = match;
1253
1254 /* only allow drm to attach */
1255 if (cf->cf_driver == &drm_cd)
1256 return ((*cf->cf_attach->ca_match)(parent, match, aux));
1257 return (0);
1258}
1259
1260int
1261drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist)
1262{
1263 const struct pci_device_id *id_entry;
1264
1265 id_entry = drm_find_description(PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff),
1266 PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff), idlist);
1267 if (id_entry != NULL((void *)0))
1268 return 1;
1269
1270 return 0;
1271}
1272
1273int
1274drm_probe(struct device *parent, void *match, void *aux)
1275{
1276 struct cfdata *cf = match;
1277 struct drm_attach_args *da = aux;
1278
1279 if (cf->drmdevcf_primarycf_loc[0] != DRMDEVCF_PRIMARY_UNK-1) {
1280 /*
1281 * If primary-ness of device specified, either match
1282 * exactly (at high priority), or fail.
1283 */
1284 if (cf->drmdevcf_primarycf_loc[0] != 0 && da->primary != 0)
1285 return (10);
1286 else
1287 return (0);
1288 }
1289
1290 /* If primary-ness unspecified, it wins. */
1291 return (1);
1292}
1293
1294int drm_buddy_module_init(void);
1295void drm_buddy_module_exit(void);
1296
1297void
1298drm_attach(struct device *parent, struct device *self, void *aux)
1299{
1300 struct drm_softc *sc = (struct drm_softc *)self;
1301 struct drm_attach_args *da = aux;
1302 struct drm_device *dev = da->drm;
1303 int ret;
1304
1305 if (drm_refcnt == 0) {
1306 drm_linux_init();
1307 drm_core_init();
1308 drm_buddy_module_init();
1309 }
1310 drm_refcnt++;
1311
1312 if (dev == NULL((void *)0)) {
1313 dev = malloc(sizeof(struct drm_device), M_DRM145,
1314 M_WAITOK0x0001 | M_ZERO0x0008);
1315 sc->sc_allocated = 1;
1316 }
1317
1318 sc->sc_drm = dev;
1319
1320 kref_init(&dev->ref);
1321 dev->dev = self;
1322 dev->dev_private = parent;
1323 dev->driver = da->driver;
1324
1325 INIT_LIST_HEAD(&dev->managed.resources);
1326 mtx_init(&dev->managed.lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev->
managed.lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9
) ? 0x9 : ((0x9)))); } while (0)
;
1327
1328 /* no per-device feature limits by default */
1329 dev->driver_features = ~0u;
1330
1331 dev->dmat = da->dmat;
1332 dev->bst = da->bst;
1333 dev->unique = da->busid;
1334
1335 if (da->pa) {
1336 struct pci_attach_args *pa = da->pa;
1337 pcireg_t subsys;
1338
1339 subsys = pci_conf_read(pa->pa_pc, pa->pa_tag,
1340 PCI_SUBSYS_ID_REG0x2c);
1341
1342 dev->pdev = &dev->_pdev;
1343 dev->pdev->vendor = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff);
1344 dev->pdev->device = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff);
1345 dev->pdev->subsystem_vendor = PCI_VENDOR(subsys)(((subsys) >> 0) & 0xffff);
1346 dev->pdev->subsystem_device = PCI_PRODUCT(subsys)(((subsys) >> 16) & 0xffff);
1347 dev->pdev->revision = PCI_REVISION(pa->pa_class)(((pa->pa_class) >> 0) & 0xff);
1348 dev->pdev->class = (PCI_CLASS(pa->pa_class)(((pa->pa_class) >> 24) & 0xff) << 16) |
1349 (PCI_SUBCLASS(pa->pa_class)(((pa->pa_class) >> 16) & 0xff) << 8) |
1350 PCI_INTERFACE(pa->pa_class)(((pa->pa_class) >> 8) & 0xff);
1351
1352 dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function)((pa->pa_device) << 3 | (pa->pa_function));
1353 dev->pdev->bus = &dev->pdev->_bus;
1354 dev->pdev->bus->pc = pa->pa_pc;
1355 dev->pdev->bus->number = pa->pa_bus;
1356 dev->pdev->bus->domain_nr = pa->pa_domain;
1357 dev->pdev->bus->bridgetag = pa->pa_bridgetag;
1358
1359 if (pa->pa_bridgetag != NULL((void *)0)) {
1360 dev->pdev->bus->self = malloc(sizeof(struct pci_dev),
1361 M_DRM145, M_WAITOK0x0001 | M_ZERO0x0008);
1362 dev->pdev->bus->self->pc = pa->pa_pc;
1363 dev->pdev->bus->self->tag = *pa->pa_bridgetag;
1364 }
1365
1366 dev->pdev->pc = pa->pa_pc;
1367 dev->pdev->tag = pa->pa_tag;
1368 dev->pdev->pci = (struct pci_softc *)parent->dv_parent;
1369
1370#ifdef CONFIG_ACPI1
1371 dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag);
1372 aml_register_notify(dev->pdev->dev.node, NULL((void *)0),
1373 drm_linux_acpi_notify, NULL((void *)0), ACPIDEV_NOPOLL0);
1374#endif
1375 }
1376
1377 mtx_init(&dev->quiesce_mtx, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev->
quiesce_mtx), ((((0x0)) > 0x0 && ((0x0)) < 0x9)
? 0x9 : ((0x0)))); } while (0)
;
1378 mtx_init(&dev->event_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev->
event_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ?
0x9 : ((0x9)))); } while (0)
;
1379 rw_init(&dev->struct_mutex, "drmdevlk")_rw_init_flags(&dev->struct_mutex, "drmdevlk", 0, ((void
*)0))
;
1380 rw_init(&dev->filelist_mutex, "drmflist")_rw_init_flags(&dev->filelist_mutex, "drmflist", 0, ((
void *)0))
;
1381 rw_init(&dev->clientlist_mutex, "drmclist")_rw_init_flags(&dev->clientlist_mutex, "drmclist", 0, (
(void *)0))
;
1382 rw_init(&dev->master_mutex, "drmmast")_rw_init_flags(&dev->master_mutex, "drmmast", 0, ((void
*)0))
;
1383
1384 ret = drmm_add_action(dev, drm_dev_init_release, NULL((void *)0));
1385 if (ret)
1386 goto error;
1387
1388 SPLAY_INIT(&dev->files)do { (&dev->files)->sph_root = ((void *)0); } while
(0)
;
1389 INIT_LIST_HEAD(&dev->filelist_internal);
1390 INIT_LIST_HEAD(&dev->clientlist);
1391 INIT_LIST_HEAD(&dev->vblank_event_list);
1392
1393 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
1394 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
1395 if (ret)
1396 goto error;
1397 }
1398
1399 ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
1400 if (ret)
1401 goto error;
1402
1403#ifdef CONFIG_DRM_LEGACY
1404 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
1405#if IS_ENABLED(CONFIG_AGP)1
1406 if (da->is_agp)
1407 dev->agp = drm_agp_init();
1408#endif
1409 if (dev->agp != NULL((void *)0)) {
1410 if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
1411 dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1)) == 0)
1412 dev->agp->mtrr = 1;
1413 }
1414 }
1415#endif
1416
1417 if (dev->driver->gem_size > 0) {
1418 KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object))((dev->driver->gem_size >= sizeof(struct drm_gem_object
)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/drm_drv.c"
, 1418, "dev->driver->gem_size >= sizeof(struct drm_gem_object)"
))
;
1419 /* XXX unique name */
1420 pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE0x0, 0,
1421 "drmobjpl", NULL((void *)0));
1422 }
1423
1424 if (drm_core_check_feature(dev, DRIVER_GEM)) {
1425 ret = drm_gem_init(dev);
1426 if (ret) {
1427 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n")__drm_err("Cannot initialize graphics execution manager (GEM)\n"
)
;
1428 goto error;
1429 }
1430 }
1431
1432 drmm_add_final_kfree(dev, dev);
1433
1434 printf("\n");
1435 return;
1436
1437error:
1438 drm_managed_release(dev);
1439 dev->dev_private = NULL((void *)0);
1440}
1441
1442int
1443drm_detach(struct device *self, int flags)
1444{
1445 struct drm_softc *sc = (struct drm_softc *)self;
1446 struct drm_device *dev = sc->sc_drm;
1447
1448 drm_refcnt--;
1449 if (drm_refcnt == 0) {
1450 drm_buddy_module_exit();
1451 drm_core_exit();
1452 drm_linux_exit();
1453 }
1454
1455 drm_lastclose(dev);
1456
1457 if (drm_core_check_feature(dev, DRIVER_GEM)) {
1458 if (dev->driver->gem_size > 0)
1459 pool_destroy(&dev->objpl);
1460 }
1461
1462#ifdef CONFIG_DRM_LEGACY
1463 if (dev->agp && dev->agp->mtrr) {
1464 int retcode;
1465
1466 retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
1467 dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1));
1468 DRM_DEBUG("mtrr_del = %d", retcode)___drm_dbg(((void *)0), DRM_UT_CORE, "mtrr_del = %d", retcode
)
;
1469 }
1470
1471 free(dev->agp, M_DRM145, 0);
1472#endif
1473 if (dev->pdev && dev->pdev->bus)
1474 free(dev->pdev->bus->self, M_DRM145, sizeof(struct pci_dev));
1475
1476 if (sc->sc_allocated)
1477 free(dev, M_DRM145, sizeof(struct drm_device));
1478
1479 return 0;
1480}
1481
1482void
1483drm_quiesce(struct drm_device *dev)
1484{
1485 mtx_enter(&dev->quiesce_mtx);
1486 dev->quiesce = 1;
1487 while (dev->quiesce_count > 0) {
1488 msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
1489 PZERO22, "drmqui", INFSLP0xffffffffffffffffULL);
1490 }
1491 mtx_leave(&dev->quiesce_mtx);
1492}
1493
1494void
1495drm_wakeup(struct drm_device *dev)
1496{
1497 mtx_enter(&dev->quiesce_mtx);
1498 dev->quiesce = 0;
1499 wakeup(&dev->quiesce);
1500 mtx_leave(&dev->quiesce_mtx);
1501}
1502
1503int
1504drm_activate(struct device *self, int act)
1505{
1506 struct drm_softc *sc = (struct drm_softc *)self;
1507 struct drm_device *dev = sc->sc_drm;
1508
1509 switch (act) {
1510 case DVACT_QUIESCE2:
1511 drm_quiesce(dev);
1512 break;
1513 case DVACT_WAKEUP5:
1514 drm_wakeup(dev);
1515 break;
1516 }
1517
1518 return (0);
1519}
1520
1521const struct cfattach drm_ca = {
1522 sizeof(struct drm_softc), drm_probe, drm_attach,
1523 drm_detach, drm_activate
1524};
1525
1526struct cfdriver drm_cd = {
1527 0, "drm", DV_DULL
1528};
1529
1530const struct pci_device_id *
1531drm_find_description(int vendor, int device, const struct pci_device_id *idlist)
1532{
1533 int i = 0;
1534
1535 for (i = 0; idlist[i].vendor != 0; i++) {
1536 if ((idlist[i].vendor == vendor) &&
1537 (idlist[i].device == device ||
1538 idlist[i].device == PCI_ANY_ID(uint16_t) (~0U)) &&
1539 (idlist[i].subvendor == PCI_ANY_ID(uint16_t) (~0U)) &&
1540 (idlist[i].subdevice == PCI_ANY_ID(uint16_t) (~0U)))
1541 return &idlist[i];
1542 }
1543 return NULL((void *)0);
1544}
1545
1546int
1547drm_file_cmp(struct drm_file *f1, struct drm_file *f2)
1548{
1549 return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor);
1550}
1551
1552SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp)struct drm_file * drm_file_tree_SPLAY_INSERT(struct drm_file_tree
*head, struct drm_file *elm) { if (((head)->sph_root == (
(void *)0))) { (elm)->link.spe_left = (elm)->link.spe_right
= ((void *)0); } else { int __comp; drm_file_tree_SPLAY(head
, elm); __comp = (drm_file_cmp)(elm, (head)->sph_root); if
(__comp < 0) { (elm)->link.spe_left = ((head)->sph_root
)->link.spe_left; (elm)->link.spe_right = (head)->sph_root
; ((head)->sph_root)->link.spe_left = ((void *)0); } else
if (__comp > 0) { (elm)->link.spe_right = ((head)->
sph_root)->link.spe_right; (elm)->link.spe_left = (head
)->sph_root; ((head)->sph_root)->link.spe_right = ((
void *)0); } else return ((head)->sph_root); } (head)->
sph_root = (elm); return (((void *)0)); } struct drm_file * drm_file_tree_SPLAY_REMOVE
(struct drm_file_tree *head, struct drm_file *elm) { struct drm_file
*__tmp; if (((head)->sph_root == ((void *)0))) return (((
void *)0)); drm_file_tree_SPLAY(head, elm); if ((drm_file_cmp
)(elm, (head)->sph_root) == 0) { if (((head)->sph_root)
->link.spe_left == ((void *)0)) { (head)->sph_root = ((
head)->sph_root)->link.spe_right; } else { __tmp = ((head
)->sph_root)->link.spe_right; (head)->sph_root = ((head
)->sph_root)->link.spe_left; drm_file_tree_SPLAY(head, elm
); ((head)->sph_root)->link.spe_right = __tmp; } return
(elm); } return (((void *)0)); } void drm_file_tree_SPLAY(struct
drm_file_tree *head, struct drm_file *elm) { struct drm_file
__node, *__left, *__right, *__tmp; int __comp; (&__node)
->link.spe_left = (&__node)->link.spe_right = ((void
*)0); __left = __right = &__node; while ((__comp = (drm_file_cmp
)(elm, (head)->sph_root))) { if (__comp < 0) { __tmp = (
(head)->sph_root)->link.spe_left; if (__tmp == ((void *
)0)) break; if ((drm_file_cmp)(elm, __tmp) < 0){ do { ((head
)->sph_root)->link.spe_left = (__tmp)->link.spe_right
; (__tmp)->link.spe_right = (head)->sph_root; (head)->
sph_root = __tmp; } while (0); if (((head)->sph_root)->
link.spe_left == ((void *)0)) break; } do { (__right)->link
.spe_left = (head)->sph_root; __right = (head)->sph_root
; (head)->sph_root = ((head)->sph_root)->link.spe_left
; } while (0); } else if (__comp > 0) { __tmp = ((head)->
sph_root)->link.spe_right; if (__tmp == ((void *)0)) break
; if ((drm_file_cmp)(elm, __tmp) > 0){ do { ((head)->sph_root
)->link.spe_right = (__tmp)->link.spe_left; (__tmp)->
link.spe_left = (head)->sph_root; (head)->sph_root = __tmp
; } while (0); if (((head)->sph_root)->link.spe_right ==
((void *)0)) break; } do { (__left)->link.spe_right = (head
)->sph_root; __left = (head)->sph_root; (head)->sph_root
= ((head)->sph_root)->link.spe_right; } while (0); } }
do { (__left)->link.spe_right = ((head)->sph_root)->
link.spe_left; (__right)->link.spe_left = ((head)->sph_root
)->link.spe_right; ((head)->sph_root)->link.spe_left
= (&__node)->link.spe_right; ((head)->sph_root)->
link.spe_right = (&__node)->link.spe_left; } while (0)
; } void drm_file_tree_SPLAY_MINMAX(struct drm_file_tree *head
, int __comp) { struct drm_file __node, *__left, *__right, *__tmp
; (&__node)->link.spe_left = (&__node)->link.spe_right
= ((void *)0); __left = __right = &__node; while (1) { if
(__comp < 0) { __tmp = ((head)->sph_root)->link.spe_left
; if (__tmp == ((void *)0)) break; if (__comp < 0){ do { (
(head)->sph_root)->link.spe_left = (__tmp)->link.spe_right
; (__tmp)->link.spe_right = (head)->sph_root; (head)->
sph_root = __tmp; } while (0); if (((head)->sph_root)->
link.spe_left == ((void *)0)) break; } do { (__right)->link
.spe_left = (head)->sph_root; __right = (head)->sph_root
; (head)->sph_root = ((head)->sph_root)->link.spe_left
; } while (0); } else if (__comp > 0) { __tmp = ((head)->
sph_root)->link.spe_right; if (__tmp == ((void *)0)) break
; if (__comp > 0) { do { ((head)->sph_root)->link.spe_right
= (__tmp)->link.spe_left; (__tmp)->link.spe_left = (head
)->sph_root; (head)->sph_root = __tmp; } while (0); if (
((head)->sph_root)->link.spe_right == ((void *)0)) break
; } do { (__left)->link.spe_right = (head)->sph_root; __left
= (head)->sph_root; (head)->sph_root = ((head)->sph_root
)->link.spe_right; } while (0); } } do { (__left)->link
.spe_right = ((head)->sph_root)->link.spe_left; (__right
)->link.spe_left = ((head)->sph_root)->link.spe_right
; ((head)->sph_root)->link.spe_left = (&__node)->
link.spe_right; ((head)->sph_root)->link.spe_right = (&
__node)->link.spe_left; } while (0); }
;
1553
1554struct drm_file *
1555drm_find_file_by_minor(struct drm_device *dev, int minor)
1556{
1557 struct drm_file key;
1558
1559 key.fminor = minor;
1560 return (SPLAY_FIND(drm_file_tree, &dev->files, &key)drm_file_tree_SPLAY_FIND(&dev->files, &key));
1561}
1562
1563struct drm_device *
1564drm_get_device_from_kdev(dev_t kdev)
1565{
1566 int unit = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
& ((1 << CLONE_SHIFT8) - 1);
1567 /* render */
1568 if (unit >= 128)
1569 unit -= 128;
1570 struct drm_softc *sc;
1571
1572 if (unit < drm_cd.cd_ndevs) {
1573 sc = (struct drm_softc *)drm_cd.cd_devs[unit];
1574 if (sc)
1575 return sc->sc_drm;
1576 }
1577
1578 return NULL((void *)0);
1579}
1580
1581void
1582filt_drmdetach(struct knote *kn)
1583{
1584 struct drm_device *dev = kn->kn_hook;
1585 int s;
1586
1587 s = spltty()splraise(0x9);
1588 klist_remove_locked(&dev->note, kn);
1589 splx(s)spllower(s);
1590}
1591
1592int
1593filt_drmkms(struct knote *kn, long hint)
1594{
1595 if (kn->kn_sfflags & hint)
1596 kn->kn_fflagskn_kevent.fflags |= hint;
1597 return (kn->kn_fflagskn_kevent.fflags != 0);
1598}
1599
1600void
1601filt_drmreaddetach(struct knote *kn)
1602{
1603 struct drm_file *file_priv = kn->kn_hook;
1604 int s;
1605
1606 s = spltty()splraise(0x9);
1607 klist_remove_locked(&file_priv->rsel.si_note, kn);
1608 splx(s)spllower(s);
1609}
1610
1611int
1612filt_drmread(struct knote *kn, long hint)
1613{
1614 struct drm_file *file_priv = kn->kn_hook;
1615 int val = 0;
1616
1617 if ((hint & NOTE_SUBMIT0x01000000) == 0)
1618 mtx_enter(&file_priv->minor->dev->event_lock);
1619 val = !list_empty(&file_priv->event_list);
1620 if ((hint & NOTE_SUBMIT0x01000000) == 0)
1621 mtx_leave(&file_priv->minor->dev->event_lock);
1622 return (val);
1623}
1624
1625const struct filterops drm_filtops = {
1626 .f_flags = FILTEROP_ISFD0x00000001,
1627 .f_attach = NULL((void *)0),
1628 .f_detach = filt_drmdetach,
1629 .f_event = filt_drmkms,
1630};
1631
1632const struct filterops drmread_filtops = {
1633 .f_flags = FILTEROP_ISFD0x00000001,
1634 .f_attach = NULL((void *)0),
1635 .f_detach = filt_drmreaddetach,
1636 .f_event = filt_drmread,
1637};
1638
1639int
1640drmkqfilter(dev_t kdev, struct knote *kn)
1641{
1642 struct drm_device *dev = NULL((void *)0);
1643 struct drm_file *file_priv = NULL((void *)0);
1644 int s;
1645
1646 dev = drm_get_device_from_kdev(kdev);
1647 if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0))
1648 return (ENXIO6);
1649
1650 switch (kn->kn_filterkn_kevent.filter) {
1651 case EVFILT_READ(-1):
1652 mutex_lock(&dev->struct_mutex)rw_enter_write(&dev->struct_mutex);
1653 file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
);
1654 mutex_unlock(&dev->struct_mutex)rw_exit_write(&dev->struct_mutex);
1655 if (file_priv == NULL((void *)0))
1656 return (ENXIO6);
1657
1658 kn->kn_fop = &drmread_filtops;
1659 kn->kn_hook = file_priv;
1660
1661 s = spltty()splraise(0x9);
1662 klist_insert_locked(&file_priv->rsel.si_note, kn);
1663 splx(s)spllower(s);
1664 break;
1665 case EVFILT_DEVICE(-8):
1666 kn->kn_fop = &drm_filtops;
1667 kn->kn_hook = dev;
1668
1669 s = spltty()splraise(0x9);
1670 klist_insert_locked(&dev->note, kn);
1671 splx(s)spllower(s);
1672 break;
1673 default:
1674 return (EINVAL22);
1675 }
1676
1677 return (0);
1678}
1679
1680int
1681drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
1682{
1683 struct drm_device *dev = NULL((void *)0);
1684 struct drm_file *file_priv;
1685 struct drm_minor *dm;
1686 int ret = 0;
1687 int dminor, realminor, minor_type;
1688 int need_setup = 0;
1689
1690 dev = drm_get_device_from_kdev(kdev);
1691 if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0))
1692 return (ENXIO6);
1693
1694 DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))___drm_dbg(((void *)0), DRM_UT_CORE, "open_count = %d\n", ({ typeof
(*(&dev->open_count)) __tmp = *(volatile typeof(*(&
dev->open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer
(); __tmp; }))
;
1695
1696 if (flags & O_EXCL0x0800)
1697 return (EBUSY16); /* No exclusive opens */
1698
1699 if (drm_dev_needs_global_mutex(dev))
1700 mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex);
1701
1702 if (!atomic_fetch_inc(&dev->open_count)__sync_fetch_and_add(&dev->open_count, 1))
1703 need_setup = 1;
1704
1705 dminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
;
1706 realminor = dminor & ((1 << CLONE_SHIFT8) - 1);
1707 if (realminor < 64)
1708 minor_type = DRM_MINOR_PRIMARY;
1709 else if (realminor >= 128 && realminor < 192)
1710 minor_type = DRM_MINOR_RENDER;
1711 else {
1712 ret = ENXIO6;
1713 goto err;
1714 }
1715
1716 dm = *drm_minor_get_slot(dev, minor_type);
1717 if (dm == NULL((void *)0)) {
1718 ret = ENXIO6;
1719 goto err;
1720 }
1721 dm->index = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
;
1722
1723 file_priv = drm_file_alloc(dm);
1724 if (IS_ERR(file_priv)) {
1725 ret = ENOMEM12;
1726 goto err;
1727 }
1728
1729 /* first opener automatically becomes master */
1730 if (drm_is_primary_client(file_priv)) {
1731 ret = drm_master_open(file_priv);
1732 if (ret != 0)
1733 goto out_file_free;
1734 }
1735
1736 file_priv->filp = (void *)file_priv;
1737 file_priv->fminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
;
1738
1739 mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1740 SPLAY_INSERT(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_INSERT(&dev->files, file_priv);
1741 mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1742
1743 if (need_setup) {
1744 ret = drm_legacy_setup(dev);
1745 if (ret)
1746 goto out_file_free;
1747 }
1748
1749 if (drm_dev_needs_global_mutex(dev))
1750 mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1751
1752 return 0;
1753
1754out_file_free:
1755 drm_file_free(file_priv);
1756err:
1757 atomic_dec(&dev->open_count)__sync_fetch_and_sub(&dev->open_count, 1);
1758 if (drm_dev_needs_global_mutex(dev))
1759 mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1760 return (ret);
1761}
1762
1763int
1764drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
1765{
1766 struct drm_device *dev = drm_get_device_from_kdev(kdev);
1767 struct drm_file *file_priv;
1768 int retcode = 0;
1769
1770 if (dev == NULL((void *)0))
1771 return (ENXIO6);
1772
1773 if (drm_dev_needs_global_mutex(dev))
1774 mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex);
1775
1776 DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))___drm_dbg(((void *)0), DRM_UT_CORE, "open_count = %d\n", ({ typeof
(*(&dev->open_count)) __tmp = *(volatile typeof(*(&
dev->open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer
(); __tmp; }))
;
1777
1778 mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1779 file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
);
1780 if (file_priv == NULL((void *)0)) {
1781 DRM_ERROR("can't find authenticator\n")__drm_err("can't find authenticator\n");
1782 retcode = EINVAL22;
1783 mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1784 goto done;
1785 }
1786
1787 SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_REMOVE(&dev->files, file_priv);
1788 mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1789 drm_file_free(file_priv);
1790done:
1791 if (atomic_dec_and_test(&dev->open_count)(__sync_sub_and_fetch((&dev->open_count), 1) == 0))
1792 drm_lastclose(dev);
1793
1794 if (drm_dev_needs_global_mutex(dev))
1795 mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1796
1797 return (retcode);
1798}
1799
1800int
1801drmread(dev_t kdev, struct uio *uio, int ioflag)
1802{
1803 struct drm_device *dev = drm_get_device_from_kdev(kdev);
1804 struct drm_file *file_priv;
1805 struct drm_pending_event *ev;
1806 int error = 0;
1807
1808 if (dev == NULL((void *)0))
1809 return (ENXIO6);
1810
1811 mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1812 file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >>
8))
);
1813 mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1814 if (file_priv == NULL((void *)0))
1815 return (ENXIO6);
1816
1817 /*
1818 * The semantics are a little weird here. We will wait until we
1819 * have events to process, but as soon as we have events we will
1820 * only deliver as many as we have.
1821 * Note that events are atomic, if the read buffer will not fit in
1822 * a whole event, we won't read any of it out.
1823 */
1824 mtx_enter(&dev->event_lock);
1825 while (error == 0 && list_empty(&file_priv->event_list)) {
1826 if (ioflag & IO_NDELAY0x10) {
1827 mtx_leave(&dev->event_lock);
1828 return (EAGAIN35);
1829 }
1830 error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
1831 PWAIT32 | PCATCH0x100, "drmread", INFSLP0xffffffffffffffffULL);
1832 }
1833 if (error) {
1834 mtx_leave(&dev->event_lock);
1835 return (error);
1836 }
1837 while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
1838 MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})) && !(panicstr || db_active)) panic("mutex %p held in %s"
, (&dev->event_lock), __func__); } while (0)
;
1839 /* XXX we always destroy the event on error. */
1840 error = uiomove(ev->event, ev->event->length, uio);
1841 kfree(ev);
1842 if (error)
1843 break;
1844 mtx_enter(&dev->event_lock);
1845 }
1846 MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})) && !(panicstr || db_active)) panic("mutex %p held in %s"
, (&dev->event_lock), __func__); } while (0)
;
1847
1848 return (error);
1849}
1850
1851/*
1852 * Deqeue an event from the file priv in question. returning 1 if an
1853 * event was found. We take the resid from the read as a parameter because
1854 * we will only dequeue and event if the read buffer has space to fit the
1855 * entire thing.
1856 *
1857 * We are called locked, but we will *unlock* the queue on return so that
1858 * we may sleep to copyout the event.
1859 */
1860int
1861drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
1862 size_t resid, struct drm_pending_event **out)
1863{
1864 struct drm_pending_event *e = NULL((void *)0);
1865 int gotone = 0;
1866
1867 MUTEX_ASSERT_LOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner != ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})) && !(panicstr || db_active)) panic("mutex %p not held in %s"
, (&dev->event_lock), __func__); } while (0)
;
1868
1869 *out = NULL((void *)0);
1870 if (list_empty(&file_priv->event_list))
1871 goto out;
1872 e = list_first_entry(&file_priv->event_list,({ const __typeof( ((struct drm_pending_event *)0)->link )
*__mptr = ((&file_priv->event_list)->next); (struct
drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct
drm_pending_event, link) );})
1873 struct drm_pending_event, link)({ const __typeof( ((struct drm_pending_event *)0)->link )
*__mptr = ((&file_priv->event_list)->next); (struct
drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct
drm_pending_event, link) );})
;
1874 if (e->event->length > resid)
1875 goto out;
1876
1877 file_priv->event_space += e->event->length;
1878 list_del(&e->link);
1879 *out = e;
1880 gotone = 1;
1881
1882out:
1883 mtx_leave(&dev->event_lock);
1884
1885 return (gotone);
1886}
1887
1888paddr_t
1889drmmmap(dev_t kdev, off_t offset, int prot)
1890{
1891 return -1;
1892}
1893
1894struct drm_dmamem *
1895drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
1896 int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
1897{
1898 struct drm_dmamem *mem;
1899 size_t strsize;
1900 /*
1901 * segs is the last member of the struct since we modify the size
1902 * to allow extra segments if more than one are allowed.
1903 */
1904 strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
1905 mem = malloc(strsize, M_DRM145, M_NOWAIT0x0002 | M_ZERO0x0008);
1906 if (mem == NULL((void *)0))
1907 return (NULL((void *)0));
1908
1909 mem->size = size;
1910
1911 if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz
), (0), (0x0001 | 0x0002), (&mem->map))
1912 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map)(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz
), (0), (0x0001 | 0x0002), (&mem->map))
!= 0)
1913 goto strfree;
1914
1915 if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0),
(mem->segs), (nsegments), (&mem->nsegs), (0x0001 |
0x1000))
1916 &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0),
(mem->segs), (nsegments), (&mem->nsegs), (0x0001 |
0x1000))
!= 0)
1917 goto destroy;
1918
1919 if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs
), (size), (&mem->kva), (0x0001 | mapflags))
1920 &mem->kva, BUS_DMA_NOWAIT | mapflags)(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs
), (size), (&mem->kva), (0x0001 | mapflags))
!= 0)
1921 goto free;
1922
1923 if (bus_dmamap_load(dmat, mem->map, mem->kva, size,(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva
), (size), (((void *)0)), (0x0001 | loadflags))
1924 NULL, BUS_DMA_NOWAIT | loadflags)(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva
), (size), (((void *)0)), (0x0001 | loadflags))
!= 0)
1925 goto unmap;
1926
1927 return (mem);
1928
1929unmap:
1930 bus_dmamem_unmap(dmat, mem->kva, size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (size));
1931free:
1932 bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs
))
;
1933destroy:
1934 bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map));
1935strfree:
1936 free(mem, M_DRM145, 0);
1937
1938 return (NULL((void *)0));
1939}
1940
1941void
1942drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
1943{
1944 if (mem == NULL((void *)0))
1945 return;
1946
1947 bus_dmamap_unload(dmat, mem->map)(*(dmat)->_dmamap_unload)((dmat), (mem->map));
1948 bus_dmamem_unmap(dmat, mem->kva, mem->size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (mem->size
))
;
1949 bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs
))
;
1950 bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map));
1951 free(mem, M_DRM145, 0);
1952}
1953
1954struct drm_dma_handle *
1955drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
1956{
1957 struct drm_dma_handle *dmah;
1958
1959 dmah = malloc(sizeof(*dmah), M_DRM145, M_WAITOK0x0001);
1960 dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
1961 BUS_DMA_NOCACHE0x0800, 0);
1962 if (dmah->mem == NULL((void *)0)) {
1963 free(dmah, M_DRM145, sizeof(*dmah));
1964 return NULL((void *)0);
1965 }
1966 dmah->busaddr = dmah->mem->segs[0].ds_addr;
1967 dmah->size = dmah->mem->size;
1968 dmah->vaddr = dmah->mem->kva;
1969 return (dmah);
1970}
1971
1972void
1973drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
1974{
1975 if (dmah == NULL((void *)0))
1976 return;
1977
1978 drm_dmamem_free(dev->dmat, dmah->mem);
1979 free(dmah, M_DRM145, sizeof(*dmah));
1980}
1981
1982/*
1983 * Compute order. Can be made faster.
1984 */
1985int
1986drm_order(unsigned long size)
1987{
1988 int order;
1989 unsigned long tmp;
1990
1991 for (order = 0, tmp = size; tmp >>= 1; ++order)
1992 ;
1993
1994 if (size & ~(1 << order))
1995 ++order;
1996
1997 return order;
1998}
1999
2000int
2001drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
2002{
2003 struct drm_pciinfo *info = data;
2004
2005 if (dev->pdev == NULL((void *)0))
2006 return -ENOTTY25;
2007
2008 info->domain = dev->pdev->bus->domain_nr;
2009 info->bus = dev->pdev->bus->number;
2010 info->dev = PCI_SLOT(dev->pdev->devfn)((dev->pdev->devfn) >> 3);
2011 info->func = PCI_FUNC(dev->pdev->devfn)((dev->pdev->devfn) & 0x7);
2012 info->vendor_id = dev->pdev->vendor;
2013 info->device_id = dev->pdev->device;
2014 info->subvendor_id = dev->pdev->subsystem_vendor;
2015 info->subdevice_id = dev->pdev->subsystem_device;
2016 info->revision_id = 0;
2017
2018 return 0;
2019}

/usr/src/sys/dev/pci/drm/include/drm/drm_drv.h

1/*
2 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
3 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4 * Copyright (c) 2009-2010, Code Aurora Forum.
5 * Copyright 2016 Intel Corp.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#ifndef _DRM_DRV_H_
28#define _DRM_DRV_H_
29
30#include <linux/list.h>
31#include <linux/irqreturn.h>
32
33#include <drm/drm_device.h>
34
35#include <uvm/uvm_extern.h>
36
37struct drm_file;
38struct drm_gem_object;
39struct drm_master;
40struct drm_minor;
41struct dma_buf;
42struct dma_buf_attachment;
43struct drm_display_mode;
44struct drm_mode_create_dumb;
45struct drm_printer;
46struct sg_table;
47
48/**
49 * enum drm_driver_feature - feature flags
50 *
51 * See &drm_driver.driver_features, drm_device.driver_features and
52 * drm_core_check_feature().
53 */
54enum drm_driver_feature {
55 /**
56 * @DRIVER_GEM:
57 *
58 * Driver use the GEM memory manager. This should be set for all modern
59 * drivers.
60 */
61 DRIVER_GEM = BIT(0)(1UL << (0)),
62 /**
63 * @DRIVER_MODESET:
64 *
65 * Driver supports mode setting interfaces (KMS).
66 */
67 DRIVER_MODESET = BIT(1)(1UL << (1)),
68 /**
69 * @DRIVER_RENDER:
70 *
71 * Driver supports dedicated render nodes. See also the :ref:`section on
72 * render nodes <drm_render_node>` for details.
73 */
74 DRIVER_RENDER = BIT(3)(1UL << (3)),
75 /**
76 * @DRIVER_ATOMIC:
77 *
78 * Driver supports the full atomic modesetting userspace API. Drivers
79 * which only use atomic internally, but do not support the full
80 * userspace API (e.g. not all properties converted to atomic, or
81 * multi-plane updates are not guaranteed to be tear-free) should not
82 * set this flag.
83 */
84 DRIVER_ATOMIC = BIT(4)(1UL << (4)),
85 /**
86 * @DRIVER_SYNCOBJ:
87 *
88 * Driver supports &drm_syncobj for explicit synchronization of command
89 * submission.
90 */
91 DRIVER_SYNCOBJ = BIT(5)(1UL << (5)),
92 /**
93 * @DRIVER_SYNCOBJ_TIMELINE:
94 *
95 * Driver supports the timeline flavor of &drm_syncobj for explicit
96 * synchronization of command submission.
97 */
98 DRIVER_SYNCOBJ_TIMELINE = BIT(6)(1UL << (6)),
99
100 /* IMPORTANT: Below are all the legacy flags, add new ones above. */
101
102 /**
103 * @DRIVER_USE_AGP:
104 *
105 * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage
106 * AGP resources. New drivers don't need this.
107 */
108 DRIVER_USE_AGP = BIT(25)(1UL << (25)),
109 /**
110 * @DRIVER_LEGACY:
111 *
112 * Denote a legacy driver using shadow attach. Do not use.
113 */
114 DRIVER_LEGACY = BIT(26)(1UL << (26)),
115 /**
116 * @DRIVER_PCI_DMA:
117 *
118 * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace
119 * will be enabled. Only for legacy drivers. Do not use.
120 */
121 DRIVER_PCI_DMA = BIT(27)(1UL << (27)),
122 /**
123 * @DRIVER_SG:
124 *
125 * Driver can perform scatter/gather DMA, allocation and mapping of
126 * scatter/gather buffers will be enabled. Only for legacy drivers. Do
127 * not use.
128 */
129 DRIVER_SG = BIT(28)(1UL << (28)),
130
131 /**
132 * @DRIVER_HAVE_DMA:
133 *
134 * Driver supports DMA, the userspace DMA API will be supported. Only
135 * for legacy drivers. Do not use.
136 */
137 DRIVER_HAVE_DMA = BIT(29)(1UL << (29)),
138 /**
139 * @DRIVER_HAVE_IRQ:
140 *
141 * Legacy irq support. Only for legacy drivers. Do not use.
142 */
143 DRIVER_HAVE_IRQ = BIT(30)(1UL << (30)),
144 /**
145 * @DRIVER_KMS_LEGACY_CONTEXT:
146 *
147 * Used only by nouveau for backwards compatibility with existing
148 * userspace. Do not use.
149 */
150 DRIVER_KMS_LEGACY_CONTEXT = BIT(31)(1UL << (31)),
151};
152
153/**
154 * struct drm_driver - DRM driver structure
155 *
156 * This structure represent the common code for a family of cards. There will be
157 * one &struct drm_device for each card present in this family. It contains lots
158 * of vfunc entries, and a pile of those probably should be moved to more
159 * appropriate places like &drm_mode_config_funcs or into a new operations
160 * structure for GEM drivers.
161 */
162struct drm_driver {
163 /**
164 * @load:
165 *
166 * Backward-compatible driver callback to complete initialization steps
167 * after the driver is registered. For this reason, may suffer from
168 * race conditions and its use is deprecated for new drivers. It is
169 * therefore only supported for existing drivers not yet converted to
170 * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
171 * proper and race-free way to set up a &struct drm_device.
172 *
173 * This is deprecated, do not use!
174 *
175 * Returns:
176 *
177 * Zero on success, non-zero value on failure.
178 */
179 int (*load) (struct drm_device *, unsigned long flags);
180
181 /**
182 * @open:
183 *
184 * Driver callback when a new &struct drm_file is opened. Useful for
185 * setting up driver-private data structures like buffer allocators,
186 * execution contexts or similar things. Such driver-private resources
187 * must be released again in @postclose.
188 *
189 * Since the display/modeset side of DRM can only be owned by exactly
190 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
191 * there should never be a need to set up any modeset related resources
192 * in this callback. Doing so would be a driver design bug.
193 *
194 * Returns:
195 *
196 * 0 on success, a negative error code on failure, which will be
197 * promoted to userspace as the result of the open() system call.
198 */
199 int (*open) (struct drm_device *, struct drm_file *);
200
201 /**
202 * @postclose:
203 *
204 * One of the driver callbacks when a new &struct drm_file is closed.
205 * Useful for tearing down driver-private data structures allocated in
206 * @open like buffer allocators, execution contexts or similar things.
207 *
208 * Since the display/modeset side of DRM can only be owned by exactly
209 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
210 * there should never be a need to tear down any modeset related
211 * resources in this callback. Doing so would be a driver design bug.
212 */
213 void (*postclose) (struct drm_device *, struct drm_file *);
214
215 /**
216 * @lastclose:
217 *
218 * Called when the last &struct drm_file has been closed and there's
219 * currently no userspace client for the &struct drm_device.
220 *
221 * Modern drivers should only use this to force-restore the fbdev
222 * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
223 * Anything else would indicate there's something seriously wrong.
224 * Modern drivers can also use this to execute delayed power switching
225 * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
226 * infrastructure.
227 *
228 * This is called after @postclose hook has been called.
229 *
230 * NOTE:
231 *
232 * All legacy drivers use this callback to de-initialize the hardware.
233 * This is purely because of the shadow-attach model, where the DRM
234 * kernel driver does not really own the hardware. Instead ownershipe is
235 * handled with the help of userspace through an inheritedly racy dance
236 * to set/unset the VT into raw mode.
237 *
238 * Legacy drivers initialize the hardware in the @firstopen callback,
239 * which isn't even called for modern drivers.
240 */
241 void (*lastclose) (struct drm_device *);
242
243 /**
244 * @unload:
245 *
246 * Reverse the effects of the driver load callback. Ideally,
247 * the clean up performed by the driver should happen in the
248 * reverse order of the initialization. Similarly to the load
249 * hook, this handler is deprecated and its usage should be
250 * dropped in favor of an open-coded teardown function at the
251 * driver layer. See drm_dev_unregister() and drm_dev_put()
252 * for the proper way to remove a &struct drm_device.
253 *
254 * The unload() hook is called right after unregistering
255 * the device.
256 *
257 */
258 void (*unload) (struct drm_device *);
259
260 /**
261 * @release:
262 *
263 * Optional callback for destroying device data after the final
264 * reference is released, i.e. the device is being destroyed.
265 *
266 * This is deprecated, clean up all memory allocations associated with a
267 * &drm_device using drmm_add_action(), drmm_kmalloc() and related
268 * managed resources functions.
269 */
270 void (*release) (struct drm_device *);
271
272 /**
273 * @master_set:
274 *
275 * Called whenever the minor master is set. Only used by vmwgfx.
276 */
277 void (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
278 bool_Bool from_open);
279 /**
280 * @master_drop:
281 *
282 * Called whenever the minor master is dropped. Only used by vmwgfx.
283 */
284 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
285
286 /**
287 * @debugfs_init:
288 *
289 * Allows drivers to create driver-specific debugfs files.
290 */
291 void (*debugfs_init)(struct drm_minor *minor);
292
293 /**
294 * @gem_create_object: constructor for gem objects
295 *
296 * Hook for allocating the GEM object struct, for use by the CMA
297 * and SHMEM GEM helpers. Returns a GEM object on success, or an
298 * ERR_PTR()-encoded error code otherwise.
299 */
300 struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
301 size_t size);
302
303 /**
304 * @prime_handle_to_fd:
305 *
306 * Main PRIME export function. Should be implemented with
307 * drm_gem_prime_handle_to_fd() for GEM based drivers.
308 *
309 * For an in-depth discussion see :ref:`PRIME buffer sharing
310 * documentation <prime_buffer_sharing>`.
311 */
312 int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
313 uint32_t handle, uint32_t flags, int *prime_fd);
314 /**
315 * @prime_fd_to_handle:
316 *
317 * Main PRIME import function. Should be implemented with
318 * drm_gem_prime_fd_to_handle() for GEM based drivers.
319 *
320 * For an in-depth discussion see :ref:`PRIME buffer sharing
321 * documentation <prime_buffer_sharing>`.
322 */
323 int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
324 int prime_fd, uint32_t *handle);
325
326 /**
327 * @gem_prime_import:
328 *
329 * Import hook for GEM drivers.
330 *
331 * This defaults to drm_gem_prime_import() if not set.
332 */
333 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
334 struct dma_buf *dma_buf);
335 /**
336 * @gem_prime_import_sg_table:
337 *
338 * Optional hook used by the PRIME helper functions
339 * drm_gem_prime_import() respectively drm_gem_prime_import_dev().
340 */
341 struct drm_gem_object *(*gem_prime_import_sg_table)(
342 struct drm_device *dev,
343 struct dma_buf_attachment *attach,
344 struct sg_table *sgt);
345 /**
346 * @gem_prime_mmap:
347 *
348 * mmap hook for GEM drivers, used to implement dma-buf mmap in the
349 * PRIME helpers.
350 *
351 * This hook only exists for historical reasons. Drivers must use
352 * drm_gem_prime_mmap() to implement it.
353 *
354 * FIXME: Convert all drivers to implement mmap in struct
355 * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
356 * its callers. This hook should be removed afterwards.
357 */
358#ifdef __linux__
359 int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
360#else
361 struct uvm_object *(*gem_prime_mmap)(struct file *, vm_prot_t, voff_t, vsize_t);
362#endif
363
364#ifdef __OpenBSD__1
365 struct uvm_object *(*mmap)(struct file *, vm_prot_t, voff_t, vsize_t);
366 size_t gem_size;
367#endif
368
369 /**
370 * @dumb_create:
371 *
372 * This creates a new dumb buffer in the driver's backing storage manager (GEM,
373 * TTM or something else entirely) and returns the resulting buffer handle. This
374 * handle can then be wrapped up into a framebuffer modeset object.
375 *
376 * Note that userspace is not allowed to use such objects for render
377 * acceleration - drivers must create their own private ioctls for such a use
378 * case.
379 *
380 * Width, height and depth are specified in the &drm_mode_create_dumb
381 * argument. The callback needs to fill the handle, pitch and size for
382 * the created buffer.
383 *
384 * Called by the user via ioctl.
385 *
386 * Returns:
387 *
388 * Zero on success, negative errno on failure.
389 */
390 int (*dumb_create)(struct drm_file *file_priv,
391 struct drm_device *dev,
392 struct drm_mode_create_dumb *args);
393 /**
394 * @dumb_map_offset:
395 *
396 * Allocate an offset in the drm device node's address space to be able to
397 * memory map a dumb buffer.
398 *
399 * The default implementation is drm_gem_create_mmap_offset(). GEM based
400 * drivers must not overwrite this.
401 *
402 * Called by the user via ioctl.
403 *
404 * Returns:
405 *
406 * Zero on success, negative errno on failure.
407 */
408 int (*dumb_map_offset)(struct drm_file *file_priv,
409 struct drm_device *dev, uint32_t handle,
410 uint64_t *offset);
411 /**
412 * @dumb_destroy:
413 *
414 * This destroys the userspace handle for the given dumb backing storage buffer.
415 * Since buffer objects must be reference counted in the kernel a buffer object
416 * won't be immediately freed if a framebuffer modeset object still uses it.
417 *
418 * Called by the user via ioctl.
419 *
420 * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
421 * must not overwrite this.
422 *
423 * Returns:
424 *
425 * Zero on success, negative errno on failure.
426 */
427 int (*dumb_destroy)(struct drm_file *file_priv,
428 struct drm_device *dev,
429 uint32_t handle);
430
431#ifdef __OpenBSD__1
432 int (*gem_fault)(struct drm_gem_object *,
433 struct uvm_faultinfo *, off_t, vaddr_t,
434 vm_page_t *, int, int, vm_prot_t, int);
435#endif
436
437 /** @major: driver major number */
438 int major;
439 /** @minor: driver minor number */
440 int minor;
441 /** @patchlevel: driver patch level */
442 int patchlevel;
443 /** @name: driver name */
444 char *name;
445 /** @desc: driver description */
446 char *desc;
447 /** @date: driver date */
448 char *date;
449
450 /**
451 * @driver_features:
452 * Driver features, see &enum drm_driver_feature. Drivers can disable
453 * some features on a per-instance basis using
454 * &drm_device.driver_features.
455 */
456 u32 driver_features;
457
458 /**
459 * @ioctls:
460 *
461 * Array of driver-private IOCTL description entries. See the chapter on
462 * :ref:`IOCTL support in the userland interfaces
463 * chapter<drm_driver_ioctl>` for the full details.
464 */
465
466 const struct drm_ioctl_desc *ioctls;
467 /** @num_ioctls: Number of entries in @ioctls. */
468 int num_ioctls;
469
470 /**
471 * @fops:
472 *
473 * File operations for the DRM device node. See the discussion in
474 * :ref:`file operations<drm_driver_fops>` for in-depth coverage and
475 * some examples.
476 */
477 const struct file_operations *fops;
478
479#ifdef CONFIG_DRM_LEGACY
480 /* Everything below here is for legacy driver, never use! */
481 /* private: */
482
483 int (*firstopen) (struct drm_device *);
484 void (*preclose) (struct drm_device *, struct drm_file *file_priv);
485 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
486 int (*dma_quiescent) (struct drm_device *);
487 int (*context_dtor) (struct drm_device *dev, int context);
488 irqreturn_t (*irq_handler)(int irq, void *arg);
489 void (*irq_preinstall)(struct drm_device *dev);
490 int (*irq_postinstall)(struct drm_device *dev);
491 void (*irq_uninstall)(struct drm_device *dev);
492 u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
493 int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
494 void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
495 int dev_priv_size;
496#endif
497};
498
499void *__devm_drm_dev_alloc(struct device *parent,
500 const struct drm_driver *driver,
501 size_t size, size_t offset);
502
503/**
504 * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
505 * @parent: Parent device object
506 * @driver: DRM driver
507 * @type: the type of the struct which contains struct &drm_device
508 * @member: the name of the &drm_device within @type.
509 *
510 * This allocates and initialize a new DRM device. No device registration is done.
511 * Call drm_dev_register() to advertice the device to user space and register it
512 * with other core subsystems. This should be done last in the device
513 * initialization sequence to make sure userspace can't access an inconsistent
514 * state.
515 *
516 * The initial ref-count of the object is 1. Use drm_dev_get() and
517 * drm_dev_put() to take and drop further ref-counts.
518 *
519 * It is recommended that drivers embed &struct drm_device into their own device
520 * structure.
521 *
522 * Note that this manages the lifetime of the resulting &drm_device
523 * automatically using devres. The DRM device initialized with this function is
524 * automatically put on driver detach using drm_dev_put().
525 *
526 * RETURNS:
527 * Pointer to new DRM device, or ERR_PTR on failure.
528 */
529#define devm_drm_dev_alloc(parent, driver, type, member)((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), __builtin_offsetof
(type, member)))
\
530 ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
531 offsetof(type, member)__builtin_offsetof(type, member)))
532
533struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
534 struct device *parent);
535int drm_dev_register(struct drm_device *dev, unsigned long flags);
536void drm_dev_unregister(struct drm_device *dev);
537
538void drm_dev_get(struct drm_device *dev);
539void drm_dev_put(struct drm_device *dev);
540void drm_put_dev(struct drm_device *dev);
541bool_Bool drm_dev_enter(struct drm_device *dev, int *idx);
542void drm_dev_exit(int idx);
543void drm_dev_unplug(struct drm_device *dev);
544
545/**
546 * drm_dev_is_unplugged - is a DRM device unplugged
547 * @dev: DRM device
548 *
549 * This function can be called to check whether a hotpluggable is unplugged.
550 * Unplugging itself is singalled through drm_dev_unplug(). If a device is
551 * unplugged, these two functions guarantee that any store before calling
552 * drm_dev_unplug() is visible to callers of this function after it completes
553 *
554 * WARNING: This function fundamentally races against drm_dev_unplug(). It is
555 * recommended that drivers instead use the underlying drm_dev_enter() and
556 * drm_dev_exit() function pairs.
557 */
558static inline bool_Bool drm_dev_is_unplugged(struct drm_device *dev)
559{
560 int idx;
7
'idx' declared without an initial value
561
562 if (drm_dev_enter(dev, &idx)) {
8
Calling 'drm_dev_enter'
10
Returning from 'drm_dev_enter'
11
Taking true branch
563 drm_dev_exit(idx);
12
1st function call argument is an uninitialized value
564 return false0;
565 }
566
567 return true1;
568}
569
570/**
571 * drm_core_check_all_features - check driver feature flags mask
572 * @dev: DRM device to check
573 * @features: feature flag(s) mask
574 *
575 * This checks @dev for driver features, see &drm_driver.driver_features,
576 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
577 *
578 * Returns true if all features in the @features mask are supported, false
579 * otherwise.
580 */
581static inline bool_Bool drm_core_check_all_features(const struct drm_device *dev,
582 u32 features)
583{
584 u32 supported = dev->driver->driver_features & dev->driver_features;
585
586 return features && (supported & features) == features;
587}
588
589/**
590 * drm_core_check_feature - check driver feature flags
591 * @dev: DRM device to check
592 * @feature: feature flag
593 *
594 * This checks @dev for driver features, see &drm_driver.driver_features,
595 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
596 *
597 * Returns true if the @feature is supported, false otherwise.
598 */
599static inline bool_Bool drm_core_check_feature(const struct drm_device *dev,
600 enum drm_driver_feature feature)
601{
602 return drm_core_check_all_features(dev, feature);
603}
604
605/**
606 * drm_drv_uses_atomic_modeset - check if the driver implements
607 * atomic_commit()
608 * @dev: DRM device
609 *
610 * This check is useful if drivers do not have DRIVER_ATOMIC set but
611 * have atomic modesetting internally implemented.
612 */
613static inline bool_Bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
614{
615 return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
616 (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL((void *)0));
617}
618
619
620int drm_dev_set_unique(struct drm_device *dev, const char *name);
621
622struct drm_file *drm_find_file_by_minor(struct drm_device *, int);
623struct drm_device *drm_get_device_from_kdev(dev_t);
624
625#ifdef __OpenBSD__1
626
627void drm_attach_platform(struct drm_driver *, bus_space_tag_t, bus_dma_tag_t,
628 struct device *, struct drm_device *);
629struct drm_device *drm_attach_pci(const struct drm_driver *,
630 struct pci_attach_args *, int, int, struct device *, struct drm_device *);
631
632int drm_pciprobe(struct pci_attach_args *, const struct pci_device_id * );
633const struct pci_device_id *drm_find_description(int, int,
634 const struct pci_device_id *);
635
636int drm_getpciinfo(struct drm_device *, void *, struct drm_file *);
637
638#endif
639extern bool_Bool drm_firmware_drivers_only(void);
640
641#endif