| File: | dev/pci/drm/drm_drv.c |
| Warning: | line 669, column 3: 1st function call argument is an uninitialized value |
| 1 | /* | ||||
| 2 | * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org | ||||
| 3 | * | ||||
| 4 | * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. | ||||
| 5 | * All Rights Reserved. | ||||
| 6 | * | ||||
| 7 | * Author Rickard E. (Rik) Faith <faith@valinux.com> | ||||
| 8 | * | ||||
| 9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||||
| 10 | * copy of this software and associated documentation files (the "Software"), | ||||
| 11 | * to deal in the Software without restriction, including without limitation | ||||
| 12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| 13 | * and/or sell copies of the Software, and to permit persons to whom the | ||||
| 14 | * Software is furnished to do so, subject to the following conditions: | ||||
| 15 | * | ||||
| 16 | * The above copyright notice and this permission notice (including the next | ||||
| 17 | * paragraph) shall be included in all copies or substantial portions of the | ||||
| 18 | * Software. | ||||
| 19 | * | ||||
| 20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| 21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| 22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||||
| 23 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||||
| 24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||||
| 25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||||
| 26 | * DEALINGS IN THE SOFTWARE. | ||||
| 27 | */ | ||||
| 28 | |||||
| 29 | #include <sys/param.h> | ||||
| 30 | #include <sys/fcntl.h> | ||||
| 31 | #include <sys/poll.h> | ||||
| 32 | #include <sys/specdev.h> | ||||
| 33 | #include <sys/vnode.h> | ||||
| 34 | |||||
| 35 | #include <machine/bus.h> | ||||
| 36 | |||||
| 37 | #ifdef __HAVE_ACPI | ||||
| 38 | #include <dev/acpi/acpidev.h> | ||||
| 39 | #include <dev/acpi/acpivar.h> | ||||
| 40 | #include <dev/acpi/dsdt.h> | ||||
| 41 | #endif | ||||
| 42 | |||||
| 43 | #include <linux/debugfs.h> | ||||
| 44 | #include <linux/fs.h> | ||||
| 45 | #include <linux/module.h> | ||||
| 46 | #include <linux/moduleparam.h> | ||||
| 47 | #include <linux/mount.h> | ||||
| 48 | #include <linux/pseudo_fs.h> | ||||
| 49 | #include <linux/slab.h> | ||||
| 50 | #include <linux/srcu.h> | ||||
| 51 | |||||
| 52 | #include <drm/drm_client.h> | ||||
| 53 | #include <drm/drm_color_mgmt.h> | ||||
| 54 | #include <drm/drm_drv.h> | ||||
| 55 | #include <drm/drm_file.h> | ||||
| 56 | #include <drm/drm_managed.h> | ||||
| 57 | #include <drm/drm_mode_object.h> | ||||
| 58 | #include <drm/drm_print.h> | ||||
| 59 | |||||
| 60 | #include <drm/drm_gem.h> | ||||
| 61 | #include <drm/drm_agpsupport.h> | ||||
| 62 | #include <drm/drm_irq.h> | ||||
| 63 | |||||
| 64 | #include "drm_crtc_internal.h" | ||||
| 65 | #include "drm_internal.h" | ||||
| 66 | #include "drm_legacy.h" | ||||
| 67 | |||||
| 68 | MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); | ||||
| 69 | MODULE_DESCRIPTION("DRM shared core routines"); | ||||
| 70 | MODULE_LICENSE("GPL and additional rights"); | ||||
| 71 | |||||
| 72 | static DEFINE_SPINLOCK(drm_minor_lock); | ||||
| 73 | static struct idr drm_minors_idr; | ||||
| 74 | |||||
| 75 | /* | ||||
| 76 | * If the drm core fails to init for whatever reason, | ||||
| 77 | * we should prevent any drivers from registering with it. | ||||
| 78 | * It's best to check this at drm_dev_init(), as some drivers | ||||
| 79 | * prefer to embed struct drm_device into their own device | ||||
| 80 | * structure and call drm_dev_init() themselves. | ||||
| 81 | */ | ||||
| 82 | static bool drm_core_init_complete = false; | ||||
| 83 | |||||
| 84 | static struct dentry *drm_debugfs_root; | ||||
| 85 | |||||
| 86 | #ifdef notyet | ||||
| 87 | DEFINE_STATIC_SRCU(drm_unplug_srcu); | ||||
| 88 | #endif | ||||
| 89 | |||||
| 90 | /* | ||||
| 91 | * Some functions are only called once on init regardless of how many times | ||||
| 92 | * drm attaches. In linux this is handled via module_init()/module_exit() | ||||
| 93 | */ | ||||
| 94 | int drm_refcnt; | ||||
| 95 | |||||
| 96 | struct drm_softc { | ||||
| 97 | struct device sc_dev; | ||||
| 98 | struct drm_device *sc_drm; | ||||
| 99 | int sc_allocated; | ||||
| 100 | }; | ||||
| 101 | |||||
| 102 | struct drm_attach_args { | ||||
| 103 | struct drm_device *drm; | ||||
| 104 | struct drm_driver *driver; | ||||
| 105 | char *busid; | ||||
| 106 | bus_dma_tag_t dmat; | ||||
| 107 | bus_space_tag_t bst; | ||||
| 108 | size_t busid_len; | ||||
| 109 | int is_agp; | ||||
| 110 | struct pci_attach_args *pa; | ||||
| 111 | int primary; | ||||
| 112 | }; | ||||
| 113 | |||||
| 114 | void drm_linux_init(void); | ||||
| 115 | void drm_linux_exit(void); | ||||
| 116 | int drm_linux_acpi_notify(struct aml_node *, int, void *); | ||||
| 117 | |||||
| 118 | int drm_dequeue_event(struct drm_device *, struct drm_file *, size_t, | ||||
| 119 | struct drm_pending_event **); | ||||
| 120 | |||||
| 121 | int drmprint(void *, const char *); | ||||
| 122 | int drmsubmatch(struct device *, void *, void *); | ||||
| 123 | const struct pci_device_id * | ||||
| 124 | drm_find_description(int, int, const struct pci_device_id *); | ||||
| 125 | |||||
| 126 | int drm_file_cmp(struct drm_file *, struct drm_file *); | ||||
| 127 | SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp); | ||||
| 128 | |||||
| 129 | #define DRMDEVCF_PRIMARY 0 | ||||
| 130 | #define drmdevcf_primary cf_loc[DRMDEVCF_PRIMARY] /* spec'd as primary? */ | ||||
| 131 | #define DRMDEVCF_PRIMARY_UNK -1 | ||||
| 132 | |||||
| 133 | /* | ||||
| 134 | * DRM Minors | ||||
| 135 | * A DRM device can provide several char-dev interfaces on the DRM-Major. Each | ||||
| 136 | * of them is represented by a drm_minor object. Depending on the capabilities | ||||
| 137 | * of the device-driver, different interfaces are registered. | ||||
| 138 | * | ||||
| 139 | * Minors can be accessed via dev->$minor_name. This pointer is either | ||||
| 140 | * NULL or a valid drm_minor pointer and stays valid as long as the device is | ||||
| 141 | * valid. This means, DRM minors have the same life-time as the underlying | ||||
| 142 | * device. However, this doesn't mean that the minor is active. Minors are | ||||
| 143 | * registered and unregistered dynamically according to device-state. | ||||
| 144 | */ | ||||
| 145 | |||||
| 146 | static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, | ||||
| 147 | unsigned int type) | ||||
| 148 | { | ||||
| 149 | switch (type) { | ||||
| 150 | case DRM_MINOR_PRIMARY: | ||||
| 151 | return &dev->primary; | ||||
| 152 | case DRM_MINOR_RENDER: | ||||
| 153 | return &dev->render; | ||||
| 154 | default: | ||||
| 155 | BUG(); | ||||
| 156 | } | ||||
| 157 | } | ||||
| 158 | |||||
| 159 | static void drm_minor_alloc_release(struct drm_device *dev, void *data) | ||||
| 160 | { | ||||
| 161 | struct drm_minor *minor = data; | ||||
| 162 | unsigned long flags; | ||||
| 163 | |||||
| 164 | WARN_ON(dev != minor->dev); | ||||
| 165 | |||||
| 166 | #ifdef __linux__ | ||||
| 167 | put_device(minor->kdev); | ||||
| 168 | #endif | ||||
| 169 | |||||
| 170 | spin_lock_irqsave(&drm_minor_lock, flags); | ||||
| 171 | idr_remove(&drm_minors_idr, minor->index); | ||||
| 172 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||||
| 173 | } | ||||
| 174 | |||||
| 175 | static int drm_minor_alloc(struct drm_device *dev, unsigned int type) | ||||
| 176 | { | ||||
| 177 | struct drm_minor *minor; | ||||
| 178 | unsigned long flags; | ||||
| 179 | int r; | ||||
| 180 | |||||
| 181 | minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL); | ||||
| 182 | if (!minor) | ||||
| 183 | return -ENOMEM; | ||||
| 184 | |||||
| 185 | minor->type = type; | ||||
| 186 | minor->dev = dev; | ||||
| 187 | |||||
| 188 | idr_preload(GFP_KERNEL); | ||||
| 189 | spin_lock_irqsave(&drm_minor_lock, flags); | ||||
| 190 | r = idr_alloc(&drm_minors_idr, | ||||
| 191 | NULL, | ||||
| 192 | 64 * type, | ||||
| 193 | 64 * (type + 1), | ||||
| 194 | GFP_NOWAIT); | ||||
| 195 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||||
| 196 | idr_preload_end(); | ||||
| 197 | |||||
| 198 | if (r < 0) | ||||
| 199 | return r; | ||||
| 200 | |||||
| 201 | minor->index = r; | ||||
| 202 | |||||
| 203 | r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); | ||||
| 204 | if (r) | ||||
| 205 | return r; | ||||
| 206 | |||||
| 207 | #ifdef __linux__ | ||||
| 208 | minor->kdev = drm_sysfs_minor_alloc(minor); | ||||
| 209 | if (IS_ERR(minor->kdev)) | ||||
| 210 | return PTR_ERR(minor->kdev); | ||||
| 211 | #endif | ||||
| 212 | |||||
| 213 | *drm_minor_get_slot(dev, type) = minor; | ||||
| 214 | return 0; | ||||
| 215 | } | ||||
| 216 | |||||
| 217 | static int drm_minor_register(struct drm_device *dev, unsigned int type) | ||||
| 218 | { | ||||
| 219 | struct drm_minor *minor; | ||||
| 220 | unsigned long flags; | ||||
| 221 | #ifdef __linux__ | ||||
| 222 | int ret; | ||||
| 223 | #endif | ||||
| 224 | |||||
| 225 | DRM_DEBUG("\n"); | ||||
| 226 | |||||
| 227 | minor = *drm_minor_get_slot(dev, type); | ||||
| 228 | if (!minor) | ||||
| 229 | return 0; | ||||
| 230 | |||||
| 231 | #ifdef __linux__ | ||||
| 232 | ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); | ||||
| 233 | if (ret) { | ||||
| 234 | DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); | ||||
| 235 | goto err_debugfs; | ||||
| 236 | } | ||||
| 237 | |||||
| 238 | ret = device_add(minor->kdev); | ||||
| 239 | if (ret) | ||||
| 240 | goto err_debugfs; | ||||
| 241 | #else | ||||
| 242 | drm_debugfs_root = NULL; | ||||
| 243 | #endif | ||||
| 244 | |||||
| 245 | /* replace NULL with @minor so lookups will succeed from now on */ | ||||
| 246 | spin_lock_irqsave(&drm_minor_lock, flags); | ||||
| 247 | idr_replace(&drm_minors_idr, minor, minor->index); | ||||
| 248 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||||
| 249 | |||||
| 250 | DRM_DEBUG("new minor registered %d\n", minor->index); | ||||
| 251 | return 0; | ||||
| 252 | |||||
| 253 | #ifdef __linux__ | ||||
| 254 | err_debugfs: | ||||
| 255 | drm_debugfs_cleanup(minor); | ||||
| 256 | return ret; | ||||
| 257 | #endif | ||||
| 258 | } | ||||
| 259 | |||||
| 260 | static void drm_minor_unregister(struct drm_device *dev, unsigned int type) | ||||
| 261 | { | ||||
| 262 | struct drm_minor *minor; | ||||
| 263 | unsigned long flags; | ||||
| 264 | |||||
| 265 | minor = *drm_minor_get_slot(dev, type); | ||||
| 266 | #ifdef __linux__ | ||||
| 267 | if (!minor || !device_is_registered(minor->kdev)) | ||||
| 268 | #else | ||||
| 269 | if (!minor) | ||||
| 270 | #endif | ||||
| 271 | return; | ||||
| 272 | |||||
| 273 | /* replace @minor with NULL so lookups will fail from now on */ | ||||
| 274 | spin_lock_irqsave(&drm_minor_lock, flags); | ||||
| 275 | idr_replace(&drm_minors_idr, NULL, minor->index); | ||||
| 276 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||||
| 277 | |||||
| 278 | #ifdef __linux__ | ||||
| 279 | device_del(minor->kdev); | ||||
| 280 | #endif | ||||
| 281 | dev_set_drvdata(minor->kdev, NULL); /* safety belt */ | ||||
| 282 | drm_debugfs_cleanup(minor); | ||||
| 283 | } | ||||
| 284 | |||||
| 285 | /* | ||||
| 286 | * Looks up the given minor-ID and returns the respective DRM-minor object. The | ||||
| 287 | * reference-count of the underlying device is increased so you must release this | ||||
| 288 | * object with drm_minor_release(). | ||||
| 289 | * | ||||
| 290 | * As long as you hold this minor, it is guaranteed that the object and the | ||||
| 291 | * minor->dev pointer will stay valid! However, the device may get unplugged and | ||||
| 292 | * unregistered while you hold the minor. | ||||
| 293 | */ | ||||
| 294 | struct drm_minor *drm_minor_acquire(unsigned int minor_id) | ||||
| 295 | { | ||||
| 296 | struct drm_minor *minor; | ||||
| 297 | unsigned long flags; | ||||
| 298 | |||||
| 299 | spin_lock_irqsave(&drm_minor_lock, flags); | ||||
| 300 | minor = idr_find(&drm_minors_idr, minor_id); | ||||
| 301 | if (minor) | ||||
| 302 | drm_dev_get(minor->dev); | ||||
| 303 | spin_unlock_irqrestore(&drm_minor_lock, flags); | ||||
| 304 | |||||
| 305 | if (!minor) { | ||||
| 306 | return ERR_PTR(-ENODEV); | ||||
| 307 | } else if (drm_dev_is_unplugged(minor->dev)) { | ||||
| 308 | drm_dev_put(minor->dev); | ||||
| 309 | return ERR_PTR(-ENODEV); | ||||
| 310 | } | ||||
| 311 | |||||
| 312 | return minor; | ||||
| 313 | } | ||||
| 314 | |||||
| 315 | void drm_minor_release(struct drm_minor *minor) | ||||
| 316 | { | ||||
| 317 | drm_dev_put(minor->dev); | ||||
| 318 | } | ||||
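A minimal usage sketch of the acquire/release contract documented above; my_minor_user() and do_something_with() are illustrative placeholders, not part of this file:

static int my_minor_user(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);	/* -ENODEV: no such minor, or device unplugged */

	/* minor and minor->dev stay valid until the matching release below. */
	do_something_with(minor->dev);

	drm_minor_release(minor);
	return 0;
}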
| 319 | |||||
| 320 | /** | ||||
| 321 | * DOC: driver instance overview | ||||
| 322 | * | ||||
| 323 | * A device instance for a drm driver is represented by &struct drm_device. This | ||||
| 324 | * is allocated and initialized with devm_drm_dev_alloc(), usually from | ||||
| 325 | * bus-specific ->probe() callbacks implemented by the driver. The driver then | ||||
| 326 | * needs to initialize all the various subsystems for the drm device like memory | ||||
| 327 | * management, vblank handling, modesetting support and initial output | ||||
| 328 | * configuration plus obviously initialize all the corresponding hardware bits. | ||||
| 329 | * Finally when everything is up and running and ready for userspace the device | ||||
| 330 | * instance can be published using drm_dev_register(). | ||||
| 331 | * | ||||
| 332 | * There is also deprecated support for initializing device instances using | ||||
| 333 | * bus-specific helpers and the &drm_driver.load callback. But due to | ||||
| 334 | * backwards-compatibility needs the device instance has to be published too | ||||
| 335 | * early, which requires unpretty global locking to make safe and is therefore | ||||
| 336 | * only supported for existing drivers not yet converted to the new scheme. | ||||
| 337 | * | ||||
| 338 | * When cleaning up a device instance everything needs to be done in reverse: | ||||
| 339 | * First unpublish the device instance with drm_dev_unregister(). Then clean up | ||||
| 340 | * any other resources allocated at device initialization and drop the driver's | ||||
| 341 | * reference to &drm_device using drm_dev_put(). | ||||
| 342 | * | ||||
| 343 | * Note that any allocation or resource which is visible to userspace must be | ||||
| 344 | * released only when the final drm_dev_put() is called, and not when the | ||||
| 345 | * driver is unbound from the underlying physical struct &device. Best to use | ||||
| 346 | * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and | ||||
| 347 | * related functions. | ||||
| 348 | * | ||||
| 349 | * devres managed resources like devm_kmalloc() can only be used for resources | ||||
| 350 | * directly related to the underlying hardware device, and only used in code | ||||
| 351 | * paths fully protected by drm_dev_enter() and drm_dev_exit(). | ||||
| 352 | * | ||||
| 353 | * Display driver example | ||||
| 354 | * ~~~~~~~~~~~~~~~~~~~~~~ | ||||
| 355 | * | ||||
| 356 | * The following example shows a typical structure of a DRM display driver. | ||||
| 357 | * The example focuses on the probe() function and the other functions that are | ||||
| 358 | * almost always present and serve as a demonstration of devm_drm_dev_alloc(). | ||||
| 359 | * | ||||
| 360 | * .. code-block:: c | ||||
| 361 | * | ||||
| 362 | * struct driver_device { | ||||
| 363 | * struct drm_device drm; | ||||
| 364 | * void *userspace_facing; | ||||
| 365 | * struct clk *pclk; | ||||
| 366 | * }; | ||||
| 367 | * | ||||
| 368 | * static struct drm_driver driver_drm_driver = { | ||||
| 369 | * [...] | ||||
| 370 | * }; | ||||
| 371 | * | ||||
| 372 | * static int driver_probe(struct platform_device *pdev) | ||||
| 373 | * { | ||||
| 374 | * struct driver_device *priv; | ||||
| 375 | * struct drm_device *drm; | ||||
| 376 | * int ret; | ||||
| 377 | * | ||||
| 378 | * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver, | ||||
| 379 | * struct driver_device, drm); | ||||
| 380 | * if (IS_ERR(priv)) | ||||
| 381 | * return PTR_ERR(priv); | ||||
| 382 | * drm = &priv->drm; | ||||
| 383 | * | ||||
| 384 | * ret = drmm_mode_config_init(drm); | ||||
| 385 | * if (ret) | ||||
| 386 | * return ret; | ||||
| 387 | * | ||||
| 388 | * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL); | ||||
| 389 | * if (!priv->userspace_facing) | ||||
| 390 | * return -ENOMEM; | ||||
| 391 | * | ||||
| 392 | * priv->pclk = devm_clk_get(dev, "PCLK"); | ||||
| 393 | * if (IS_ERR(priv->pclk)) | ||||
| 394 | * return PTR_ERR(priv->pclk); | ||||
| 395 | * | ||||
| 396 | * // Further setup, display pipeline etc | ||||
| 397 | * | ||||
| 398 | * platform_set_drvdata(pdev, drm); | ||||
| 399 | * | ||||
| 400 | * drm_mode_config_reset(drm); | ||||
| 401 | * | ||||
| 402 | * ret = drm_dev_register(drm); | ||||
| 403 | * if (ret) | ||||
| 404 | * return ret; | ||||
| 405 | * | ||||
| 406 | * drm_fbdev_generic_setup(drm, 32); | ||||
| 407 | * | ||||
| 408 | * return 0; | ||||
| 409 | * } | ||||
| 410 | * | ||||
| 411 | * // This function is called before the devm_ resources are released | ||||
| 412 | * static int driver_remove(struct platform_device *pdev) | ||||
| 413 | * { | ||||
| 414 | * struct drm_device *drm = platform_get_drvdata(pdev); | ||||
| 415 | * | ||||
| 416 | * drm_dev_unregister(drm); | ||||
| 417 | * drm_atomic_helper_shutdown(drm); | ||||
| 418 | * | ||||
| 419 | * return 0; | ||||
| 420 | * } | ||||
| 421 | * | ||||
| 422 | * // This function is called on kernel restart and shutdown | ||||
| 423 | * static void driver_shutdown(struct platform_device *pdev) | ||||
| 424 | * { | ||||
| 425 | * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); | ||||
| 426 | * } | ||||
| 427 | * | ||||
| 428 | * static int __maybe_unused driver_pm_suspend(struct device *dev) | ||||
| 429 | * { | ||||
| 430 | * return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); | ||||
| 431 | * } | ||||
| 432 | * | ||||
| 433 | * static int __maybe_unused driver_pm_resume(struct device *dev) | ||||
| 434 | * { | ||||
| 435 | * drm_mode_config_helper_resume(dev_get_drvdata(dev)); | ||||
| 436 | * | ||||
| 437 | * return 0; | ||||
| 438 | * } | ||||
| 439 | * | ||||
| 440 | * static const struct dev_pm_ops driver_pm_ops = { | ||||
| 441 | * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume) | ||||
| 442 | * }; | ||||
| 443 | * | ||||
| 444 | * static struct platform_driver driver_driver = { | ||||
| 445 | * .driver = { | ||||
| 446 | * [...] | ||||
| 447 | * .pm = &driver_pm_ops, | ||||
| 448 | * }, | ||||
| 449 | * .probe = driver_probe, | ||||
| 450 | * .remove = driver_remove, | ||||
| 451 | * .shutdown = driver_shutdown, | ||||
| 452 | * }; | ||||
| 453 | * module_platform_driver(driver_driver); | ||||
| 454 | * | ||||
| 455 | * Drivers that want to support device unplugging (USB, DT overlay unload) should | ||||
| 456 | * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect | ||||
| 457 | * regions that access device resources to prevent use after they're | ||||
| 458 | * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one | ||||
| 459 | * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before | ||||
| 460 | * drm_atomic_helper_shutdown() is called. This means that if the disable code | ||||
| 461 | * paths are protected, they will not run on regular driver module unload, | ||||
| 462 | * possibly leaving the hardware enabled. | ||||
| 463 | */ | ||||
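A sketch of the drm_dev_enter()/drm_dev_exit() pattern referred to in the last paragraph above; my_hw_access() is a hypothetical helper, not part of this driver:

static void my_hw_access(struct drm_device *dev)
{
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;		/* device already unplugged */

	/* ... touch device resources here ... */

	drm_dev_exit(idx);
}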
| 464 | |||||
| 465 | /** | ||||
| 466 | * drm_put_dev - Unregister and release a DRM device | ||||
| 467 | * @dev: DRM device | ||||
| 468 | * | ||||
| 469 | * Called at module unload time or when a PCI device is unplugged. | ||||
| 470 | * | ||||
| 471 | * Cleans up the DRM device, calling drm_lastclose(). | ||||
| 472 | * | ||||
| 473 | * Note: Use of this function is deprecated. It will eventually go away | ||||
| 474 | * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly | ||||
| 475 | * instead to make sure that the device isn't userspace accessible any more | ||||
| 476 | * while teardown is in progress, ensuring that userspace can't access an | ||||
| 477 | * inconsistent state. | ||||
| 478 | */ | ||||
| 479 | void drm_put_dev(struct drm_device *dev) | ||||
| 480 | { | ||||
| 481 | DRM_DEBUG("\n"); | ||||
| 482 | |||||
| 483 | if (!dev) { | ||||
| 484 | DRM_ERROR("cleanup called no dev\n"); | ||||
| 485 | return; | ||||
| 486 | } | ||||
| 487 | |||||
| 488 | drm_dev_unregister(dev); | ||||
| 489 | drm_dev_put(dev); | ||||
| 490 | } | ||||
| 491 | EXPORT_SYMBOL(drm_put_dev); | ||||
| 492 | |||||
| 493 | /** | ||||
| 494 | * drm_dev_enter - Enter device critical section | ||||
| 495 | * @dev: DRM device | ||||
| 496 | * @idx: Pointer to index that will be passed to the matching drm_dev_exit() | ||||
| 497 | * | ||||
| 498 | * This function marks and protects the beginning of a section that should not | ||||
| 499 | * be entered after the device has been unplugged. The section end is marked | ||||
| 500 | * with drm_dev_exit(). Calls to this function can be nested. | ||||
| 501 | * | ||||
| 502 | * Returns: | ||||
| 503 | * True if it is OK to enter the section, false otherwise. | ||||
| 504 | */ | ||||
| 505 | bool drm_dev_enter(struct drm_device *dev, int *idx) | ||||
| 506 | { | ||||
| 507 | #ifdef notyet | ||||
| 508 | *idx = srcu_read_lock(&drm_unplug_srcu); | ||||
| 509 | |||||
| 510 | if (dev->unplugged) { | ||||
| 511 | srcu_read_unlock(&drm_unplug_srcu, *idx); | ||||
| 512 | return false; | ||||
| 513 | } | ||||
| 514 | #endif | ||||
| 515 | |||||
| 516 | return true; | ||||
| 517 | } | ||||
| 518 | EXPORT_SYMBOL(drm_dev_enter); | ||||
| 519 | |||||
| 520 | /** | ||||
| 521 | * drm_dev_exit - Exit device critical section | ||||
| 522 | * @idx: index returned from drm_dev_enter() | ||||
| 523 | * | ||||
| 524 | * This function marks the end of a section that should not be entered after | ||||
| 525 | * the device has been unplugged. | ||||
| 526 | */ | ||||
| 527 | void drm_dev_exit(int idx) | ||||
| 528 | { | ||||
| 529 | #ifdef notyet | ||||
| 530 | srcu_read_unlock(&drm_unplug_srcu, idx); | ||||
| 531 | #endif | ||||
| 532 | } | ||||
| 533 | EXPORT_SYMBOL(drm_dev_exit); | ||||
| 534 | |||||
| 535 | /** | ||||
| 536 | * drm_dev_unplug - unplug a DRM device | ||||
| 537 | * @dev: DRM device | ||||
| 538 | * | ||||
| 539 | * This unplugs a hotpluggable DRM device, which makes it inaccessible to | ||||
| 540 | * userspace operations. Entry-points can use drm_dev_enter() and | ||||
| 541 | * drm_dev_exit() to protect device resources in a race free manner. This | ||||
| 542 | * essentially unregisters the device like drm_dev_unregister(), but can be | ||||
| 543 | * called while there are still open users of @dev. | ||||
| 544 | */ | ||||
| 545 | void drm_dev_unplug(struct drm_device *dev) | ||||
| 546 | { | ||||
| 547 | STUB(); | ||||
| 548 | #ifdef notyet | ||||
| 549 | /* | ||||
| 550 | * After synchronizing any critical read section is guaranteed to see | ||||
| 551 | * the new value of ->unplugged, and any critical section which might | ||||
| 552 | * still have seen the old value of ->unplugged is guaranteed to have | ||||
| 553 | * finished. | ||||
| 554 | */ | ||||
| 555 | dev->unplugged = true; | ||||
| 556 | synchronize_srcu(&drm_unplug_srcu); | ||||
| 557 | |||||
| 558 | drm_dev_unregister(dev); | ||||
| 559 | #endif | ||||
| 560 | } | ||||
| 561 | EXPORT_SYMBOL(drm_dev_unplug); | ||||
| 562 | |||||
| 563 | #ifdef __linux__ | ||||
| 564 | /* | ||||
| 565 | * DRM internal mount | ||||
| 566 | * We want to be able to allocate our own "struct address_space" to control | ||||
| 567 | * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow | ||||
| 568 | * stand-alone address_space objects, so we need an underlying inode. As there | ||||
| 569 | * is no way to allocate an independent inode easily, we need a fake internal | ||||
| 570 | * VFS mount-point. | ||||
| 571 | * | ||||
| 572 | * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() | ||||
| 573 | * frees it again. You are allowed to use iget() and iput() to get references to | ||||
| 574 | * the inode. But each drm_fs_inode_new() call must be paired with exactly one | ||||
| 575 | * drm_fs_inode_free() call (which does not have to be the last iput()). | ||||
| 576 | * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it | ||||
| 577 | * between multiple inode-users. You could, technically, call | ||||
| 578 | * iget() + drm_fs_inode_free() directly after alloc and sometime later do an | ||||
| 579 | * iput(), but this way you'd end up with a new vfsmount for each inode. | ||||
| 580 | */ | ||||
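A sketch of the pairing rule described above (illustrative only; the surrounding context and the error-handling fragment are assumptions, not part of this file):

struct inode *inode = drm_fs_inode_new();
if (IS_ERR(inode))
	return PTR_ERR(inode);
/* ... the inode backs the private address_space ... */
drm_fs_inode_free(inode);	/* must pair with the drm_fs_inode_new() above */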
| 581 | |||||
| 582 | static int drm_fs_cnt; | ||||
| 583 | static struct vfsmount *drm_fs_mnt; | ||||
| 584 | |||||
| 585 | static int drm_fs_init_fs_context(struct fs_context *fc) | ||||
| 586 | { | ||||
| 587 | return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM; | ||||
| 588 | } | ||||
| 589 | |||||
| 590 | static struct file_system_type drm_fs_type = { | ||||
| 591 | .name = "drm", | ||||
| 592 | .owner = THIS_MODULE, | ||||
| 593 | .init_fs_context = drm_fs_init_fs_context, | ||||
| 594 | .kill_sb = kill_anon_super, | ||||
| 595 | }; | ||||
| 596 | |||||
| 597 | static struct inode *drm_fs_inode_new(void) | ||||
| 598 | { | ||||
| 599 | struct inode *inode; | ||||
| 600 | int r; | ||||
| 601 | |||||
| 602 | r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); | ||||
| 603 | if (r < 0) { | ||||
| 604 | DRM_ERROR("Cannot mount pseudo fs: %d\n", r); | ||||
| 605 | return ERR_PTR(r); | ||||
| 606 | } | ||||
| 607 | |||||
| 608 | inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); | ||||
| 609 | if (IS_ERR(inode)) | ||||
| 610 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); | ||||
| 611 | |||||
| 612 | return inode; | ||||
| 613 | } | ||||
| 614 | |||||
| 615 | static void drm_fs_inode_free(struct inode *inode) | ||||
| 616 | { | ||||
| 617 | if (inode) { | ||||
| 618 | iput(inode); | ||||
| 619 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); | ||||
| 620 | } | ||||
| 621 | } | ||||
| 622 | |||||
| 623 | #endif /* __linux__ */ | ||||
| 624 | |||||
| 625 | /** | ||||
| 626 | * DOC: component helper usage recommendations | ||||
| 627 | * | ||||
| 628 | * DRM drivers that drive hardware where a logical device consists of a pile of | ||||
| 629 | * independent hardware blocks are recommended to use the :ref:`component helper | ||||
| 630 | * library<component>`. For consistency and better options for code reuse the | ||||
| 631 | * following guidelines apply: | ||||
| 632 | * | ||||
| 633 | * - The entire device initialization procedure should be run from the | ||||
| 634 | * &component_master_ops.master_bind callback, starting with | ||||
| 635 | * devm_drm_dev_alloc(), then binding all components with | ||||
| 636 | * component_bind_all() and finishing with drm_dev_register(). | ||||
| 637 | * | ||||
| 638 | * - The opaque pointer passed to all components through component_bind_all() | ||||
| 639 | * should point at &struct drm_device of the device instance, not some driver | ||||
| 640 | * specific private structure. | ||||
| 641 | * | ||||
| 642 | * - The component helper fills the niche where further standardization of | ||||
| 643 | * interfaces is not practical. When there already is, or will be, a | ||||
| 644 | * standardized interface like &drm_bridge or &drm_panel, providing its own | ||||
| 645 | * functions to find such components at driver load time, like | ||||
| 646 | * drm_of_find_panel_or_bridge(), then the component helper should not be | ||||
| 647 | * used. | ||||
| 648 | */ | ||||
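A rough sketch of the bind flow these guidelines describe; my_master_bind(), my_drm_driver and struct my_device are illustrative placeholders, not part of this file:

static int my_master_bind(struct device *dev)
{
	struct my_device *priv;
	int ret;

	priv = devm_drm_dev_alloc(dev, &my_drm_driver, struct my_device, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* Pass the &drm_device itself, not a driver-private pointer. */
	ret = component_bind_all(dev, &priv->drm);
	if (ret)
		return ret;

	return drm_dev_register(&priv->drm, 0);
}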
| 649 | |||||
| 650 | static void drm_dev_init_release(struct drm_device *dev, void *res) | ||||
| 651 | { | ||||
| 652 | drm_legacy_ctxbitmap_cleanup(dev); | ||||
| 653 | drm_legacy_remove_map_hash(dev); | ||||
| 654 | #ifdef __linux__ | ||||
| 655 | drm_fs_inode_free(dev->anon_inode); | ||||
| 656 | |||||
| 657 | put_device(dev->dev); | ||||
| 658 | #endif | ||||
| 659 | /* Prevent use-after-free in drm_managed_release when debugging is | ||||
| 660 | * enabled. Slightly awkward, but can't really be helped. */ | ||||
| 661 | dev->dev = NULL; | ||||
| 662 | mutex_destroy(&dev->master_mutex); | ||||
| 663 | mutex_destroy(&dev->clientlist_mutex); | ||||
| 664 | mutex_destroy(&dev->filelist_mutex); | ||||
| 665 | mutex_destroy(&dev->struct_mutex); | ||||
| 666 | drm_legacy_destroy_members(dev); | ||||
| 667 | } | ||||
| 668 | |||||
| 669 | static int drm_dev_init(struct drm_device *dev, | ||||
| 670 | struct drm_driver *driver, | ||||
| 671 | struct device *parent) | ||||
| 672 | { | ||||
| 673 | int ret; | ||||
| 674 | |||||
| 675 | if (!drm_core_init_complete) { | ||||
| 676 | DRM_ERROR("DRM core is not initialized\n"); | ||||
| 677 | return -ENODEV; | ||||
| 678 | } | ||||
| 679 | |||||
| 680 | if (WARN_ON(!parent)) | ||||
| 681 | return -EINVAL; | ||||
| 682 | |||||
| 683 | kref_init(&dev->ref); | ||||
| 684 | #ifdef __linux__ | ||||
| 685 | dev->dev = get_device(parent); | ||||
| 686 | #endif | ||||
| 687 | dev->driver = driver; | ||||
| 688 | |||||
| 689 | INIT_LIST_HEAD(&dev->managed.resources); | ||||
| 690 | mtx_init(&dev->managed.lock, IPL_TTY); | ||||
| 691 | |||||
| 692 | /* no per-device feature limits by default */ | ||||
| 693 | dev->driver_features = ~0u; | ||||
| 694 | |||||
| 695 | drm_legacy_init_members(dev); | ||||
| 696 | #ifdef notyet | ||||
| 697 | INIT_LIST_HEAD(&dev->filelist); | ||||
| 698 | #else | ||||
| 699 | SPLAY_INIT(&dev->files); | ||||
| 700 | #endif | ||||
| 701 | INIT_LIST_HEAD(&dev->filelist_internal); | ||||
| 702 | INIT_LIST_HEAD(&dev->clientlist); | ||||
| 703 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||||
| 704 | |||||
| 705 | mtx_init(&dev->event_lock, IPL_TTY); | ||||
| 706 | rw_init(&dev->struct_mutex, "drmdevlk"); | ||||
| 707 | rw_init(&dev->filelist_mutex, "drmflist"); | ||||
| 708 | rw_init(&dev->clientlist_mutex, "drmclist"); | ||||
| 709 | rw_init(&dev->master_mutex, "drmmast"); | ||||
| 710 | |||||
| 711 | ret = drmm_add_action(dev, drm_dev_init_release, NULL); | ||||
| 712 | if (ret) | ||||
| 713 | return ret; | ||||
| 714 | |||||
| 715 | #ifdef __linux__ | ||||
| 716 | dev->anon_inode = drm_fs_inode_new(); | ||||
| 717 | if (IS_ERR(dev->anon_inode)) { | ||||
| 718 | ret = PTR_ERR(dev->anon_inode); | ||||
| 719 | DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); | ||||
| 720 | goto err; | ||||
| 721 | } | ||||
| 722 | #endif | ||||
| 723 | |||||
| 724 | if (drm_core_check_feature(dev, DRIVER_RENDER)) { | ||||
| 725 | ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); | ||||
| 726 | if (ret) | ||||
| 727 | goto err; | ||||
| 728 | } | ||||
| 729 | |||||
| 730 | ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); | ||||
| 731 | if (ret) | ||||
| 732 | goto err; | ||||
| 733 | |||||
| 734 | ret = drm_legacy_create_map_hash(dev); | ||||
| 735 | if (ret) | ||||
| 736 | goto err; | ||||
| 737 | |||||
| 738 | drm_legacy_ctxbitmap_init(dev); | ||||
| 739 | |||||
| 740 | if (drm_core_check_feature(dev, DRIVER_GEM)) { | ||||
| 741 | ret = drm_gem_init(dev); | ||||
| 742 | if (ret) { | ||||
| 743 | DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); | ||||
| 744 | goto err; | ||||
| 745 | } | ||||
| 746 | } | ||||
| 747 | |||||
| 748 | ret = drm_dev_set_unique(dev, dev_name(parent)); | ||||
| 749 | if (ret) | ||||
| 750 | goto err; | ||||
| 751 | |||||
| 752 | return 0; | ||||
| 753 | |||||
| 754 | err: | ||||
| 755 | drm_managed_release(dev); | ||||
| 756 | |||||
| 757 | return ret; | ||||
| 758 | } | ||||
| 759 | |||||
| 760 | #ifdef notyet | ||||
| 761 | static void devm_drm_dev_init_release(void *data) | ||||
| 762 | { | ||||
| 763 | drm_dev_put(data); | ||||
| 764 | } | ||||
| 765 | #endif | ||||
| 766 | |||||
| 767 | static int devm_drm_dev_init(struct device *parent, | ||||
| 768 | struct drm_device *dev, | ||||
| 769 | struct drm_driver *driver) | ||||
| 770 | { | ||||
| 771 | STUB(); | ||||
| 772 | return -ENOSYS; | ||||
| 773 | #ifdef notyet | ||||
| 774 | int ret; | ||||
| 775 | |||||
| 776 | ret = drm_dev_init(dev, driver, parent); | ||||
| 777 | if (ret) | ||||
| 778 | return ret; | ||||
| 779 | |||||
| 780 | ret = devm_add_action(parent, devm_drm_dev_init_release, dev); | ||||
| 781 | if (ret) | ||||
| 782 | devm_drm_dev_init_release(dev); | ||||
| 783 | |||||
| 784 | return ret; | ||||
| 785 | #endif | ||||
| 786 | } | ||||
| 787 | |||||
| 788 | void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, | ||||
| 789 | size_t size, size_t offset) | ||||
| 790 | { | ||||
| 791 | void *container; | ||||
| 792 | struct drm_device *drm; | ||||
| 793 | int ret; | ||||
| 794 | |||||
| 795 | container = kzalloc(size, GFP_KERNEL); | ||||
| 796 | if (!container) | ||||
| 797 | return ERR_PTR(-ENOMEM); | ||||
| 798 | |||||
| 799 | drm = container + offset; | ||||
| 800 | ret = devm_drm_dev_init(parent, drm, driver); | ||||
| 801 | if (ret) { | ||||
| 802 | kfree(container); | ||||
| 803 | return ERR_PTR(ret); | ||||
| 804 | } | ||||
| 805 | drmm_add_final_kfree(drm, container); | ||||
| 806 | |||||
| 807 | return container; | ||||
| 808 | } | ||||
| 809 | EXPORT_SYMBOL(__devm_drm_dev_alloc); | ||||
| 810 | |||||
| 811 | /** | ||||
| 812 | * drm_dev_alloc - Allocate new DRM device | ||||
| 813 | * @driver: DRM driver to allocate device for | ||||
| 814 | * @parent: Parent device object | ||||
| 815 | * | ||||
| 816 | * This is the deprecated version of devm_drm_dev_alloc(), which does not support | ||||
| 817 | * subclassing through embedding the struct &drm_device in a driver private | ||||
| 818 | * structure, and which does not support automatic cleanup through devres. | ||||
| 819 | * | ||||
| 820 | * RETURNS: | ||||
| 821 | * Pointer to new DRM device, or ERR_PTR on failure. | ||||
| 822 | */ | ||||
| 823 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, | ||||
| 824 | struct device *parent) | ||||
| 825 | { | ||||
| 826 | struct drm_device *dev; | ||||
| 827 | int ret; | ||||
| 828 | |||||
| 829 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||||
| 830 | if (!dev) | ||||
| 831 | return ERR_PTR(-ENOMEM); | ||||
| 832 | |||||
| 833 | ret = drm_dev_init(dev, driver, parent); | ||||
| 834 | if (ret) { | ||||
| 835 | kfree(dev); | ||||
| 836 | return ERR_PTR(ret); | ||||
| 837 | } | ||||
| 838 | |||||
| 839 | drmm_add_final_kfree(dev, dev); | ||||
| 840 | |||||
| 841 | return dev; | ||||
| 842 | } | ||||
| 843 | EXPORT_SYMBOL(drm_dev_alloc); | ||||
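A sketch of the deprecated drm_dev_alloc() flow, following the kerneldoc above and the drm_put_dev() note; my_driver and the surrounding probe/teardown context are assumptions:

dev = drm_dev_alloc(&my_driver, parent);
if (IS_ERR(dev))
	return PTR_ERR(dev);
ret = drm_dev_register(dev, 0);
if (ret) {
	drm_dev_put(dev);
	return ret;
}
/* ... on teardown: drm_dev_unregister(dev); drm_dev_put(dev); */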
| 844 | |||||
| 845 | static void drm_dev_release(struct kref *ref) | ||||
| 846 | { | ||||
| 847 | struct drm_device *dev = container_of(ref, struct drm_device, ref); | ||||
| 848 | |||||
| 849 | if (dev->driver->release) | ||||
| 850 | dev->driver->release(dev); | ||||
| 851 | |||||
| 852 | drm_managed_release(dev); | ||||
| 853 | |||||
| 854 | kfree(dev->managed.final_kfree); | ||||
| 855 | } | ||||
| 856 | |||||
| 857 | /** | ||||
| 858 | * drm_dev_get - Take reference of a DRM device | ||||
| 859 | * @dev: device to take reference of or NULL | ||||
| 860 | * | ||||
| 861 | * This increases the ref-count of @dev by one. You *must* already own a | ||||
| 862 | * reference when calling this. Use drm_dev_put() to drop this reference | ||||
| 863 | * again. | ||||
| 864 | * | ||||
| 865 | * This function never fails. However, this function does not provide *any* | ||||
| 866 | * guarantee whether the device is alive or running. It only provides a | ||||
| 867 | * reference to the object and the memory associated with it. | ||||
| 868 | */ | ||||
| 869 | void drm_dev_get(struct drm_device *dev) | ||||
| 870 | { | ||||
| 871 | if (dev) | ||||
| 872 | kref_get(&dev->ref); | ||||
| 873 | } | ||||
| 874 | EXPORT_SYMBOL(drm_dev_get); | ||||
| 875 | |||||
| 876 | /** | ||||
| 877 | * drm_dev_put - Drop reference of a DRM device | ||||
| 878 | * @dev: device to drop reference of or NULL | ||||
| 879 | * | ||||
| 880 | * This decreases the ref-count of @dev by one. The device is destroyed if the | ||||
| 881 | * ref-count drops to zero. | ||||
| 882 | */ | ||||
| 883 | void drm_dev_put(struct drm_device *dev) | ||||
| 884 | { | ||||
| 885 | if (dev) | ||||
| 886 | kref_put(&dev->ref, drm_dev_release); | ||||
| 887 | } | ||||
| 888 | EXPORT_SYMBOL(drm_dev_put); | ||||
| 889 | |||||
| 890 | static int create_compat_control_link(struct drm_device *dev) | ||||
| 891 | { | ||||
| 892 | struct drm_minor *minor; | ||||
| 893 | char *name; | ||||
| 894 | int ret; | ||||
| 895 | |||||
| 896 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||||
| 897 | return 0; | ||||
| 898 | |||||
| 899 | minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); | ||||
| 900 | if (!minor) | ||||
| 901 | return 0; | ||||
| 902 | |||||
| 903 | /* | ||||
| 904 | * Some existing userspace out there uses the existence of the controlD* | ||||
| 905 | * sysfs files to figure out whether it's a modeset driver. It only does | ||||
| 906 | * readdir, hence a symlink is sufficient (and the least confusing | ||||
| 907 | * option). Otherwise controlD* is entirely unused. | ||||
| 908 | * | ||||
| 909 | * Old controlD chardev have been allocated in the range | ||||
| 910 | * 64-127. | ||||
| 911 | */ | ||||
| 912 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); | ||||
| 913 | if (!name) | ||||
| 914 | return -ENOMEM; | ||||
| 915 | |||||
| 916 | ret = sysfs_create_link(minor->kdev->kobj.parent, | ||||
| 917 | &minor->kdev->kobj, | ||||
| 918 | name); | ||||
| 919 | |||||
| 920 | kfree(name); | ||||
| 921 | |||||
| 922 | return ret; | ||||
| 923 | } | ||||
| 924 | |||||
| 925 | static void remove_compat_control_link(struct drm_device *dev) | ||||
| 926 | { | ||||
| 927 | struct drm_minor *minor; | ||||
| 928 | char *name; | ||||
| 929 | |||||
| 930 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||||
| 931 | return; | ||||
| 932 | |||||
| 933 | minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); | ||||
| 934 | if (!minor) | ||||
| 935 | return; | ||||
| 936 | |||||
| 937 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); | ||||
| 938 | if (!name) | ||||
| 939 | return; | ||||
| 940 | |||||
| 941 | sysfs_remove_link(minor->kdev->kobj.parent, name); | ||||
| 942 | |||||
| 943 | kfree(name); | ||||
| 944 | } | ||||
| 945 | |||||
| 946 | /** | ||||
| 947 | * drm_dev_register - Register DRM device | ||||
| 948 | * @dev: Device to register | ||||
| 949 | * @flags: Flags passed to the driver's .load() function | ||||
| 950 | * | ||||
| 951 | * Register the DRM device @dev with the system, advertise device to user-space | ||||
| 952 | * and start normal device operation. @dev must be initialized via drm_dev_init() | ||||
| 953 | * previously. | ||||
| 954 | * | ||||
| 955 | * Never call this twice on any device! | ||||
| 956 | * | ||||
| 957 | * NOTE: To ensure backward compatibility with existing drivers method this | ||||
| 958 | * function calls the &drm_driver.load method after registering the device | ||||
| 959 | * nodes, creating race conditions. Usage of the &drm_driver.load methods is | ||||
| 960 | * therefore deprecated, drivers must perform all initialization before calling | ||||
| 961 | * drm_dev_register(). | ||||
| 962 | * | ||||
| 963 | * RETURNS: | ||||
| 964 | * 0 on success, negative error code on failure. | ||||
| 965 | */ | ||||
| 966 | int drm_dev_register(struct drm_device *dev, unsigned long flags) | ||||
| 967 | { | ||||
| 968 | struct drm_driver *driver = dev->driver; | ||||
| 969 | int ret; | ||||
| 970 | |||||
| 971 | if (!driver->load) | ||||
| 972 | drm_mode_config_validate(dev); | ||||
| 973 | |||||
| 974 | WARN_ON(!dev->managed.final_kfree); | ||||
| 975 | |||||
| 976 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 977 | mutex_lock(&drm_global_mutex); | ||||
| 978 | |||||
| 979 | ret = drm_minor_register(dev, DRM_MINOR_RENDER); | ||||
| 980 | if (ret) | ||||
| 981 | goto err_minors; | ||||
| 982 | |||||
| 983 | ret = drm_minor_register(dev, DRM_MINOR_PRIMARY); | ||||
| 984 | if (ret) | ||||
| 985 | goto err_minors; | ||||
| 986 | |||||
| 987 | ret = create_compat_control_link(dev); | ||||
| 988 | if (ret) | ||||
| 989 | goto err_minors; | ||||
| 990 | |||||
| 991 | dev->registered = true; | ||||
| 992 | |||||
| 993 | if (dev->driver->load) { | ||||
| 994 | ret = dev->driver->load(dev, flags); | ||||
| 995 | if (ret) | ||||
| 996 | goto err_minors; | ||||
| 997 | } | ||||
| 998 | |||||
| 999 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||||
| 1000 | drm_modeset_register_all(dev); | ||||
| 1001 | |||||
| 1002 | ret = 0; | ||||
| 1003 | |||||
| 1004 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | ||||
| 1005 | driver->name, driver->major, driver->minor, | ||||
| 1006 | driver->patchlevel, driver->date, | ||||
| 1007 | dev->dev ? dev_name(dev->dev) : "virtual device", | ||||
| 1008 | dev->primary->index); | ||||
| 1009 | |||||
| 1010 | goto out_unlock; | ||||
| 1011 | |||||
| 1012 | err_minors: | ||||
| 1013 | remove_compat_control_link(dev); | ||||
| 1014 | drm_minor_unregister(dev, DRM_MINOR_PRIMARY); | ||||
| 1015 | drm_minor_unregister(dev, DRM_MINOR_RENDER); | ||||
| 1016 | out_unlock: | ||||
| 1017 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1018 | mutex_unlock(&drm_global_mutex); | ||||
| 1019 | return ret; | ||||
| 1020 | } | ||||
| 1021 | EXPORT_SYMBOL(drm_dev_register); | ||||
| 1022 | |||||
| 1023 | /** | ||||
| 1024 | * drm_dev_unregister - Unregister DRM device | ||||
| 1025 | * @dev: Device to unregister | ||||
| 1026 | * | ||||
| 1027 | * Unregister the DRM device from the system. This does the reverse of | ||||
| 1028 | * drm_dev_register() but does not deallocate the device. The caller must call | ||||
| 1029 | * drm_dev_put() to drop their final reference. | ||||
| 1030 | * | ||||
| 1031 | * A special form of unregistering for hotpluggable devices is drm_dev_unplug(), | ||||
| 1032 | * which can be called while there are still open users of @dev. | ||||
| 1033 | * | ||||
| 1034 | * This should be called first in the device teardown code to make sure | ||||
| 1035 | * userspace can't access the device instance any more. | ||||
| 1036 | */ | ||||
| 1037 | void drm_dev_unregister(struct drm_device *dev) | ||||
| 1038 | { | ||||
| 1039 | if (drm_core_check_feature(dev, DRIVER_LEGACY)) | ||||
| 1040 | drm_lastclose(dev); | ||||
| 1041 | |||||
| 1042 | dev->registered = false; | ||||
| 1043 | |||||
| 1044 | drm_client_dev_unregister(dev); | ||||
| 1045 | |||||
| 1046 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||||
| 1047 | drm_modeset_unregister_all(dev); | ||||
| 1048 | |||||
| 1049 | if (dev->driver->unload) | ||||
| 1050 | dev->driver->unload(dev); | ||||
| 1051 | |||||
| 1052 | #if IS_ENABLED(CONFIG_AGP) | ||||
| 1053 | if (dev->agp) | ||||
| 1054 | drm_agp_takedown(dev); | ||||
| 1055 | #endif | ||||
| 1056 | |||||
| 1057 | drm_legacy_rmmaps(dev); | ||||
| 1058 | |||||
| 1059 | remove_compat_control_link(dev); | ||||
| 1060 | drm_minor_unregister(dev, DRM_MINOR_PRIMARY); | ||||
| 1061 | drm_minor_unregister(dev, DRM_MINOR_RENDER); | ||||
| 1062 | } | ||||
| 1063 | EXPORT_SYMBOL(drm_dev_unregister); | ||||
| 1064 | |||||
| 1065 | /** | ||||
| 1066 | * drm_dev_set_unique - Set the unique name of a DRM device | ||||
| 1067 | * @dev: device of which to set the unique name | ||||
| 1068 | * @name: unique name | ||||
| 1069 | * | ||||
| 1070 | * Sets the unique name of a DRM device using the specified string. This is | ||||
| 1071 | * already done by drm_dev_init(), drivers should only override the default | ||||
| 1072 | * unique name for backwards compatibility reasons. | ||||
| 1073 | * | ||||
| 1074 | * Return: 0 on success or a negative error code on failure. | ||||
| 1075 | */ | ||||
| 1076 | int drm_dev_set_unique(struct drm_device *dev, const char *name) | ||||
| 1077 | { | ||||
| 1078 | drmm_kfree(dev, dev->unique); | ||||
| 1079 | dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL); | ||||
| 1080 | |||||
| 1081 | return dev->unique ? 0 : -ENOMEM; | ||||
| 1082 | } | ||||
| 1083 | EXPORT_SYMBOL(drm_dev_set_unique); | ||||
| 1084 | |||||
| 1085 | /* | ||||
| 1086 | * DRM Core | ||||
| 1087 | * The DRM core module initializes all global DRM objects and makes them | ||||
| 1088 | * available to drivers. Once setup, drivers can probe their respective | ||||
| 1089 | * devices. | ||||
| 1090 | * Currently, core management includes: | ||||
| 1091 | * - The "DRM-Global" key/value database | ||||
| 1092 | * - Global ID management for connectors | ||||
| 1093 | * - DRM major number allocation | ||||
| 1094 | * - DRM minor management | ||||
| 1095 | * - DRM sysfs class | ||||
| 1096 | * - DRM debugfs root | ||||
| 1097 | * | ||||
| 1098 | * Furthermore, the DRM core provides dynamic char-dev lookups. For each | ||||
| 1099 | * interface registered on a DRM device, you can request minor numbers from DRM | ||||
| 1100 | * core. DRM core takes care of major-number management and char-dev | ||||
| 1101 | * registration. A stub ->open() callback forwards any open() requests to the | ||||
| 1102 | * registered minor. | ||||
| 1103 | */ | ||||
| 1104 | |||||
| 1105 | #ifdef __linux__ | ||||
| 1106 | static int drm_stub_open(struct inode *inode, struct file *filp) | ||||
| 1107 | { | ||||
| 1108 | const struct file_operations *new_fops; | ||||
| 1109 | struct drm_minor *minor; | ||||
| 1110 | int err; | ||||
| 1111 | |||||
| 1112 | DRM_DEBUG("\n"); | ||||
| 1113 | |||||
| 1114 | minor = drm_minor_acquire(iminor(inode)); | ||||
| 1115 | if (IS_ERR(minor)) | ||||
| 1116 | return PTR_ERR(minor); | ||||
| 1117 | |||||
| 1118 | new_fops = fops_get(minor->dev->driver->fops); | ||||
| 1119 | if (!new_fops) { | ||||
| 1120 | err = -ENODEV; | ||||
| 1121 | goto out; | ||||
| 1122 | } | ||||
| 1123 | |||||
| 1124 | replace_fops(filp, new_fops); | ||||
| 1125 | if (filp->f_op->open) | ||||
| 1126 | err = filp->f_op->open(inode, filp); | ||||
| 1127 | else | ||||
| 1128 | err = 0; | ||||
| 1129 | |||||
| 1130 | out: | ||||
| 1131 | drm_minor_release(minor); | ||||
| 1132 | |||||
| 1133 | return err; | ||||
| 1134 | } | ||||
| 1135 | |||||
| 1136 | static const struct file_operations drm_stub_fops = { | ||||
| 1137 | .owner = THIS_MODULE, | ||||
| 1138 | .open = drm_stub_open, | ||||
| 1139 | .llseek = noop_llseek, | ||||
| 1140 | }; | ||||
| 1141 | #endif /* __linux__ */ | ||||
| 1142 | |||||
| 1143 | static void drm_core_exit(void) | ||||
| 1144 | { | ||||
| 1145 | #ifdef __linux__ | ||||
| 1146 | unregister_chrdev(DRM_MAJOR, "drm"); | ||||
| 1147 | debugfs_remove(drm_debugfs_root); | ||||
| 1148 | drm_sysfs_destroy(); | ||||
| 1149 | #endif | ||||
| 1150 | idr_destroy(&drm_minors_idr); | ||||
| 1151 | drm_connector_ida_destroy(); | ||||
| 1152 | } | ||||
| 1153 | |||||
| 1154 | static int __init drm_core_init(void) | ||||
| 1155 | { | ||||
| 1156 | #ifdef __linux__ | ||||
| 1157 | int ret; | ||||
| 1158 | #endif | ||||
| 1159 | |||||
| 1160 | drm_connector_ida_init(); | ||||
| 1161 | idr_init(&drm_minors_idr); | ||||
| 1162 | |||||
| 1163 | #ifdef __linux__ | ||||
| 1164 | ret = drm_sysfs_init(); | ||||
| 1165 | if (ret < 0) { | ||||
| 1166 | DRM_ERROR("Cannot create DRM class: %d\n", ret); | ||||
| 1167 | goto error; | ||||
| 1168 | } | ||||
| 1169 | |||||
| 1170 | drm_debugfs_root = debugfs_create_dir("dri", NULL); | ||||
| 1171 | |||||
| 1172 | ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); | ||||
| 1173 | if (ret < 0) | ||||
| 1174 | goto error; | ||||
| 1175 | #endif | ||||
| 1176 | |||||
| 1177 | drm_core_init_complete = true; | ||||
| 1178 | |||||
| 1179 | DRM_DEBUG("Initialized\n"); | ||||
| 1180 | return 0; | ||||
| 1181 | #ifdef __linux__ | ||||
| 1182 | error: | ||||
| 1183 | drm_core_exit(); | ||||
| 1184 | return ret; | ||||
| 1185 | #endif | ||||
| 1186 | } | ||||
| 1187 | |||||
| 1188 | #ifdef __linux__ | ||||
| 1189 | module_init(drm_core_init); | ||||
| 1190 | module_exit(drm_core_exit); | ||||
| 1191 | #endif | ||||
| 1192 | |||||
| 1193 | void | ||||
| 1194 | drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot, | ||||
| 1195 | bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm) | ||||
| 1196 | { | ||||
| 1197 | struct drm_attach_args arg; | ||||
| 1198 | |||||
| 1199 | memset(&arg, 0, sizeof(arg)); | ||||
| 1200 | arg.driver = driver; | ||||
| 1201 | arg.bst = iot; | ||||
| 1202 | arg.dmat = dmat; | ||||
| 1203 | arg.drm = drm; | ||||
| 1204 | |||||
| 1205 | arg.busid = dev->dv_xname; | ||||
| 1206 | arg.busid_len = strlen(dev->dv_xname) + 1; | ||||
| 1207 | config_found_sm(dev, &arg, drmprint, drmsubmatch); | ||||
| 1208 | } | ||||
| 1209 | |||||
| 1210 | struct drm_device * | ||||
| 1211 | drm_attach_pci(struct drm_driver *driver, struct pci_attach_args *pa, | ||||
| 1212 | int is_agp, int primary, struct device *dev, struct drm_device *drm) | ||||
| 1213 | { | ||||
| 1214 | struct drm_attach_args arg; | ||||
| 1215 | struct drm_softc *sc; | ||||
| 1216 | |||||
| 1217 | arg.drm = drm; | ||||
| 1218 | arg.driver = driver; | ||||
| 1219 | arg.dmat = pa->pa_dmat; | ||||
| 1220 | arg.bst = pa->pa_memt; | ||||
| 1221 | arg.is_agp = is_agp; | ||||
| 1222 | arg.primary = primary; | ||||
| 1223 | arg.pa = pa; | ||||
| 1224 | |||||
| 1225 | arg.busid_len = 20; | ||||
| 1226 | arg.busid = malloc(arg.busid_len + 1, M_DRM, M_NOWAIT); | ||||
| 1227 | if (arg.busid == NULL) { | ||||
| 1228 | printf("%s: no memory for drm\n", dev->dv_xname); | ||||
| 1229 | return (NULL); | ||||
| 1230 | } | ||||
| 1231 | snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x", | ||||
| 1232 | pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function); | ||||
| 1233 | |||||
| 1234 | sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch); | ||||
| 1235 | if (sc == NULL) | ||||
| 1236 | return NULL; | ||||
| 1237 | |||||
| 1238 | return sc->sc_drm; | ||||
| 1239 | } | ||||
| 1240 | |||||
| 1241 | int | ||||
| 1242 | drmprint(void *aux, const char *pnp) | ||||
| 1243 | { | ||||
| 1244 | if (pnp != NULL) | ||||
| 1245 | printf("drm at %s", pnp); | ||||
| 1246 | return (UNCONF); | ||||
| 1247 | } | ||||
| 1248 | |||||
| 1249 | int | ||||
| 1250 | drmsubmatch(struct device *parent, void *match, void *aux) | ||||
| 1251 | { | ||||
| 1252 | extern struct cfdriver drm_cd; | ||||
| 1253 | struct cfdata *cf = match; | ||||
| 1254 | |||||
| 1255 | /* only allow drm to attach */ | ||||
| 1256 | if (cf->cf_driver == &drm_cd) | ||||
| 1257 | return ((*cf->cf_attach->ca_match)(parent, match, aux)); | ||||
| 1258 | return (0); | ||||
| 1259 | } | ||||
| 1260 | |||||
| 1261 | int | ||||
| 1262 | drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist) | ||||
| 1263 | { | ||||
| 1264 | const struct pci_device_id *id_entry; | ||||
| 1265 | |||||
| 1266 | id_entry = drm_find_description(PCI_VENDOR(pa->pa_id), | ||||
| 1267 | PCI_PRODUCT(pa->pa_id), idlist); | ||||
| 1268 | if (id_entry != NULL) | ||||
| 1269 | return 1; | ||||
| 1270 | |||||
| 1271 | return 0; | ||||
| 1272 | } | ||||
| 1273 | |||||
| 1274 | int | ||||
| 1275 | drm_probe(struct device *parent, void *match, void *aux) | ||||
| 1276 | { | ||||
| 1277 | struct cfdata *cf = match; | ||||
| 1278 | struct drm_attach_args *da = aux; | ||||
| 1279 | |||||
| 1280 | if (cf->drmdevcf_primarycf_loc[0] != DRMDEVCF_PRIMARY_UNK-1) { | ||||
| 1281 | /* | ||||
| 1282 | * If primary-ness of device specified, either match | ||||
| 1283 | * exactly (at high priority), or fail. | ||||
| 1284 | */ | ||||
| 1285 | if (cf->drmdevcf_primarycf_loc[0] != 0 && da->primary != 0) | ||||
| 1286 | return (10); | ||||
| 1287 | else | ||||
| 1288 | return (0); | ||||
| 1289 | } | ||||
| 1290 | |||||
| 1291 | /* If primary-ness unspecified, it wins. */ | ||||
| 1292 | return (1); | ||||
| 1293 | } | ||||
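/*
 * The "primary" locator is supplied by the kernel configuration; a config(8)
 * entry along the lines of (illustrative, not quoted from a real config):
 *
 *	drm0 at inteldrm? primary 1
 *	drm* at inteldrm?
 *
 * attaches drm0 only to the primary device, at high match priority (10),
 * while the unconstrained entry matches any device at the default
 * priority (1).
 */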
| 1294 | |||||
| 1295 | void | ||||
| 1296 | drm_attach(struct device *parent, struct device *self, void *aux) | ||||
| 1297 | { | ||||
| 1298 | struct drm_softc *sc = (struct drm_softc *)self; | ||||
| 1299 | struct drm_attach_args *da = aux; | ||||
| 1300 | struct drm_device *dev = da->drm; | ||||
| 1301 | int ret; | ||||
| 1302 | |||||
| 1303 | if (drm_refcnt == 0) { | ||||
| 1304 | drm_linux_init(); | ||||
| 1305 | drm_core_init(); | ||||
| 1306 | } | ||||
| 1307 | drm_refcnt++; | ||||
| 1308 | |||||
| 1309 | if (dev == NULL((void *)0)) { | ||||
| 1310 | dev = malloc(sizeof(struct drm_device), M_DRM145, | ||||
| 1311 | M_WAITOK0x0001 | M_ZERO0x0008); | ||||
| 1312 | sc->sc_allocated = 1; | ||||
| 1313 | } | ||||
| 1314 | |||||
| 1315 | sc->sc_drm = dev; | ||||
| 1316 | |||||
| 1317 | dev->dev = self; | ||||
| 1318 | dev->dev_private = parent; | ||||
| 1319 | dev->driver = da->driver; | ||||
| 1320 | |||||
| 1321 | INIT_LIST_HEAD(&dev->managed.resources); | ||||
| 1322 | mtx_init(&dev->managed.lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> managed.lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9 ) ? 0x9 : ((0x9)))); } while (0); | ||||
| 1323 | |||||
| 1324 | /* no per-device feature limits by default */ | ||||
| 1325 | dev->driver_features = ~0u; | ||||
| 1326 | |||||
| 1327 | dev->dmat = da->dmat; | ||||
| 1328 | dev->bst = da->bst; | ||||
| 1329 | dev->unique = da->busid; | ||||
| 1330 | |||||
| 1331 | if (da->pa) { | ||||
| 1332 | struct pci_attach_args *pa = da->pa; | ||||
| 1333 | pcireg_t subsys; | ||||
| 1334 | |||||
| 1335 | subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, | ||||
| 1336 | PCI_SUBSYS_ID_REG0x2c); | ||||
| 1337 | |||||
| 1338 | dev->pdev = &dev->_pdev; | ||||
| 1339 | dev->pdev->vendor = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff); | ||||
| 1340 | dev->pdev->device = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff); | ||||
| 1341 | dev->pdev->subsystem_vendor = PCI_VENDOR(subsys)(((subsys) >> 0) & 0xffff); | ||||
| 1342 | dev->pdev->subsystem_device = PCI_PRODUCT(subsys)(((subsys) >> 16) & 0xffff); | ||||
| 1343 | dev->pdev->revision = PCI_REVISION(pa->pa_class)(((pa->pa_class) >> 0) & 0xff); | ||||
| 1344 | |||||
| 1345 | dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function)((pa->pa_device) << 3 | (pa->pa_function)); | ||||
| 1346 | dev->pdev->bus = &dev->pdev->_bus; | ||||
| 1347 | dev->pdev->bus->pc = pa->pa_pc; | ||||
| 1348 | dev->pdev->bus->number = pa->pa_bus; | ||||
| 1349 | dev->pdev->bus->domain_nr = pa->pa_domain; | ||||
| 1350 | dev->pdev->bus->bridgetag = pa->pa_bridgetag; | ||||
| 1351 | |||||
| 1352 | if (pa->pa_bridgetag != NULL((void *)0)) { | ||||
| 1353 | dev->pdev->bus->self = malloc(sizeof(struct pci_dev), | ||||
| 1354 | M_DRM145, M_WAITOK0x0001 | M_ZERO0x0008); | ||||
| 1355 | dev->pdev->bus->self->pc = pa->pa_pc; | ||||
| 1356 | dev->pdev->bus->self->tag = *pa->pa_bridgetag; | ||||
| 1357 | } | ||||
| 1358 | |||||
| 1359 | dev->pdev->pc = pa->pa_pc; | ||||
| 1360 | dev->pdev->tag = pa->pa_tag; | ||||
| 1361 | dev->pdev->pci = (struct pci_softc *)parent->dv_parent; | ||||
| 1362 | |||||
| 1363 | #ifdef CONFIG_ACPI1 | ||||
| 1364 | dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag); | ||||
| 1365 | aml_register_notify(dev->pdev->dev.node, NULL((void *)0), | ||||
| 1366 | drm_linux_acpi_notify, NULL((void *)0), ACPIDEV_NOPOLL0); | ||||
| 1367 | #endif | ||||
| 1368 | } | ||||
| 1369 | |||||
| 1370 | mtx_init(&dev->quiesce_mtx, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> quiesce_mtx), ((((0x0)) > 0x0 && ((0x0)) < 0x9) ? 0x9 : ((0x0)))); } while (0); | ||||
| 1371 | mtx_init(&dev->event_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> event_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9)))); } while (0); | ||||
| 1372 | rw_init(&dev->struct_mutex, "drmdevlk")_rw_init_flags(&dev->struct_mutex, "drmdevlk", 0, ((void *)0)); | ||||
| 1373 | rw_init(&dev->filelist_mutex, "drmflist")_rw_init_flags(&dev->filelist_mutex, "drmflist", 0, (( void *)0)); | ||||
| 1374 | rw_init(&dev->clientlist_mutex, "drmclist")_rw_init_flags(&dev->clientlist_mutex, "drmclist", 0, ( (void *)0)); | ||||
| 1375 | rw_init(&dev->master_mutex, "drmmast")_rw_init_flags(&dev->master_mutex, "drmmast", 0, ((void *)0)); | ||||
| 1376 | |||||
| 1377 | ret = drmm_add_action(dev, drm_dev_init_release, NULL((void *)0)); | ||||
| 1378 | if (ret) | ||||
| 1379 | goto error; | ||||
| 1380 | |||||
| 1381 | SPLAY_INIT(&dev->files)do { (&dev->files)->sph_root = ((void *)0); } while (0); | ||||
| 1382 | INIT_LIST_HEAD(&dev->filelist_internal); | ||||
| 1383 | INIT_LIST_HEAD(&dev->clientlist); | ||||
| 1384 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||||
| 1385 | |||||
| 1386 | if (drm_core_check_feature(dev, DRIVER_RENDER)) { | ||||
| 1387 | ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); | ||||
| 1388 | if (ret) | ||||
| 1389 | goto error; | ||||
| 1390 | } | ||||
| 1391 | |||||
| 1392 | ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); | ||||
| 1393 | if (ret) | ||||
| 1394 | goto error; | ||||
| 1395 | |||||
| 1396 | if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { | ||||
| 1397 | #if IS_ENABLED(CONFIG_AGP)1 | ||||
| 1398 | if (da->is_agp) | ||||
| 1399 | dev->agp = drm_agp_init(); | ||||
| 1400 | #endif | ||||
| 1401 | if (dev->agp != NULL((void *)0)) { | ||||
| 1402 | if (drm_mtrr_add(dev->agp->info.ai_aperture_base, | ||||
| 1403 | dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1)) == 0) | ||||
| 1404 | dev->agp->mtrr = 1; | ||||
| 1405 | } | ||||
| 1406 | } | ||||
| 1407 | |||||
| 1408 | if (dev->driver->gem_size > 0) { | ||||
| 1409 | KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object))((dev->driver->gem_size >= sizeof(struct drm_gem_object )) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/drm_drv.c" , 1409, "dev->driver->gem_size >= sizeof(struct drm_gem_object)" )); | ||||
| 1410 | /* XXX unique name */ | ||||
| 1411 | pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE0x0, 0, | ||||
| 1412 | "drmobjpl", NULL((void *)0)); | ||||
| 1413 | } | ||||
| 1414 | |||||
| 1415 | if (drm_core_check_feature(dev, DRIVER_GEM)) { | ||||
| 1416 | ret = drm_gem_init(dev); | ||||
| 1417 | if (ret) { | ||||
| 1418 | DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n")__drm_err("Cannot initialize graphics execution manager (GEM)\n" ); | ||||
| 1419 | goto error; | ||||
| 1420 | } | ||||
| 1421 | } | ||||
| 1422 | |||||
| 1423 | drmm_add_final_kfree(dev, dev); | ||||
| 1424 | |||||
| 1425 | printf("\n"); | ||||
| 1426 | return; | ||||
| 1427 | |||||
| 1428 | error: | ||||
| 1429 | drm_managed_release(dev); | ||||
| 1430 | dev->dev_private = NULL((void *)0); | ||||
| 1431 | } | ||||
| 1432 | |||||
| 1433 | int | ||||
| 1434 | drm_detach(struct device *self, int flags) | ||||
| 1435 | { | ||||
| 1436 | struct drm_softc *sc = (struct drm_softc *)self; | ||||
| 1437 | struct drm_device *dev = sc->sc_drm; | ||||
| 1438 | |||||
| 1439 | drm_refcnt--; | ||||
| 1440 | if (drm_refcnt == 0) { | ||||
| 1441 | drm_core_exit(); | ||||
| 1442 | drm_linux_exit(); | ||||
| 1443 | } | ||||
| 1444 | |||||
| 1445 | drm_lastclose(dev); | ||||
| 1446 | |||||
| 1447 | if (drm_core_check_feature(dev, DRIVER_GEM)) { | ||||
| 1448 | if (dev->driver->gem_size > 0) | ||||
| 1449 | pool_destroy(&dev->objpl); | ||||
| 1450 | } | ||||
| 1451 | |||||
| 1452 | if (dev->agp && dev->agp->mtrr) { | ||||
| 1453 | int retcode; | ||||
| 1454 | |||||
| 1455 | retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base, | ||||
| 1456 | dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1)); | ||||
| 1457 | DRM_DEBUG("mtrr_del = %d", retcode)__drm_dbg(DRM_UT_CORE, "mtrr_del = %d", retcode); | ||||
| 1458 | } | ||||
| 1459 | |||||
| 1460 | free(dev->agp, M_DRM145, 0); | ||||
| 1461 | if (dev->pdev && dev->pdev->bus) | ||||
| 1462 | free(dev->pdev->bus->self, M_DRM145, sizeof(struct pci_dev)); | ||||
| 1463 | |||||
| 1464 | if (sc->sc_allocated) | ||||
| 1465 | free(dev, M_DRM145, sizeof(struct drm_device)); | ||||
| 1466 | |||||
| 1467 | return 0; | ||||
| 1468 | } | ||||
| 1469 | |||||
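/*
 * Suspend/resume handshake: drm_quiesce() raises dev->quiesce and, under
 * quiesce_mtx, waits for dev->quiesce_count to drain to zero before the
 * machine sleeps; drm_wakeup() clears the flag and wakes any waiters.
 * The count is presumably held across code paths (e.g. ioctls) that must
 * not run while the device is quiesced.
 */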
| 1470 | void | ||||
| 1471 | drm_quiesce(struct drm_device *dev) | ||||
| 1472 | { | ||||
| 1473 | mtx_enter(&dev->quiesce_mtx); | ||||
| 1474 | dev->quiesce = 1; | ||||
| 1475 | while (dev->quiesce_count > 0) { | ||||
| 1476 | msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx, | ||||
| 1477 | PZERO22, "drmqui", INFSLP0xffffffffffffffffULL); | ||||
| 1478 | } | ||||
| 1479 | mtx_leave(&dev->quiesce_mtx); | ||||
| 1480 | } | ||||
| 1481 | |||||
| 1482 | void | ||||
| 1483 | drm_wakeup(struct drm_device *dev) | ||||
| 1484 | { | ||||
| 1485 | mtx_enter(&dev->quiesce_mtx); | ||||
| 1486 | dev->quiesce = 0; | ||||
| 1487 | wakeup(&dev->quiesce); | ||||
| 1488 | mtx_leave(&dev->quiesce_mtx); | ||||
| 1489 | } | ||||
| 1490 | |||||
| 1491 | int | ||||
| 1492 | drm_activate(struct device *self, int act) | ||||
| 1493 | { | ||||
| 1494 | struct drm_softc *sc = (struct drm_softc *)self; | ||||
| 1495 | struct drm_device *dev = sc->sc_drm; | ||||
| 1496 | |||||
| 1497 | switch (act) { | ||||
| 1498 | case DVACT_QUIESCE2: | ||||
| 1499 | drm_quiesce(dev); | ||||
| 1500 | break; | ||||
| 1501 | case DVACT_WAKEUP5: | ||||
| 1502 | drm_wakeup(dev); | ||||
| 1503 | break; | ||||
| 1504 | } | ||||
| 1505 | |||||
| 1506 | return (0); | ||||
| 1507 | } | ||||
| 1508 | |||||
| 1509 | struct cfattach drm_ca = { | ||||
| 1510 | sizeof(struct drm_softc), drm_probe, drm_attach, | ||||
| 1511 | drm_detach, drm_activate | ||||
| 1512 | }; | ||||
| 1513 | |||||
| 1514 | struct cfdriver drm_cd = { | ||||
| 1515 | 0, "drm", DV_DULL | ||||
| 1516 | }; | ||||
| 1517 | |||||
| 1518 | const struct pci_device_id * | ||||
| 1519 | drm_find_description(int vendor, int device, const struct pci_device_id *idlist) | ||||
| 1520 | { | ||||
| 1521 | int i = 0; | ||||
| 1522 | |||||
| 1523 | for (i = 0; idlist[i].vendor != 0; i++) { | ||||
| 1524 | if ((idlist[i].vendor == vendor) && | ||||
| 1525 | (idlist[i].device == device) && | ||||
| 1526 | (idlist[i].subvendor == PCI_ANY_ID(uint16_t) (~0U)) && | ||||
| 1527 | (idlist[i].subdevice == PCI_ANY_ID(uint16_t) (~0U))) | ||||
| 1528 | return &idlist[i]; | ||||
| 1529 | } | ||||
| 1530 | return NULL((void *)0); | ||||
| 1531 | } | ||||
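/*
 * A minimal sketch of the sentinel-terminated table walked above (the
 * vendor/device numbers are made up for illustration):
 */
#if 0
static const struct pci_device_id example_idlist[] = {
	{ .vendor = 0x1234, .device = 0x5678,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
	{ 0 }	/* vendor == 0 terminates the scan */
};
#endif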
| 1532 | |||||
| 1533 | int | ||||
| 1534 | drm_file_cmp(struct drm_file *f1, struct drm_file *f2) | ||||
| 1535 | { | ||||
| 1536 | return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor); | ||||
| 1537 | } | ||||
| 1538 | |||||
| 1539 | SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp); | ||||
| 1540 | |||||
| 1541 | struct drm_file * | ||||
| 1542 | drm_find_file_by_minor(struct drm_device *dev, int minor) | ||||
| 1543 | { | ||||
| 1544 | struct drm_file key; | ||||
| 1545 | |||||
| 1546 | key.fminor = minor; | ||||
| 1547 | return (SPLAY_FIND(drm_file_tree, &dev->files, &key)drm_file_tree_SPLAY_FIND(&dev->files, &key)); | ||||
| 1548 | } | ||||
| 1549 | |||||
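/*
 * Minor number layout (see also drmopen() below): the low byte of the
 * minor selects the unit, with 0-63 for primary nodes, 64-127 for control
 * nodes and 128+ for render nodes; e.g. minor 130 maps to unit 2's render
 * node.
 */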
| 1550 | struct drm_device * | ||||
| 1551 | drm_get_device_from_kdev(dev_t kdev) | ||||
| 1552 | { | ||||
| 1553 | int unit = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)) & ((1 << CLONE_SHIFT8) - 1); | ||||
| 1554 | /* control */ | ||||
| 1555 | if (unit >= 64 && unit < 128) | ||||
| 1556 | unit -= 64; | ||||
| 1557 | /* render */ | ||||
| 1558 | if (unit >= 128) | ||||
| 1559 | unit -= 128; | ||||
| 1560 | struct drm_softc *sc; | ||||
| 1561 | |||||
| 1562 | if (unit < drm_cd.cd_ndevs) { | ||||
| 1563 | sc = (struct drm_softc *)drm_cd.cd_devs[unit]; | ||||
| 1564 | if (sc) | ||||
| 1565 | return sc->sc_drm; | ||||
| 1566 | } | ||||
| 1567 | |||||
| 1568 | return NULL((void *)0); | ||||
| 1569 | } | ||||
| 1570 | |||||
| 1571 | void | ||||
| 1572 | filt_drmdetach(struct knote *kn) | ||||
| 1573 | { | ||||
| 1574 | struct drm_device *dev = kn->kn_hook; | ||||
| 1575 | int s; | ||||
| 1576 | |||||
| 1577 | s = spltty()splraise(0x9); | ||||
| 1578 | klist_remove_locked(&dev->note, kn); | ||||
| 1579 | splx(s)spllower(s); | ||||
| 1580 | } | ||||
| 1581 | |||||
| 1582 | int | ||||
| 1583 | filt_drmkms(struct knote *kn, long hint) | ||||
| 1584 | { | ||||
| 1585 | if (kn->kn_sfflags & hint) | ||||
| 1586 | kn->kn_fflagskn_kevent.fflags |= hint; | ||||
| 1587 | return (kn->kn_fflagskn_kevent.fflags != 0); | ||||
| 1588 | } | ||||
| 1589 | |||||
| 1590 | void | ||||
| 1591 | filt_drmreaddetach(struct knote *kn) | ||||
| 1592 | { | ||||
| 1593 | struct drm_file *file_priv = kn->kn_hook; | ||||
| 1594 | int s; | ||||
| 1595 | |||||
| 1596 | s = spltty()splraise(0x9); | ||||
| 1597 | klist_remove_locked(&file_priv->rsel.si_note, kn); | ||||
| 1598 | splx(s)spllower(s); | ||||
| 1599 | } | ||||
| 1600 | |||||
| 1601 | int | ||||
| 1602 | filt_drmread(struct knote *kn, long hint) | ||||
| 1603 | { | ||||
| 1604 | struct drm_file *file_priv = kn->kn_hook; | ||||
| 1605 | int val = 0; | ||||
| 1606 | |||||
| 1607 | if ((hint & NOTE_SUBMIT0x01000000) == 0) | ||||
| 1608 | mtx_enter(&file_priv->minor->dev->event_lock); | ||||
| 1609 | val = !list_empty(&file_priv->event_list); | ||||
| 1610 | if ((hint & NOTE_SUBMIT0x01000000) == 0) | ||||
| 1611 | mtx_leave(&file_priv->minor->dev->event_lock); | ||||
| 1612 | return (val); | ||||
| 1613 | } | ||||
| 1614 | |||||
| 1615 | const struct filterops drm_filtops = { | ||||
| 1616 | .f_flags = FILTEROP_ISFD0x00000001, | ||||
| 1617 | .f_attach = NULL((void *)0), | ||||
| 1618 | .f_detach = filt_drmdetach, | ||||
| 1619 | .f_event = filt_drmkms, | ||||
| 1620 | }; | ||||
| 1621 | |||||
| 1622 | const struct filterops drmread_filtops = { | ||||
| 1623 | .f_flags = FILTEROP_ISFD0x00000001, | ||||
| 1624 | .f_attach = NULL((void *)0), | ||||
| 1625 | .f_detach = filt_drmreaddetach, | ||||
| 1626 | .f_event = filt_drmread, | ||||
| 1627 | }; | ||||
| 1628 | |||||
| 1629 | int | ||||
| 1630 | drmkqfilter(dev_t kdev, struct knote *kn) | ||||
| 1631 | { | ||||
| 1632 | struct drm_device *dev = NULL((void *)0); | ||||
| 1633 | struct drm_file *file_priv = NULL((void *)0); | ||||
| 1634 | int s; | ||||
| 1635 | |||||
| 1636 | dev = drm_get_device_from_kdev(kdev); | ||||
| 1637 | if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0)) | ||||
| 1638 | return (ENXIO6); | ||||
| 1639 | |||||
| 1640 | switch (kn->kn_filterkn_kevent.filter) { | ||||
| 1641 | case EVFILT_READ(-1): | ||||
| 1642 | mutex_lock(&dev->struct_mutex)rw_enter_write(&dev->struct_mutex); | ||||
| 1643 | file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8))); | ||||
| 1644 | mutex_unlock(&dev->struct_mutex)rw_exit_write(&dev->struct_mutex); | ||||
| 1645 | if (file_priv == NULL((void *)0)) | ||||
| 1646 | return (ENXIO6); | ||||
| 1647 | |||||
| 1648 | kn->kn_fop = &drmread_filtops; | ||||
| 1649 | kn->kn_hook = file_priv; | ||||
| 1650 | |||||
| 1651 | s = spltty()splraise(0x9); | ||||
| 1652 | klist_insert_locked(&file_priv->rsel.si_note, kn); | ||||
| 1653 | splx(s)spllower(s); | ||||
| 1654 | break; | ||||
| 1655 | case EVFILT_DEVICE(-8): | ||||
| 1656 | kn->kn_fop = &drm_filtops; | ||||
| 1657 | kn->kn_hook = dev; | ||||
| 1658 | |||||
| 1659 | s = spltty()splraise(0x9); | ||||
| 1660 | klist_insert_locked(&dev->note, kn); | ||||
| 1661 | splx(s)spllower(s); | ||||
| 1662 | break; | ||||
| 1663 | default: | ||||
| 1664 | return (EINVAL22); | ||||
| 1665 | } | ||||
| 1666 | |||||
| 1667 | return (0); | ||||
| 1668 | } | ||||
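/*
 * Two kqueue filters are exported: EVFILT_READ fires when DRM events are
 * queued on the opening file (see drmread() and drm_dequeue_event() below),
 * while EVFILT_DEVICE watches dev->note, which is presumably knoted on
 * device state changes such as hotplug.
 */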
| 1669 | |||||
| 1670 | int | ||||
| 1671 | drmopen(dev_t kdev, int flags, int fmt, struct proc *p) | ||||
| 1672 | { | ||||
| 1673 | struct drm_device *dev = NULL((void *)0); | ||||
| 1674 | struct drm_file *file_priv; | ||||
| 1675 | struct drm_minor *dm; | ||||
| 1676 | int ret = 0; | ||||
| 1677 | int dminor, realminor, minor_type; | ||||
| 1678 | int need_setup = 0; | ||||
| 1679 | |||||
| 1680 | dev = drm_get_device_from_kdev(kdev); | ||||
| 1681 | if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0)) | ||||
| 1682 | return (ENXIO6); | ||||
| 1683 | |||||
| 1684 | DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))__drm_dbg(DRM_UT_CORE, "open_count = %d\n", ({ typeof(*(& dev->open_count)) __tmp = *(volatile typeof(*(&dev-> open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer (); __tmp; })); | ||||
| 1685 | |||||
| 1686 | if (flags & O_EXCL0x0800) | ||||
| 1687 | return (EBUSY16); /* No exclusive opens */ | ||||
| 1688 | |||||
| 1689 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1690 | mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex); | ||||
| 1691 | |||||
| 1692 | if (!atomic_fetch_inc(&dev->open_count)__sync_fetch_and_add(&dev->open_count, 1)) | ||||
| 1693 | need_setup = 1; | ||||
| 1694 | |||||
| 1695 | dminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)); | ||||
| 1696 | realminor = dminor & ((1 << CLONE_SHIFT8) - 1); | ||||
| 1697 | if (realminor < 64) | ||||
| 1698 | minor_type = DRM_MINOR_PRIMARY; | ||||
| 1699 | else if (realminor >= 64 && realminor < 128) | ||||
| 1700 | minor_type = DRM_MINOR_CONTROL; | ||||
| 1701 | else | ||||
| 1702 | minor_type = DRM_MINOR_RENDER; | ||||
| 1703 | |||||
| 1704 | dm = *drm_minor_get_slot(dev, minor_type); | ||||
| 1705 | dm->index = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)); | ||||
| 1706 | |||||
| 1707 | file_priv = drm_file_alloc(dm); | ||||
| 1708 | if (IS_ERR(file_priv)) { | ||||
| 1709 | ret = ENOMEM12; | ||||
| 1710 | goto err; | ||||
| 1711 | } | ||||
| 1712 | |||||
| 1713 | /* first opener automatically becomes master */ | ||||
| 1714 | if (drm_is_primary_client(file_priv)) { | ||||
| 1715 | ret = drm_master_open(file_priv); | ||||
| 1716 | if (ret != 0) | ||||
| 1717 | goto out_file_free; | ||||
| 1718 | } | ||||
| 1719 | |||||
| 1720 | file_priv->filp = (void *)file_priv; | ||||
| 1721 | file_priv->fminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)); | ||||
| 1722 | |||||
| 1723 | mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex); | ||||
| 1724 | SPLAY_INSERT(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_INSERT(&dev->files, file_priv); | ||||
| 1725 | mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex); | ||||
| 1726 | |||||
| 1727 | if (need_setup) { | ||||
| 1728 | ret = drm_legacy_setup(dev); | ||||
| 1729 | if (ret) | ||||
| 1730 | goto out_file_free; | ||||
| 1731 | } | ||||
| 1732 | |||||
| 1733 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1734 | mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex); | ||||
| 1735 | |||||
| 1736 | return 0; | ||||
| 1737 | |||||
| 1738 | out_file_free: | ||||
| 1739 | drm_file_free(file_priv); | ||||
| 1740 | err: | ||||
| 1741 | atomic_dec(&dev->open_count)__sync_fetch_and_sub(&dev->open_count, 1); | ||||
| 1742 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1743 | mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex); | ||||
| 1744 | return (ret); | ||||
| 1745 | } | ||||
| 1746 | |||||
| 1747 | int | ||||
| 1748 | drmclose(dev_t kdev, int flags, int fmt, struct proc *p) | ||||
| 1749 | { | ||||
| 1750 | struct drm_device *dev = drm_get_device_from_kdev(kdev); | ||||
| 1751 | struct drm_file *file_priv; | ||||
| 1752 | int retcode = 0; | ||||
| 1753 | |||||
| 1754 | if (dev == NULL((void *)0)) | ||||
| 1755 | return (ENXIO6); | ||||
| 1756 | |||||
| 1757 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1758 | mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex); | ||||
| 1759 | |||||
| 1760 | DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))__drm_dbg(DRM_UT_CORE, "open_count = %d\n", ({ typeof(*(& dev->open_count)) __tmp = *(volatile typeof(*(&dev-> open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer (); __tmp; })); | ||||
| 1761 | |||||
| 1762 | mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex); | ||||
| 1763 | file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8))); | ||||
| 1764 | if (file_priv == NULL((void *)0)) { | ||||
| 1765 | DRM_ERROR("can't find authenticator\n")__drm_err("can't find authenticator\n"); | ||||
| 1766 | retcode = EINVAL22; | ||||
| 1767 | mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex); | ||||
| 1768 | goto done; | ||||
| 1769 | } | ||||
| 1770 | |||||
| 1771 | SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_REMOVE(&dev->files, file_priv); | ||||
| 1772 | mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex); | ||||
| 1773 | drm_file_free(file_priv); | ||||
| 1774 | done: | ||||
| 1775 | if (atomic_dec_and_test(&dev->open_count)(__sync_sub_and_fetch((&dev->open_count), 1) == 0)) | ||||
| 1776 | drm_lastclose(dev); | ||||
| 1777 | |||||
| 1778 | if (drm_dev_needs_global_mutex(dev)) | ||||
| 1779 | mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex); | ||||
| 1780 | |||||
| 1781 | return (retcode); | ||||
| 1782 | } | ||||
| 1783 | |||||
| 1784 | int | ||||
| 1785 | drmread(dev_t kdev, struct uio *uio, int ioflag) | ||||
| 1786 | { | ||||
| 1787 | struct drm_device *dev = drm_get_device_from_kdev(kdev); | ||||
| 1788 | struct drm_file *file_priv; | ||||
| 1789 | struct drm_pending_event *ev; | ||||
| 1790 | int error = 0; | ||||
| 1791 | |||||
| 1792 | if (dev == NULL((void *)0)) | ||||
| 1793 | return (ENXIO6); | ||||
| 1794 | |||||
| 1795 | mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex); | ||||
| 1796 | file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8))); | ||||
| 1797 | mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex); | ||||
| 1798 | if (file_priv == NULL((void *)0)) | ||||
| 1799 | return (ENXIO6); | ||||
| 1800 | |||||
| 1801 | /* | ||||
| 1802 | * The semantics are a little weird here. We will wait until we | ||||
| 1803 | * have events to process, but as soon as we have events we will | ||||
| 1804 | * only deliver as many as we have. | ||||
| 1805 | * Note that events are atomic: if a whole event will not fit in | ||||
| 1806 | * the read buffer, we won't read any of it out. | ||||
| 1807 | */ | ||||
| 1808 | mtx_enter(&dev->event_lock); | ||||
| 1809 | while (error == 0 && list_empty(&file_priv->event_list)) { | ||||
| 1810 | if (ioflag & IO_NDELAY0x10) { | ||||
| 1811 | mtx_leave(&dev->event_lock); | ||||
| 1812 | return (EAGAIN35); | ||||
| 1813 | } | ||||
| 1814 | error = msleep_nsec(&file_priv->event_wait, &dev->event_lock, | ||||
| 1815 | PWAIT32 | PCATCH0x100, "drmread", INFSLP0xffffffffffffffffULL); | ||||
| 1816 | } | ||||
| 1817 | if (error) { | ||||
| 1818 | mtx_leave(&dev->event_lock); | ||||
| 1819 | return (error); | ||||
| 1820 | } | ||||
| 1821 | while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) { | ||||
| 1822 | MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p held in %s" , (&dev->event_lock), __func__); } while (0); | ||||
| 1823 | /* XXX we always destroy the event on error. */ | ||||
| 1824 | error = uiomove(ev->event, ev->event->length, uio); | ||||
| 1825 | kfree(ev); | ||||
| 1826 | if (error) | ||||
| 1827 | break; | ||||
| 1828 | mtx_enter(&dev->event_lock); | ||||
| 1829 | } | ||||
| 1830 | MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p held in %s" , (&dev->event_lock), __func__); } while (0); | ||||
| 1831 | |||||
| 1832 | return (error); | ||||
| 1833 | } | ||||
| 1834 | |||||
| 1835 | /* | ||||
| 1836 | * Dequeue an event from the file priv in question, returning 1 if an | ||||
| 1837 | * event was found. We take the resid from the read as a parameter because | ||||
| 1838 | * we will only dequeue an event if the read buffer has space to fit the | ||||
| 1839 | * entire thing. | ||||
| 1840 | * | ||||
| 1841 | * We are called locked, but we will *unlock* the queue on return so that | ||||
| 1842 | * we may sleep to copyout the event. | ||||
| 1843 | */ | ||||
| 1844 | int | ||||
| 1845 | drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv, | ||||
| 1846 | size_t resid, struct drm_pending_event **out) | ||||
| 1847 | { | ||||
| 1848 | struct drm_pending_event *e = NULL((void *)0); | ||||
| 1849 | int gotone = 0; | ||||
| 1850 | |||||
| 1851 | MUTEX_ASSERT_LOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner != ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p not held in %s" , (&dev->event_lock), __func__); } while (0); | ||||
| 1852 | |||||
| 1853 | *out = NULL((void *)0); | ||||
| 1854 | if (list_empty(&file_priv->event_list)) | ||||
| 1855 | goto out; | ||||
| 1856 | e = list_first_entry(&file_priv->event_list,({ const __typeof( ((struct drm_pending_event *)0)->link ) *__mptr = ((&file_priv->event_list)->next); (struct drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct drm_pending_event, link) );}) | ||||
| 1857 | struct drm_pending_event, link)({ const __typeof( ((struct drm_pending_event *)0)->link ) *__mptr = ((&file_priv->event_list)->next); (struct drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct drm_pending_event, link) );}); | ||||
| 1858 | if (e->event->length > resid) | ||||
| 1859 | goto out; | ||||
| 1860 | |||||
| 1861 | file_priv->event_space += e->event->length; | ||||
| 1862 | list_del(&e->link); | ||||
| 1863 | *out = e; | ||||
| 1864 | gotone = 1; | ||||
| 1865 | |||||
| 1866 | out: | ||||
| 1867 | mtx_leave(&dev->event_lock); | ||||
| 1868 | |||||
| 1869 | return (gotone); | ||||
| 1870 | } | ||||
| 1871 | |||||
| 1872 | int | ||||
| 1873 | drmpoll(dev_t kdev, int events, struct proc *p) | ||||
| 1874 | { | ||||
| 1875 | struct drm_device *dev = drm_get_device_from_kdev(kdev); | ||||
| 1876 | struct drm_file *file_priv; | ||||
| 1877 | int revents = 0; | ||||
| 1878 | |||||
| 1879 | if (dev == NULL((void *)0)) | ||||
| 1880 | return (POLLERR0x0008); | ||||
| 1881 | |||||
| 1882 | mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex); | ||||
| 1883 | file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8))); | ||||
| 1884 | mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex); | ||||
| 1885 | if (file_priv == NULL((void *)0)) | ||||
| 1886 | return (POLLERR0x0008); | ||||
| 1887 | |||||
| 1888 | mtx_enter(&dev->event_lock); | ||||
| 1889 | if (events & (POLLIN0x0001 | POLLRDNORM0x0040)) { | ||||
| 1890 | if (!list_empty(&file_priv->event_list)) | ||||
| 1891 | revents |= events & (POLLIN0x0001 | POLLRDNORM0x0040); | ||||
| 1892 | else | ||||
| 1893 | selrecord(p, &file_priv->rsel); | ||||
| 1894 | } | ||||
| 1895 | mtx_leave(&dev->event_lock); | ||||
| 1896 | |||||
| 1897 | return (revents); | ||||
| 1898 | } | ||||
| 1899 | |||||
| 1900 | paddr_t | ||||
| 1901 | drmmmap(dev_t kdev, off_t offset, int prot) | ||||
| 1902 | { | ||||
| 1903 | return -1; | ||||
| 1904 | } | ||||
| 1905 | |||||
| 1906 | struct drm_dmamem * | ||||
| 1907 | drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment, | ||||
| 1908 | int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags) | ||||
| 1909 | { | ||||
| 1910 | struct drm_dmamem *mem; | ||||
| 1911 | size_t strsize; | ||||
| 1912 | /* | ||||
| 1913 | * segs is the last member of the struct since we modify the size | ||||
| 1914 | * to allow extra segments if more than one is allowed. | ||||
| 1915 | */ | ||||
| 1916 | strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1)); | ||||
| 1917 | mem = malloc(strsize, M_DRM145, M_NOWAIT0x0002 | M_ZERO0x0008); | ||||
| 1918 | if (mem == NULL((void *)0)) | ||||
| 1919 | return (NULL((void *)0)); | ||||
| 1920 | |||||
| 1921 | mem->size = size; | ||||
| 1922 | |||||
| 1923 | if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz ), (0), (0x0001 | 0x0002), (&mem->map)) | ||||
| 1924 | BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map)(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz ), (0), (0x0001 | 0x0002), (&mem->map)) != 0) | ||||
| 1925 | goto strfree; | ||||
| 1926 | |||||
| 1927 | if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0), (mem->segs), (nsegments), (&mem->nsegs), (0x0001 | 0x1000)) | ||||
| 1928 | &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0), (mem->segs), (nsegments), (&mem->nsegs), (0x0001 | 0x1000)) != 0) | ||||
| 1929 | goto destroy; | ||||
| 1930 | |||||
| 1931 | if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs ), (size), (&mem->kva), (0x0001 | mapflags)) | ||||
| 1932 | &mem->kva, BUS_DMA_NOWAIT | mapflags)(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs ), (size), (&mem->kva), (0x0001 | mapflags)) != 0) | ||||
| 1933 | goto free; | ||||
| 1934 | |||||
| 1935 | if (bus_dmamap_load(dmat, mem->map, mem->kva, size,(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva ), (size), (((void *)0)), (0x0001 | loadflags)) | ||||
| 1936 | NULL, BUS_DMA_NOWAIT | loadflags)(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva ), (size), (((void *)0)), (0x0001 | loadflags)) != 0) | ||||
| 1937 | goto unmap; | ||||
| 1938 | |||||
| 1939 | return (mem); | ||||
| 1940 | |||||
| 1941 | unmap: | ||||
| 1942 | bus_dmamem_unmap(dmat, mem->kva, size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (size)); | ||||
| 1943 | free: | ||||
| 1944 | bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs )); | ||||
| 1945 | destroy: | ||||
| 1946 | bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map)); | ||||
| 1947 | strfree: | ||||
| 1948 | free(mem, M_DRM145, 0); | ||||
| 1949 | |||||
| 1950 | return (NULL((void *)0)); | ||||
| 1951 | } | ||||
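/*
 * The unwind labels above release resources in the reverse order of the
 * four-step bus_dma setup (dmamap_create, dmamem_alloc, dmamem_map,
 * dmamap_load); drm_pci_alloc() below is a typical consumer of this helper.
 */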
| 1952 | |||||
| 1953 | void | ||||
| 1954 | drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem) | ||||
| 1955 | { | ||||
| 1956 | if (mem == NULL((void *)0)) | ||||
| 1957 | return; | ||||
| 1958 | |||||
| 1959 | bus_dmamap_unload(dmat, mem->map)(*(dmat)->_dmamap_unload)((dmat), (mem->map)); | ||||
| 1960 | bus_dmamem_unmap(dmat, mem->kva, mem->size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (mem->size )); | ||||
| 1961 | bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs )); | ||||
| 1962 | bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map)); | ||||
| 1963 | free(mem, M_DRM145, 0); | ||||
| 1964 | } | ||||
| 1965 | |||||
| 1966 | struct drm_dma_handle * | ||||
| 1967 | drm_pci_alloc(struct drm_device *dev, size_t size, size_t align) | ||||
| 1968 | { | ||||
| 1969 | struct drm_dma_handle *dmah; | ||||
| 1970 | |||||
| 1971 | dmah = malloc(sizeof(*dmah), M_DRM145, M_WAITOK0x0001); | ||||
| 1972 | dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size, | ||||
| 1973 | BUS_DMA_NOCACHE0x0800, 0); | ||||
| 1974 | if (dmah->mem == NULL((void *)0)) { | ||||
| 1975 | free(dmah, M_DRM145, sizeof(*dmah)); | ||||
| 1976 | return NULL((void *)0); | ||||
| 1977 | } | ||||
| 1978 | dmah->busaddr = dmah->mem->segs[0].ds_addr; | ||||
| 1979 | dmah->size = dmah->mem->size; | ||||
| 1980 | dmah->vaddr = dmah->mem->kva; | ||||
| 1981 | return (dmah); | ||||
| 1982 | } | ||||
| 1983 | |||||
| 1984 | void | ||||
| 1985 | drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah) | ||||
| 1986 | { | ||||
| 1987 | if (dmah == NULL((void *)0)) | ||||
| 1988 | return; | ||||
| 1989 | |||||
| 1990 | drm_dmamem_free(dev->dmat, dmah->mem); | ||||
| 1991 | free(dmah, M_DRM145, sizeof(*dmah)); | ||||
| 1992 | } | ||||
| 1993 | |||||
| 1994 | /* | ||||
| 1995 | * Compute order (the smallest n such that size <= 1 << n). Can be made faster. | ||||
| 1996 | */ | ||||
| 1997 | int | ||||
| 1998 | drm_order(unsigned long size) | ||||
| 1999 | { | ||||
| 2000 | int order; | ||||
| 2001 | unsigned long tmp; | ||||
| 2002 | |||||
| 2003 | for (order = 0, tmp = size; tmp >>= 1; ++order) | ||||
| 2004 | ; | ||||
| 2005 | |||||
| 2006 | if (size & ~(1 << order)) | ||||
| 2007 | ++order; | ||||
| 2008 | |||||
| 2009 | return order; | ||||
| 2010 | } | ||||
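/*
 * Worked examples (values traced by hand through the loop above):
 *	drm_order(1)    == 0
 *	drm_order(4096) == 12	(4096 == 1 << 12)
 *	drm_order(4097) == 13	(rounded up to the next power of two)
 */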
| 2011 | |||||
| 2012 | int | ||||
| 2013 | drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||||
| 2014 | { | ||||
| 2015 | struct drm_pciinfo *info = data; | ||||
| 2016 | |||||
| 2017 | if (dev->pdev == NULL((void *)0)) | ||||
| 2018 | return -ENOTTY25; | ||||
| 2019 | |||||
| 2020 | info->domain = dev->pdev->bus->domain_nr; | ||||
| 2021 | info->bus = dev->pdev->bus->number; | ||||
| 2022 | info->dev = PCI_SLOT(dev->pdev->devfn)((dev->pdev->devfn) >> 3); | ||||
| 2023 | info->func = PCI_FUNC(dev->pdev->devfn)((dev->pdev->devfn) & 0x7); | ||||
| 2024 | info->vendor_id = dev->pdev->vendor; | ||||
| 2025 | info->device_id = dev->pdev->device; | ||||
| 2026 | info->subvendor_id = dev->pdev->subsystem_vendor; | ||||
| 2027 | info->subdevice_id = dev->pdev->subsystem_device; | ||||
| 2028 | info->revision_id = 0; | ||||
| 2029 | |||||
| 2030 | return 0; | ||||
| 2031 | } |
| 1 | /* | |||
| 2 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | |||
| 3 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | |||
| 4 | * Copyright (c) 2009-2010, Code Aurora Forum. | |||
| 5 | * Copyright 2016 Intel Corp. | |||
| 6 | * | |||
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 8 | * copy of this software and associated documentation files (the "Software"), | |||
| 9 | * to deal in the Software without restriction, including without limitation | |||
| 10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 11 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 12 | * Software is furnished to do so, subject to the following conditions: | |||
| 13 | * | |||
| 14 | * The above copyright notice and this permission notice (including the next | |||
| 15 | * paragraph) shall be included in all copies or substantial portions of the | |||
| 16 | * Software. | |||
| 17 | * | |||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 21 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
| 22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
| 23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
| 24 | * OTHER DEALINGS IN THE SOFTWARE. | |||
| 25 | */ | |||
| 26 | ||||
| 27 | #ifndef _DRM_DRV_H_ | |||
| 28 | #define _DRM_DRV_H_ | |||
| 29 | ||||
| 30 | #include <linux/list.h> | |||
| 31 | #include <linux/irqreturn.h> | |||
| 32 | ||||
| 33 | #include <drm/drm_device.h> | |||
| 34 | ||||
| 35 | #include <uvm/uvm_extern.h> | |||
| 36 | ||||
| 37 | struct drm_file; | |||
| 38 | struct drm_gem_object; | |||
| 39 | struct drm_master; | |||
| 40 | struct drm_minor; | |||
| 41 | struct dma_buf_attachment; | |||
| 42 | struct drm_display_mode; | |||
| 43 | struct drm_mode_create_dumb; | |||
| 44 | struct drm_printer; | |||
| 45 | ||||
| 46 | /** | |||
| 47 | * enum drm_driver_feature - feature flags | |||
| 48 | * | |||
| 49 | * See &drm_driver.driver_features, drm_device.driver_features and | |||
| 50 | * drm_core_check_feature(). | |||
| 51 | */ | |||
| 52 | enum drm_driver_feature { | |||
| 53 | /** | |||
| 54 | * @DRIVER_GEM: | |||
| 55 | * | |||
| 56 | * Driver uses the GEM memory manager. This should be set for all modern | |||
| 57 | * drivers. | |||
| 58 | */ | |||
| 59 | DRIVER_GEM = BIT(0)(1UL << (0)), | |||
| 60 | /** | |||
| 61 | * @DRIVER_MODESET: | |||
| 62 | * | |||
| 63 | * Driver supports mode setting interfaces (KMS). | |||
| 64 | */ | |||
| 65 | DRIVER_MODESET = BIT(1)(1UL << (1)), | |||
| 66 | /** | |||
| 67 | * @DRIVER_RENDER: | |||
| 68 | * | |||
| 69 | * Driver supports dedicated render nodes. See also the :ref:`section on | |||
| 70 | * render nodes <drm_render_node>` for details. | |||
| 71 | */ | |||
| 72 | DRIVER_RENDER = BIT(3)(1UL << (3)), | |||
| 73 | /** | |||
| 74 | * @DRIVER_ATOMIC: | |||
| 75 | * | |||
| 76 | * Driver supports the full atomic modesetting userspace API. Drivers | |||
| 77 | * which only use atomic internally, but do not support the full | |||
| 78 | * userspace API (e.g. not all properties converted to atomic, or | |||
| 79 | * multi-plane updates are not guaranteed to be tear-free) should not | |||
| 80 | * set this flag. | |||
| 81 | */ | |||
| 82 | DRIVER_ATOMIC = BIT(4)(1UL << (4)), | |||
| 83 | /** | |||
| 84 | * @DRIVER_SYNCOBJ: | |||
| 85 | * | |||
| 86 | * Driver supports &drm_syncobj for explicit synchronization of command | |||
| 87 | * submission. | |||
| 88 | */ | |||
| 89 | DRIVER_SYNCOBJ = BIT(5)(1UL << (5)), | |||
| 90 | /** | |||
| 91 | * @DRIVER_SYNCOBJ_TIMELINE: | |||
| 92 | * | |||
| 93 | * Driver supports the timeline flavor of &drm_syncobj for explicit | |||
| 94 | * synchronization of command submission. | |||
| 95 | */ | |||
| 96 | DRIVER_SYNCOBJ_TIMELINE = BIT(6)(1UL << (6)), | |||
| 97 | ||||
| 98 | /* IMPORTANT: Below are all the legacy flags, add new ones above. */ | |||
| 99 | ||||
| 100 | /** | |||
| 101 | * @DRIVER_USE_AGP: | |||
| 102 | * | |||
| 103 | * Set up DRM AGP support; see drm_agp_init(). The DRM core will manage | |||
| 104 | * AGP resources. New drivers don't need this. | |||
| 105 | */ | |||
| 106 | DRIVER_USE_AGP = BIT(25)(1UL << (25)), | |||
| 107 | /** | |||
| 108 | * @DRIVER_LEGACY: | |||
| 109 | * | |||
| 110 | * Denote a legacy driver using shadow attach. Do not use. | |||
| 111 | */ | |||
| 112 | DRIVER_LEGACY = BIT(26)(1UL << (26)), | |||
| 113 | /** | |||
| 114 | * @DRIVER_PCI_DMA: | |||
| 115 | * | |||
| 116 | * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace | |||
| 117 | * will be enabled. Only for legacy drivers. Do not use. | |||
| 118 | */ | |||
| 119 | DRIVER_PCI_DMA = BIT(27)(1UL << (27)), | |||
| 120 | /** | |||
| 121 | * @DRIVER_SG: | |||
| 122 | * | |||
| 123 | * Driver can perform scatter/gather DMA, allocation and mapping of | |||
| 124 | * scatter/gather buffers will be enabled. Only for legacy drivers. Do | |||
| 125 | * not use. | |||
| 126 | */ | |||
| 127 | DRIVER_SG = BIT(28)(1UL << (28)), | |||
| 128 | ||||
| 129 | /** | |||
| 130 | * @DRIVER_HAVE_DMA: | |||
| 131 | * | |||
| 132 | * Driver supports DMA, the userspace DMA API will be supported. Only | |||
| 133 | * for legacy drivers. Do not use. | |||
| 134 | */ | |||
| 135 | DRIVER_HAVE_DMA = BIT(29)(1UL << (29)), | |||
| 136 | /** | |||
| 137 | * @DRIVER_HAVE_IRQ: | |||
| 138 | * | |||
| 139 | * Legacy irq support. Only for legacy drivers. Do not use. | |||
| 140 | * | |||
| 141 | * New drivers can either use the drm_irq_install() and | |||
| 142 | * drm_irq_uninstall() helper functions, or roll their own irq support | |||
| 143 | * code by calling request_irq() directly. | |||
| 144 | */ | |||
| 145 | DRIVER_HAVE_IRQ = BIT(30)(1UL << (30)), | |||
| 146 | /** | |||
| 147 | * @DRIVER_KMS_LEGACY_CONTEXT: | |||
| 148 | * | |||
| 149 | * Used only by nouveau for backwards compatibility with existing | |||
| 150 | * userspace. Do not use. | |||
| 151 | */ | |||
| 152 | DRIVER_KMS_LEGACY_CONTEXT = BIT(31)(1UL << (31)), | |||
| 153 | }; | |||
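/*
 * A driver advertises the features it implements by OR-ing these flags into
 * &drm_driver.driver_features; the core then gates optional paths with
 * drm_core_check_feature().  A rough sketch (hypothetical driver, not one
 * from this tree):
 */
#if 0
static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* ... remaining hooks and identification fields ... */
};
#endif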
| 154 | ||||
| 155 | /** | |||
| 156 | * struct drm_driver - DRM driver structure | |||
| 157 | * | |||
| 158 | * This structure represents the common code for a family of cards. There will be | |||
| 159 | * one &struct drm_device for each card present in this family. It contains lots | |||
| 160 | * of vfunc entries, and a pile of those probably should be moved to more | |||
| 161 | * appropriate places like &drm_mode_config_funcs or into a new operations | |||
| 162 | * structure for GEM drivers. | |||
| 163 | */ | |||
| 164 | struct drm_driver { | |||
| 165 | /** | |||
| 166 | * @load: | |||
| 167 | * | |||
| 168 | * Backward-compatible driver callback to complete initialization steps | |||
| 169 | * after the driver is registered. For this reason, it may suffer from | |||
| 170 | * race conditions and its use is deprecated for new drivers. It is | |||
| 171 | * therefore only supported for existing drivers not yet converted to | |||
| 172 | * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for | |||
| 173 | * the proper and race-free way to set up a &struct drm_device. | |||
| 174 | * | |||
| 175 | * This is deprecated, do not use! | |||
| 176 | * | |||
| 177 | * Returns: | |||
| 178 | * | |||
| 179 | * Zero on success, non-zero value on failure. | |||
| 180 | */ | |||
| 181 | int (*load) (struct drm_device *, unsigned long flags); | |||
| 182 | ||||
| 183 | /** | |||
| 184 | * @open: | |||
| 185 | * | |||
| 186 | * Driver callback when a new &struct drm_file is opened. Useful for | |||
| 187 | * setting up driver-private data structures like buffer allocators, | |||
| 188 | * execution contexts or similar things. Such driver-private resources | |||
| 189 | * must be released again in @postclose. | |||
| 190 | * | |||
| 191 | * Since the display/modeset side of DRM can only be owned by exactly | |||
| 192 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) | |||
| 193 | * there should never be a need to set up any modeset related resources | |||
| 194 | * in this callback. Doing so would be a driver design bug. | |||
| 195 | * | |||
| 196 | * Returns: | |||
| 197 | * | |||
| 198 | * 0 on success, a negative error code on failure, which will be | |||
| 199 | * promoted to userspace as the result of the open() system call. | |||
| 200 | */ | |||
| 201 | int (*open) (struct drm_device *, struct drm_file *); | |||
| 202 | ||||
| 203 | /** | |||
| 204 | * @postclose: | |||
| 205 | * | |||
| 206 | * One of the driver callbacks when a new &struct drm_file is closed. | |||
| 207 | * Useful for tearing down driver-private data structures allocated in | |||
| 208 | * @open like buffer allocators, execution contexts or similar things. | |||
| 209 | * | |||
| 210 | * Since the display/modeset side of DRM can only be owned by exactly | |||
| 211 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) | |||
| 212 | * there should never be a need to tear down any modeset related | |||
| 213 | * resources in this callback. Doing so would be a driver design bug. | |||
| 214 | */ | |||
| 215 | void (*postclose) (struct drm_device *, struct drm_file *); | |||
| 216 | ||||
| 217 | /** | |||
| 218 | * @lastclose: | |||
| 219 | * | |||
| 220 | * Called when the last &struct drm_file has been closed and there's | |||
| 221 | * currently no userspace client for the &struct drm_device. | |||
| 222 | * | |||
| 223 | * Modern drivers should only use this to force-restore the fbdev | |||
| 224 | * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). | |||
| 225 | * Anything else would indicate there's something seriously wrong. | |||
| 226 | * Modern drivers can also use this to execute delayed power switching | |||
| 227 | * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` | |||
| 228 | * infrastructure. | |||
| 229 | * | |||
| 230 | * This is called after @postclose hook has been called. | |||
| 231 | * | |||
| 232 | * NOTE: | |||
| 233 | * | |||
| 234 | * All legacy drivers use this callback to de-initialize the hardware. | |||
| 235 | * This is purely because of the shadow-attach model, where the DRM | |||
| 236 | * kernel driver does not really own the hardware. Instead ownershipe is | |||
| 237 | * handled with the help of userspace through an inheritedly racy dance | |||
| 238 | * to set/unset the VT into raw mode. | |||
| 239 | * | |||
| 240 | * Legacy drivers initialize the hardware in the @firstopen callback, | |||
| 241 | * which isn't even called for modern drivers. | |||
| 242 | */ | |||
| 243 | void (*lastclose) (struct drm_device *); | |||
| 244 | ||||
| 245 | /** | |||
| 246 | * @unload: | |||
| 247 | * | |||
| 248 | * Reverse the effects of the driver load callback. Ideally, | |||
| 249 | * the clean up performed by the driver should happen in the | |||
| 250 | * reverse order of the initialization. Similarly to the load | |||
| 251 | * hook, this handler is deprecated and its usage should be | |||
| 252 | * dropped in favor of an open-coded teardown function at the | |||
| 253 | * driver layer. See drm_dev_unregister() and drm_dev_put() | |||
| 254 | * for the proper way to remove a &struct drm_device. | |||
| 255 | * | |||
| 256 | * The unload() hook is called right after unregistering | |||
| 257 | * the device. | |||
| 258 | * | |||
| 259 | */ | |||
| 260 | void (*unload) (struct drm_device *); | |||
| 261 | ||||
| 262 | /** | |||
| 263 | * @release: | |||
| 264 | * | |||
| 265 | * Optional callback for destroying device data after the final | |||
| 266 | * reference is released, i.e. the device is being destroyed. | |||
| 267 | * | |||
| 268 | * This is deprecated, clean up all memory allocations associated with a | |||
| 269 | * &drm_device using drmm_add_action(), drmm_kmalloc() and related | |||
| 270 | * managed resources functions. | |||
| 271 | */ | |||
| 272 | void (*release) (struct drm_device *); | |||
| 273 | ||||
| 274 | /** | |||
| 275 | * @irq_handler: | |||
| 276 | * | |||
| 277 | * Interrupt handler called when using drm_irq_install(). Not used by | |||
| 278 | * drivers which implement their own interrupt handling. | |||
| 279 | */ | |||
| 280 | irqreturn_t(*irq_handler) (int irq, void *arg); | |||
| 281 | ||||
| 282 | /** | |||
| 283 | * @irq_preinstall: | |||
| 284 | * | |||
| 285 | * Optional callback used by drm_irq_install() which is called before | |||
| 286 | * the interrupt handler is registered. This should be used to clear out | |||
| 287 | * any pending interrupts (from e.g. firmware based drives) and reset | |||
| 288 | * the interrupt handling registers. | |||
| 289 | */ | |||
| 290 | void (*irq_preinstall) (struct drm_device *dev); | |||
| 291 | ||||
| 292 | /** | |||
| 293 | * @irq_postinstall: | |||
| 294 | * | |||
| 295 | * Optional callback used by drm_irq_install() which is called after | |||
| 296 | * the interrupt handler is registered. This should be used to enable | |||
| 297 | * interrupt generation in the hardware. | |||
| 298 | */ | |||
| 299 | int (*irq_postinstall) (struct drm_device *dev); | |||
| 300 | ||||
| 301 | /** | |||
| 302 | * @irq_uninstall: | |||
| 303 | * | |||
| 304 | * Optional callback used by drm_irq_uninstall() which is called before | |||
| 305 | * the interrupt handler is unregistered. This should be used to disable | |||
| 306 | * interrupt generation in the hardware. | |||
| 307 | */ | |||
| 308 | void (*irq_uninstall) (struct drm_device *dev); | |||
| 309 | ||||
| 310 | /** | |||
| 311 | * @master_set: | |||
| 312 | * | |||
| 313 | * Called whenever the minor master is set. Only used by vmwgfx. | |||
| 314 | */ | |||
| 315 | void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, | |||
| 316 | bool_Bool from_open); | |||
| 317 | /** | |||
| 318 | * @master_drop: | |||
| 319 | * | |||
| 320 | * Called whenever the minor master is dropped. Only used by vmwgfx. | |||
| 321 | */ | |||
| 322 | void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); | |||
| 323 | ||||
| 324 | /** | |||
| 325 | * @debugfs_init: | |||
| 326 | * | |||
| 327 | * Allows drivers to create driver-specific debugfs files. | |||
| 328 | */ | |||
| 329 | void (*debugfs_init)(struct drm_minor *minor); | |||
| 330 | ||||
| 331 | /** | |||
| 332 | * @gem_free_object_unlocked: destructor for drm_gem_objects | |||
| 333 | * | |||
| 334 | * This is deprecated and should not be used by new drivers. Use | |||
| 335 | * &drm_gem_object_funcs.free instead. | |||
| 336 | */ | |||
| 337 | void (*gem_free_object_unlocked) (struct drm_gem_object *obj); | |||
| 338 | ||||
| 339 | /** | |||
| 340 | * @gem_open_object: | |||
| 341 | * | |||
| 342 | * This callback is deprecated in favour of &drm_gem_object_funcs.open. | |||
| 343 | * | |||
| 344 | * Driver hook called upon gem handle creation | |||
| 345 | */ | |||
| 346 | int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); | |||
| 347 | ||||
| 348 | /** | |||
| 349 | * @gem_close_object: | |||
| 350 | * | |||
| 351 | * This callback is deprecated in favour of &drm_gem_object_funcs.close. | |||
| 352 | * | |||
| 353 | * Driver hook called upon gem handle release | |||
| 354 | */ | |||
| 355 | void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); | |||
| 356 | ||||
| 357 | /** | |||
| 358 | * @gem_create_object: constructor for gem objects | |||
| 359 | * | |||
| 360 | * Hook for allocating the GEM object struct, for use by the CMA and | |||
| 361 | * SHMEM GEM helpers. | |||
| 362 | */ | |||
| 363 | struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, | |||
| 364 | size_t size); | |||
| 365 | /** | |||
| 366 | * @prime_handle_to_fd: | |||
| 367 | * | |||
| 368 | * Main PRIME export function. Should be implemented with | |||
| 369 | * drm_gem_prime_handle_to_fd() for GEM based drivers. | |||
| 370 | * | |||
| 371 | * For an in-depth discussion see :ref:`PRIME buffer sharing | |||
| 372 | * documentation <prime_buffer_sharing>`. | |||
| 373 | */ | |||
| 374 | int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, | |||
| 375 | uint32_t handle, uint32_t flags, int *prime_fd); | |||
| 376 | /** | |||
| 377 | * @prime_fd_to_handle: | |||
| 378 | * | |||
| 379 | * Main PRIME import function. Should be implemented with | |||
| 380 | * drm_gem_prime_fd_to_handle() for GEM based drivers. | |||
| 381 | * | |||
| 382 | * For an in-depth discussion see :ref:`PRIME buffer sharing | |||
| 383 | * documentation <prime_buffer_sharing>`. | |||
| 384 | */ | |||
| 385 | int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, | |||
| 386 | int prime_fd, uint32_t *handle); | |||
| 387 | /** | |||
| 388 | * @gem_prime_export: | |||
| 389 | * | |||
| 390 | * Export hook for GEM drivers. Deprecated in favour of | |||
| 391 | * &drm_gem_object_funcs.export. | |||
| 392 | */ | |||
| 393 | struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, | |||
| 394 | int flags); | |||
| 395 | /** | |||
| 396 | * @gem_prime_import: | |||
| 397 | * | |||
| 398 | * Import hook for GEM drivers. | |||
| 399 | * | |||
| 400 | * This defaults to drm_gem_prime_import() if not set. | |||
| 401 | */ | |||
| 402 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, | |||
| 403 | struct dma_buf *dma_buf); | |||
| 404 | ||||
| 405 | /** | |||
| 406 | * @gem_prime_pin: | |||
| 407 | * | |||
| 408 | * Deprecated hook in favour of &drm_gem_object_funcs.pin. | |||
| 409 | */ | |||
| 410 | int (*gem_prime_pin)(struct drm_gem_object *obj); | |||
| 411 | ||||
| 412 | /** | |||
| 413 | * @gem_prime_unpin: | |||
| 414 | * | |||
| 415 | * Deprecated hook in favour of &drm_gem_object_funcs.unpin. | |||
| 416 | */ | |||
| 417 | void (*gem_prime_unpin)(struct drm_gem_object *obj); | |||
| 418 | ||||
| 419 | ||||
| 420 | /** | |||
| 421 | * @gem_prime_get_sg_table: | |||
| 422 | * | |||
| 423 | * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. | |||
| 424 | */ | |||
| 425 | struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); | |||
| 426 | ||||
| 427 | /** | |||
| 428 | * @gem_prime_import_sg_table: | |||
| 429 | * | |||
| 430 | * Optional hook used by the PRIME helper functions | |||
| 431 | * drm_gem_prime_import() and drm_gem_prime_import_dev(). | |||
| 432 | */ | |||
| 433 | struct drm_gem_object *(*gem_prime_import_sg_table)( | |||
| 434 | struct drm_device *dev, | |||
| 435 | struct dma_buf_attachment *attach, | |||
| 436 | struct sg_table *sgt); | |||
| 437 | /** | |||
| 438 | * @gem_prime_vmap: | |||
| 439 | * | |||
| 440 | * Deprecated vmap hook for GEM drivers. Please use | |||
| 441 | * &drm_gem_object_funcs.vmap instead. | |||
| 442 | */ | |||
| 443 | void *(*gem_prime_vmap)(struct drm_gem_object *obj); | |||
| 444 | ||||
| 445 | /** | |||
| 446 | * @gem_prime_vunmap: | |||
| 447 | * | |||
| 448 | * Deprecated vunmap hook for GEM drivers. Please use | |||
| 449 | * &drm_gem_object_funcs.vunmap instead. | |||
| 450 | */ | |||
| 451 | void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); | |||
| 452 | ||||
| 453 | /** | |||
| 454 | * @gem_prime_mmap: | |||
| 455 | * | |||
| 456 | * mmap hook for GEM drivers, used to implement dma-buf mmap in the | |||
| 457 | * PRIME helpers. | |||
| 458 | * | |||
| 459 | * FIXME: There's way too much duplication going on here, and this hook | |||
| 460 | * has also moved to &drm_gem_object_funcs. | |||
| 461 | */ | |||
| 462 | #ifdef __linux__ | |||
| 463 | int (*gem_prime_mmap)(struct drm_gem_object *obj, | |||
| 464 | struct vm_area_struct *vma); | |||
| 465 | #endif | |||
| 466 | ||||
| 467 | #ifdef __OpenBSD__ | |||
| 468 | struct uvm_object *(*mmap)(struct file *, vm_prot_t, voff_t, vsize_t); | |||
| 469 | size_t gem_size; | |||
| 470 | #endif | |||
| 471 | ||||
| 472 | /** | |||
| 473 | * @dumb_create: | |||
| 474 | * | |||
| 475 | * This creates a new dumb buffer in the driver's backing storage manager (GEM, | |||
| 476 | * TTM or something else entirely) and returns the resulting buffer handle. This | |||
| 477 | * handle can then be wrapped up into a framebuffer modeset object. | |||
| 478 | * | |||
| 479 | * Note that userspace is not allowed to use such objects for render | |||
| 480 | * acceleration - drivers must create their own private ioctls for such a use | |||
| 481 | * case. | |||
| 482 | * | |||
| 483 | * Width, height and depth are specified in the &drm_mode_create_dumb | |||
| 484 | * argument. The callback needs to fill the handle, pitch and size for | |||
| 485 | * the created buffer. | |||
| 486 | * | |||
| 487 | * Called by the user via ioctl. | |||
| 488 | * | |||
| 489 | * Returns: | |||
| 490 | * | |||
| 491 | * Zero on success, negative errno on failure. | |||
| 492 | */ | |||
| 493 | int (*dumb_create)(struct drm_file *file_priv, | |||
| 494 | struct drm_device *dev, | |||
| 495 | struct drm_mode_create_dumb *args); | |||
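| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): a typical @dumb_create | |||
| |  * implementation fills in pitch and size before allocating the buffer and | |||
| |  * its handle. foo_gem_create_with_handle() is a made-up driver helper, and | |||
| |  * real drivers would also align the pitch and check for overflow. | |||
| |  * | |||
| |  *     static int foo_dumb_create(struct drm_file *file_priv, | |||
| |  *                                struct drm_device *dev, | |||
| |  *                                struct drm_mode_create_dumb *args) | |||
| |  *     { | |||
| |  *         args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); | |||
| |  *         args->size  = args->pitch * args->height; | |||
| |  * | |||
| |  *         return foo_gem_create_with_handle(file_priv, dev, args->size, | |||
| |  *                                           &args->handle); | |||
| |  *     } | |||
| |  */ | |||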
| 496 | /** | |||
| 497 | * @dumb_map_offset: | |||
| 498 | * | |||
| 499 | * Allocate an offset in the drm device node's address space to be able to | |||
| 500 | * memory map a dumb buffer. | |||
| 501 | * | |||
| 502 | * The default implementation is drm_gem_create_mmap_offset(). GEM based | |||
| 503 | * drivers must not overwrite this. | |||
| 504 | * | |||
| 505 | * Called by the user via ioctl. | |||
| 506 | * | |||
| 507 | * Returns: | |||
| 508 | * | |||
| 509 | * Zero on success, negative errno on failure. | |||
| 510 | */ | |||
| 511 | int (*dumb_map_offset)(struct drm_file *file_priv, | |||
| 512 | struct drm_device *dev, uint32_t handle, | |||
| 513 | uint64_t *offset); | |||
| 514 | /** | |||
| 515 | * @dumb_destroy: | |||
| 516 | * | |||
| 517 | * This destroys the userspace handle for the given dumb backing storage buffer. | |||
| 518 | * Since buffer objects must be reference counted in the kernel a buffer object | |||
| 519 | * won't be immediately freed if a framebuffer modeset object still uses it. | |||
| 520 | * | |||
| 521 | * Called by the user via ioctl. | |||
| 522 | * | |||
| 523 | * The default implementation is drm_gem_dumb_destroy(). GEM based drivers | |||
| 524 | * must not overwrite this. | |||
| 525 | * | |||
| 526 | * Returns: | |||
| 527 | * | |||
| 528 | * Zero on success, negative errno on failure. | |||
| 529 | */ | |||
| 530 | int (*dumb_destroy)(struct drm_file *file_priv, | |||
| 531 | struct drm_device *dev, | |||
| 532 | uint32_t handle); | |||
| 533 | ||||
| 534 | /** | |||
| 535 | * @gem_vm_ops: Driver private ops for this object | |||
| 536 | * | |||
| 537 | * For GEM drivers this is deprecated in favour of | |||
| 538 | * &drm_gem_object_funcs.vm_ops. | |||
| 539 | */ | |||
| 540 | const struct vm_operations_struct *gem_vm_ops; | |||
| 541 | ||||
| 542 | #ifdef __OpenBSD__ | |||
| 543 | int (*gem_fault)(struct drm_gem_object *, | |||
| 544 | struct uvm_faultinfo *, off_t, vaddr_t, | |||
| 545 | vm_page_t *, int, int, vm_prot_t, int); | |||
| 546 | #endif | |||
| 547 | ||||
| 548 | /** @major: driver major number */ | |||
| 549 | int major; | |||
| 550 | /** @minor: driver minor number */ | |||
| 551 | int minor; | |||
| 552 | /** @patchlevel: driver patch level */ | |||
| 553 | int patchlevel; | |||
| 554 | /** @name: driver name */ | |||
| 555 | char *name; | |||
| 556 | /** @desc: driver description */ | |||
| 557 | char *desc; | |||
| 558 | /** @date: driver date */ | |||
| 559 | char *date; | |||
| 560 | ||||
| 561 | /** | |||
| 562 | * @driver_features: | |||
| 563 | * Driver features, see &enum drm_driver_feature. Drivers can disable | |||
| 564 | * some features on a per-instance basis using | |||
| 565 | * &drm_device.driver_features. | |||
| 566 | */ | |||
| 567 | u32 driver_features; | |||
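| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): a single instance can | |||
| |  * mask out features its hardware revision lacks before registration. The | |||
| |  * "foo" device structure and its has_atomic flag are hypothetical. | |||
| |  * | |||
| |  *     static int foo_probe_finish(struct foo_device *foo) | |||
| |  *     { | |||
| |  *         struct drm_device *ddev = &foo->drm; | |||
| |  * | |||
| |  *         if (!foo->has_atomic) | |||
| |  *             ddev->driver_features &= ~DRIVER_ATOMIC; | |||
| |  * | |||
| |  *         return drm_dev_register(ddev, 0); | |||
| |  *     } | |||
| |  */ | |||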
| 568 | ||||
| 569 | /** | |||
| 570 | * @ioctls: | |||
| 571 | * | |||
| 572 | * Array of driver-private IOCTL description entries. See the chapter on | |||
| 573 | * :ref:`IOCTL support in the userland interfaces | |||
| 574 | * chapter<drm_driver_ioctl>` for the full details. | |||
| 575 | */ | |||
| 576 | ||||
| 577 | const struct drm_ioctl_desc *ioctls; | |||
| 578 | /** @num_ioctls: Number of entries in @ioctls. */ | |||
| 579 | int num_ioctls; | |||
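| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): driver-private ioctls | |||
| |  * are declared with DRM_IOCTL_DEF_DRV() and counted with ARRAY_SIZE(). The | |||
| |  * FOO_CREATE_BO ioctl and its handler are made up and would need matching | |||
| |  * uapi definitions. | |||
| |  * | |||
| |  *     static const struct drm_ioctl_desc foo_ioctls[] = { | |||
| |  *         DRM_IOCTL_DEF_DRV(FOO_CREATE_BO, foo_create_bo_ioctl, | |||
| |  *                           DRM_RENDER_ALLOW), | |||
| |  *     }; | |||
| |  * | |||
| |  *     static struct drm_driver foo_driver = { | |||
| |  *         // ... | |||
| |  *         .ioctls     = foo_ioctls, | |||
| |  *         .num_ioctls = ARRAY_SIZE(foo_ioctls), | |||
| |  *     }; | |||
| |  */ | |||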
| 580 | ||||
| 581 | /** | |||
| 582 | * @fops: | |||
| 583 | * | |||
| 584 | * File operations for the DRM device node. See the discussion in | |||
| 585 | * :ref:`file operations<drm_driver_fops>` for in-depth coverage and | |||
| 586 | * some examples. | |||
| 587 | */ | |||
| 588 | const struct file_operations *fops; | |||
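| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): on Linux, GEM drivers | |||
| |  * usually generate their file_operations with the DEFINE_DRM_GEM_FOPS() | |||
| |  * helper from <drm/drm_gem.h>; "foo" is again a placeholder. | |||
| |  * | |||
| |  *     DEFINE_DRM_GEM_FOPS(foo_fops); | |||
| |  * | |||
| |  *     static struct drm_driver foo_driver = { | |||
| |  *         // ... | |||
| |  *         .fops = &foo_fops, | |||
| |  *     }; | |||
| |  */ | |||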
| 589 | ||||
| 590 | /* Everything below here is for legacy drivers, never use! */ | |||
| 591 | /* private: */ | |||
| 592 | ||||
| 593 | /* List of devices hanging off this driver with stealth attach. */ | |||
| 594 | struct list_head legacy_dev_list; | |||
| 595 | int (*firstopen) (struct drm_device *); | |||
| 596 | void (*preclose) (struct drm_device *, struct drm_file *file_priv); | |||
| 597 | int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); | |||
| 598 | int (*dma_quiescent) (struct drm_device *); | |||
| 599 | int (*context_dtor) (struct drm_device *dev, int context); | |||
| 600 | u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); | |||
| 601 | int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); | |||
| 602 | void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); | |||
| 603 | int dev_priv_size; | |||
| 604 | }; | |||
| 605 | ||||
| 606 | void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, | |||
| 607 | size_t size, size_t offset); | |||
| 608 | ||||
| 609 | /** | |||
| 610 | * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance | |||
| 611 | * @parent: Parent device object | |||
| 612 | * @driver: DRM driver | |||
| 613 | * @type: the type of the struct which contains struct &drm_device | |||
| 614 | * @member: the name of the &drm_device within @type. | |||
| 615 | * | |||
| 616 | * This allocates and initializes a new DRM device. No device registration is done. | |||
| 617 | * Call drm_dev_register() to advertise the device to user space and register it | |||
| 618 | * with other core subsystems. This should be done last in the device | |||
| 619 | * initialization sequence to make sure userspace can't access an inconsistent | |||
| 620 | * state. | |||
| 621 | * | |||
| 622 | * The initial ref-count of the object is 1. Use drm_dev_get() and | |||
| 623 | * drm_dev_put() to take and drop further ref-counts. | |||
| 624 | * | |||
| 625 | * It is recommended that drivers embed &struct drm_device into their own device | |||
| 626 | * structure. | |||
| 627 | * | |||
| 628 | * Note that this manages the lifetime of the resulting &drm_device | |||
| 629 | * automatically using devres. The DRM device initialized with this function is | |||
| 630 | * automatically put on driver detach using drm_dev_put(). | |||
| 631 | * | |||
| 632 | * RETURNS: | |||
| 633 | * Pointer to new DRM device, or ERR_PTR on failure. | |||
| 634 | */ | |||
| 635 | #define devm_drm_dev_alloc(parent, driver, type, member) \
| 636 | ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ | |||
| 637 | offsetof(type, member)))
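| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): typical use embeds | |||
| |  * &struct drm_device in a driver structure and registers the device | |||
| |  * afterwards; the "foo" names are hypothetical. | |||
| |  * | |||
| |  *     struct foo_device { | |||
| |  *         struct drm_device drm; | |||
| |  *         // driver-private state | |||
| |  *     }; | |||
| |  * | |||
| |  *     static int foo_probe(struct device *parent) | |||
| |  *     { | |||
| |  *         struct foo_device *foo; | |||
| |  * | |||
| |  *         foo = devm_drm_dev_alloc(parent, &foo_driver, | |||
| |  *                                  struct foo_device, drm); | |||
| |  *         if (IS_ERR(foo)) | |||
| |  *             return PTR_ERR(foo); | |||
| |  * | |||
| |  *         // ... hardware setup ... | |||
| |  * | |||
| |  *         return drm_dev_register(&foo->drm, 0); | |||
| |  *     } | |||
| |  */ | |||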
| 638 | ||||
| 639 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, | |||
| 640 | struct device *parent); | |||
| 641 | int drm_dev_register(struct drm_device *dev, unsigned long flags); | |||
| 642 | void drm_dev_unregister(struct drm_device *dev); | |||
| 643 | ||||
| 644 | void drm_dev_get(struct drm_device *dev); | |||
| 645 | void drm_dev_put(struct drm_device *dev); | |||
| 646 | void drm_put_dev(struct drm_device *dev); | |||
| 647 | bool drm_dev_enter(struct drm_device *dev, int *idx); | |||
| 648 | void drm_dev_exit(int idx); | |||
| 649 | void drm_dev_unplug(struct drm_device *dev); | |||
| 650 | ||||
| 651 | /** | |||
| 652 | * drm_dev_is_unplugged - is a DRM device unplugged | |||
| 653 | * @dev: DRM device | |||
| 654 | * | |||
| 655 | * This function can be called to check whether a hotpluggable device is unplugged. | |||
| 656 | * Unplugging itself is signalled through drm_dev_unplug(). If a device is | |||
| 657 | * unplugged, these two functions guarantee that any store before calling | |||
| 658 | * drm_dev_unplug() is visible to callers of this function after it completes. | |||
| 659 | * | |||
| 660 | * WARNING: This function fundamentally races against drm_dev_unplug(). It is | |||
| 661 | * recommended that drivers instead use the underlying drm_dev_enter() and | |||
| 662 | * drm_dev_exit() function pairs. | |||
| 663 | */ | |||
| 664 | static inline bool drm_dev_is_unplugged(struct drm_device *dev) | |||
| 665 | { | |||
| 666 | int idx; | |||
| 667 | ||||
| 668 | if (drm_dev_enter(dev, &idx)) { | |||
| 669 | drm_dev_exit(idx); | |||
| 670 | return false; | |||
| 671 | } | |||
| 672 | ||||
| 673 | return true; | |||
| 674 | } | |||
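| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): the recommended pattern | |||
| |  * brackets hardware access with drm_dev_enter()/drm_dev_exit() instead of | |||
| |  * testing drm_dev_is_unplugged(); foo_hw_read() is a made-up helper. | |||
| |  * | |||
| |  *     static int foo_read_reg_ioctl(struct drm_device *dev, void *data, | |||
| |  *                                   struct drm_file *file_priv) | |||
| |  *     { | |||
| |  *         int idx, ret; | |||
| |  * | |||
| |  *         if (!drm_dev_enter(dev, &idx)) | |||
| |  *             return -ENODEV; | |||
| |  * | |||
| |  *         ret = foo_hw_read(dev, data); | |||
| |  * | |||
| |  *         drm_dev_exit(idx); | |||
| |  *         return ret; | |||
| |  *     } | |||
| |  */ | |||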
| 675 | ||||
| 676 | /** | |||
| 677 | * drm_core_check_all_features - check driver feature flags mask | |||
| 678 | * @dev: DRM device to check | |||
| 679 | * @features: feature flag(s) mask | |||
| 680 | * | |||
| 681 | * This checks @dev for driver features, see &drm_driver.driver_features, | |||
| 682 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. | |||
| 683 | * | |||
| 684 | * Returns true if all features in the @features mask are supported, false | |||
| 685 | * otherwise. | |||
| 686 | */ | |||
| 687 | static inline bool drm_core_check_all_features(const struct drm_device *dev, | |||
| 688 | u32 features) | |||
| 689 | { | |||
| 690 | u32 supported = dev->driver->driver_features & dev->driver_features; | |||
| 691 | ||||
| 692 | return features && (supported & features) == features; | |||
| 693 | } | |||
| 694 | ||||
| 695 | /** | |||
| 696 | * drm_core_check_feature - check driver feature flags | |||
| 697 | * @dev: DRM device to check | |||
| 698 | * @feature: feature flag | |||
| 699 | * | |||
| 700 | * This checks @dev for driver features, see &drm_driver.driver_features, | |||
| 701 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. | |||
| 702 | * | |||
| 703 | * Returns true if the @feature is supported, false otherwise. | |||
| 704 | */ | |||
| 705 | static inline bool drm_core_check_feature(const struct drm_device *dev, | |||
| 706 | enum drm_driver_feature feature) | |||
| 707 | { | |||
| 708 | return drm_core_check_all_features(dev, feature); | |||
| 709 | } | |||
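| | ||||
| | /* | |||
| |  * Editor's sketch (not part of the original header): a common use is to | |||
| |  * reject modeset-only paths, e.g. at the top of an ioctl handler, on | |||
| |  * render-only devices. | |||
| |  * | |||
| |  *     if (!drm_core_check_feature(dev, DRIVER_MODESET)) | |||
| |  *         return -EOPNOTSUPP; | |||
| |  */ | |||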
| 710 | ||||
| 711 | /** | |||
| 712 | * drm_drv_uses_atomic_modeset - check if the driver implements | |||
| 713 | * atomic_commit() | |||
| 714 | * @dev: DRM device | |||
| 715 | * | |||
| 716 | * This check is useful if drivers do not have DRIVER_ATOMIC set but | |||
| 717 | * have atomic modesetting internally implemented. | |||
| 718 | */ | |||
| 719 | static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) | |||
| 720 | { | |||
| 721 | return drm_core_check_feature(dev, DRIVER_ATOMIC) || | |||
| 722 | (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); | |||
| 723 | } | |||
| 724 | ||||
| 725 | ||||
| 726 | int drm_dev_set_unique(struct drm_device *dev, const char *name); | |||
| 727 | ||||
| 728 | struct drm_file *drm_find_file_by_minor(struct drm_device *, int); | |||
| 729 | struct drm_device *drm_get_device_from_kdev(dev_t); | |||
| 730 | ||||
| 731 | #ifdef __OpenBSD__ | |||
| 732 | ||||
| 733 | void drm_attach_platform(struct drm_driver *, bus_space_tag_t, bus_dma_tag_t, | |||
| 734 | struct device *, struct drm_device *); | |||
| 735 | struct drm_device *drm_attach_pci(struct drm_driver *, | |||
| 736 | struct pci_attach_args *, int, int, struct device *, struct drm_device *); | |||
| 737 | ||||
| 738 | int drm_pciprobe(struct pci_attach_args *, const struct pci_device_id *); | |||
| 739 | const struct pci_device_id *drm_find_description(int, int, | |||
| 740 | const struct pci_device_id *); | |||
| 741 | ||||
| 742 | int drm_getpciinfo(struct drm_device *, void *, struct drm_file *); | |||
| 743 | ||||
| 744 | #endif | |||
| 745 | ||||
| 746 | #endif |