File: | dev/pci/drm/drm_drv.c |
Warning: | line 794, column 2 Value stored to 'drm' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* |
2 | * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org |
3 | * |
4 | * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. |
5 | * All Rights Reserved. |
6 | * |
7 | * Author Rickard E. (Rik) Faith <faith@valinux.com> |
8 | * |
9 | * Permission is hereby granted, free of charge, to any person obtaining a |
10 | * copy of this software and associated documentation files (the "Software"), |
11 | * to deal in the Software without restriction, including without limitation |
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
13 | * and/or sell copies of the Software, and to permit persons to whom the |
14 | * Software is furnished to do so, subject to the following conditions: |
15 | * |
16 | * The above copyright notice and this permission notice (including the next |
17 | * paragraph) shall be included in all copies or substantial portions of the |
18 | * Software. |
19 | * |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
23 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
26 | * DEALINGS IN THE SOFTWARE. |
27 | */ |
28 | |
29 | #include <sys/param.h> |
30 | #include <sys/fcntl.h> |
31 | #include <sys/specdev.h> |
32 | #include <sys/vnode.h> |
33 | |
34 | #include <machine/bus.h> |
35 | |
36 | #ifdef __HAVE_ACPI |
37 | #include <dev/acpi/acpidev.h> |
38 | #include <dev/acpi/acpivar.h> |
39 | #include <dev/acpi/dsdt.h> |
40 | #endif |
41 | |
42 | #include <linux/debugfs.h> |
43 | #include <linux/fs.h> |
44 | #include <linux/module.h> |
45 | #include <linux/moduleparam.h> |
46 | #include <linux/mount.h> |
47 | #include <linux/pseudo_fs.h> |
48 | #include <linux/slab.h> |
49 | #include <linux/srcu.h> |
50 | |
51 | #include <drm/drm_cache.h> |
52 | #include <drm/drm_client.h> |
53 | #include <drm/drm_color_mgmt.h> |
54 | #include <drm/drm_drv.h> |
55 | #include <drm/drm_file.h> |
56 | #include <drm/drm_managed.h> |
57 | #include <drm/drm_mode_object.h> |
58 | #include <drm/drm_print.h> |
59 | #include <drm/drm_privacy_screen_machine.h> |
60 | |
61 | #include <drm/drm_gem.h> |
62 | |
63 | #include "drm_crtc_internal.h" |
64 | #include "drm_internal.h" |
65 | #include "drm_legacy.h" |
66 | |
67 | MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); |
68 | MODULE_DESCRIPTION("DRM shared core routines"); |
69 | MODULE_LICENSE("GPL and additional rights"); |
70 | |
71 | static DEFINE_SPINLOCK(drm_minor_lock)struct mutex drm_minor_lock = { ((void *)0), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9))), 0x0 }; |
72 | static struct idr drm_minors_idr; |
73 | |
74 | /* |
75 | * If the drm core fails to init for whatever reason, |
76 | * we should prevent any drivers from registering with it. |
77 | * It's best to check this at drm_dev_init(), as some drivers |
78 | * prefer to embed struct drm_device into their own device |
79 | * structure and call drm_dev_init() themselves. |
80 | */ |
81 | static bool_Bool drm_core_init_complete; |
82 | |
83 | static struct dentry *drm_debugfs_root; |
84 | |
85 | #ifdef notyet |
86 | DEFINE_STATIC_SRCU(drm_unplug_srcu); |
87 | #endif |
88 | |
89 | /* |
90 | * Some functions are only called once on init regardless of how many times |
91 | * drm attaches. In linux this is handled via module_init()/module_exit() |
92 | */ |
93 | int drm_refcnt; |
94 | |
95 | struct drm_softc { |
96 | struct device sc_dev; |
97 | struct drm_device *sc_drm; |
98 | int sc_allocated; |
99 | }; |
100 | |
101 | struct drm_attach_args { |
102 | struct drm_device *drm; |
103 | const struct drm_driver *driver; |
104 | char *busid; |
105 | bus_dma_tag_t dmat; |
106 | bus_space_tag_t bst; |
107 | size_t busid_len; |
108 | int is_agp; |
109 | struct pci_attach_args *pa; |
110 | int primary; |
111 | }; |
112 | |
113 | void drm_linux_init(void); |
114 | void drm_linux_exit(void); |
115 | int drm_linux_acpi_notify(struct aml_node *, int, void *); |
116 | |
117 | int drm_dequeue_event(struct drm_device *, struct drm_file *, size_t, |
118 | struct drm_pending_event **); |
119 | |
120 | int drmprint(void *, const char *); |
121 | int drmsubmatch(struct device *, void *, void *); |
122 | const struct pci_device_id * |
123 | drm_find_description(int, int, const struct pci_device_id *); |
124 | |
125 | int drm_file_cmp(struct drm_file *, struct drm_file *); |
126 | SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp)void drm_file_tree_SPLAY(struct drm_file_tree *, struct drm_file *); void drm_file_tree_SPLAY_MINMAX(struct drm_file_tree *, int ); struct drm_file *drm_file_tree_SPLAY_INSERT(struct drm_file_tree *, struct drm_file *); struct drm_file *drm_file_tree_SPLAY_REMOVE (struct drm_file_tree *, struct drm_file *); static __attribute__ ((__unused__)) __inline struct drm_file * drm_file_tree_SPLAY_FIND (struct drm_file_tree *head, struct drm_file *elm) { if (((head )->sph_root == ((void *)0))) return(((void *)0)); drm_file_tree_SPLAY (head, elm); if ((drm_file_cmp)(elm, (head)->sph_root) == 0 ) return (head->sph_root); return (((void *)0)); } static __attribute__ ((__unused__)) __inline struct drm_file * drm_file_tree_SPLAY_NEXT (struct drm_file_tree *head, struct drm_file *elm) { drm_file_tree_SPLAY (head, elm); if ((elm)->link.spe_right != ((void *)0)) { elm = (elm)->link.spe_right; while ((elm)->link.spe_left != ((void *)0)) { elm = (elm)->link.spe_left; } } else elm = ((void *)0); return (elm); } static __attribute__((__unused__ )) __inline struct drm_file * drm_file_tree_SPLAY_MIN_MAX(struct drm_file_tree *head, int val) { drm_file_tree_SPLAY_MINMAX(head , val); return ((head)->sph_root); }; |
127 | |
128 | #define DRMDEVCF_PRIMARY0 0 |
129 | #define drmdevcf_primarycf_loc[0] cf_loc[DRMDEVCF_PRIMARY0] /* spec'd as primary? */ |
130 | #define DRMDEVCF_PRIMARY_UNK-1 -1 |
131 | |
132 | /* |
133 | * DRM Minors |
134 | * A DRM device can provide several char-dev interfaces on the DRM-Major. Each |
135 | * of them is represented by a drm_minor object. Depending on the capabilities |
136 | * of the device-driver, different interfaces are registered. |
137 | * |
138 | * Minors can be accessed via dev->$minor_name. This pointer is either |
139 | * NULL or a valid drm_minor pointer and stays valid as long as the device is |
140 | * valid. This means, DRM minors have the same life-time as the underlying |
141 | * device. However, this doesn't mean that the minor is active. Minors are |
142 | * registered and unregistered dynamically according to device-state. |
143 | */ |
144 | |
/* Return the address of the slot (dev->primary or dev->render) that stores
 * the minor of the given @type; any other type is a fatal bug (panic). */
145 | static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, |
146 | unsigned int type) |
147 | { |
148 | switch (type) { |
149 | case DRM_MINOR_PRIMARY: |
150 | return &dev->primary; |
151 | case DRM_MINOR_RENDER: |
152 | return &dev->render; |
153 | default: |
154 | BUG()do { panic("BUG at %s:%d", "/usr/src/sys/dev/pci/drm/drm_drv.c" , 154); } while (0); |
155 | } |
156 | } |
157 | |
/* drmm (managed-resource) release callback paired with drm_minor_alloc():
 * drops the minor's slot from the global minors idr under drm_minor_lock.
 * On Linux it also releases the sysfs kdev reference. The minor struct
 * itself was drmm_kzalloc'd, so the managed-resource machinery frees it. */
158 | static void drm_minor_alloc_release(struct drm_device *dev, void *data) |
159 | { |
160 | struct drm_minor *minor = data; |
161 | unsigned long flags; |
162 | |
163 | WARN_ON(dev != minor->dev)({ int __ret = !!(dev != minor->dev); if (__ret) printf("WARNING %s failed at %s:%d\n" , "dev != minor->dev", "/usr/src/sys/dev/pci/drm/drm_drv.c" , 163); __builtin_expect(!!(__ret), 0); }); |
164 | |
165 | #ifdef __linux__ |
166 | put_device(minor->kdev); |
167 | #endif |
168 | |
169 | spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0); |
170 | idr_remove(&drm_minors_idr, minor->index); |
171 | spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while ( 0); |
172 | } |
173 | |
/* Allocate a drm_minor of @type for @dev and reserve its minor number.
 * The idr slot is reserved in the range [64*type, 64*(type+1)) but the
 * value stored is NULL until drm_minor_register() replaces it, so lookups
 * fail for a minor that is allocated but not yet registered. Cleanup is
 * handled by drm_minor_alloc_release() via drmm_add_action_or_reset().
 * Returns 0 on success or a negative errno. */
174 | static int drm_minor_alloc(struct drm_device *dev, unsigned int type) |
175 | { |
176 | struct drm_minor *minor; |
177 | unsigned long flags; |
178 | int r; |
179 | |
180 | minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL(0x0001 | 0x0004)); |
181 | if (!minor) |
182 | return -ENOMEM12; |
183 | |
184 | minor->type = type; |
185 | minor->dev = dev; |
186 | |
187 | idr_preload(GFP_KERNEL(0x0001 | 0x0004)); |
188 | spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0); |
/* Reserve the ID with a NULL payload; drm_minor_register() swaps in the
 * real pointer once the minor is usable. */
189 | r = idr_alloc(&drm_minors_idr, |
190 | NULL((void *)0), |
191 | 64 * type, |
192 | 64 * (type + 1), |
193 | GFP_NOWAIT0x0002); |
194 | spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while ( 0); |
195 | idr_preload_end(); |
196 | |
197 | if (r < 0) |
198 | return r; |
199 | |
200 | minor->index = r; |
201 | |
202 | r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor); |
203 | if (r) |
204 | return r; |
205 | |
206 | #ifdef __linux__ |
207 | minor->kdev = drm_sysfs_minor_alloc(minor); |
208 | if (IS_ERR(minor->kdev)) |
209 | return PTR_ERR(minor->kdev); |
210 | #endif |
211 | |
212 | *drm_minor_get_slot(dev, type) = minor; |
213 | return 0; |
214 | } |
215 | |
/* Publish a previously allocated minor of @type: on Linux this creates the
 * debugfs entries and the char device; on OpenBSD only the idr payload is
 * flipped from NULL to the minor so drm_minor_acquire() can find it.
 * Returns 0 if the minor does not exist (nothing to do) or on success. */
216 | static int drm_minor_register(struct drm_device *dev, unsigned int type) |
217 | { |
218 | struct drm_minor *minor; |
219 | unsigned long flags; |
220 | #ifdef __linux__ |
221 | int ret; |
222 | #endif |
223 | |
224 | DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n"); |
225 | |
226 | minor = *drm_minor_get_slot(dev, type); |
227 | if (!minor) |
228 | return 0; |
229 | |
230 | #ifdef __linux__ |
231 | ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); |
232 | if (ret) { |
233 | DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n")__drm_err("DRM: Failed to initialize /sys/kernel/debug/dri.\n" ); |
234 | goto err_debugfs; |
235 | } |
236 | |
237 | ret = device_add(minor->kdev); |
238 | if (ret) |
239 | goto err_debugfs; |
240 | #else |
241 | drm_debugfs_root = NULL((void *)0); |
242 | #endif |
243 | |
244 | /* replace NULL with @minor so lookups will succeed from now on */ |
245 | spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0); |
246 | idr_replace(&drm_minors_idr, minor, minor->index); |
247 | spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while ( 0); |
248 | |
249 | DRM_DEBUG("new minor registered %d\n", minor->index)___drm_dbg(((void *)0), DRM_UT_CORE, "new minor registered %d\n" , minor->index); |
250 | return 0; |
251 | |
252 | #ifdef __linux__ |
253 | err_debugfs: |
254 | drm_debugfs_cleanup(minor); |
255 | return ret; |
256 | #endif |
257 | } |
258 | |
/* Undo drm_minor_register(): put NULL back into the idr slot so new
 * lookups fail, then tear down the Linux device/debugfs state. */
259 | static void drm_minor_unregister(struct drm_device *dev, unsigned int type) |
260 | { |
261 | struct drm_minor *minor; |
262 | unsigned long flags; |
263 | |
264 | minor = *drm_minor_get_slot(dev, type); |
265 | #ifdef __linux__ |
266 | if (!minor || !device_is_registered(minor->kdev)) |
267 | #else |
268 | if (!minor) |
269 | #endif |
270 | return; |
271 | |
272 | /* replace @minor with NULL so lookups will fail from now on */ |
273 | spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0); |
274 | idr_replace(&drm_minors_idr, NULL((void *)0), minor->index); |
275 | spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while ( 0); |
276 | |
277 | #ifdef __linux__ |
278 | device_del(minor->kdev); |
279 | #endif |
/* NOTE(review): the two calls below touch minor->kdev / debugfs state but
 * sit outside the #ifdef __linux__ guard, unlike device_del() above —
 * presumably they compile to no-ops on OpenBSD; verify that is intended. */
280 | dev_set_drvdata(minor->kdev, NULL((void *)0)); /* safety belt */ |
281 | drm_debugfs_cleanup(minor); |
282 | } |
283 | |
284 | /* |
285 | * Looks up the given minor-ID and returns the respective DRM-minor object. The |
286 | * refence-count of the underlying device is increased so you must release this |
287 | * object with drm_minor_release(). |
288 | * |
289 | * As long as you hold this minor, it is guaranteed that the object and the |
290 | * minor->dev pointer will stay valid! However, the device may get unplugged and |
291 | * unregistered while you hold the minor. |
292 | */ |
/* Look up @minor_id in the global idr under drm_minor_lock; on a hit the
 * device reference is taken inside the lock so the minor cannot be torn
 * down between lookup and get. Fails with -ENODEV if the minor is absent
 * or its device has been unplugged (in which case the ref is dropped). */
293 | struct drm_minor *drm_minor_acquire(unsigned int minor_id) |
294 | { |
295 | struct drm_minor *minor; |
296 | unsigned long flags; |
297 | |
298 | spin_lock_irqsave(&drm_minor_lock, flags)do { flags = 0; mtx_enter(&drm_minor_lock); } while (0); |
299 | minor = idr_find(&drm_minors_idr, minor_id); |
300 | if (minor) |
301 | drm_dev_get(minor->dev); |
302 | spin_unlock_irqrestore(&drm_minor_lock, flags)do { (void)(flags); mtx_leave(&drm_minor_lock); } while ( 0); |
303 | |
304 | if (!minor) { |
305 | return ERR_PTR(-ENODEV19); |
306 | } else if (drm_dev_is_unplugged(minor->dev)) { |
307 | drm_dev_put(minor->dev); |
308 | return ERR_PTR(-ENODEV19); |
309 | } |
310 | |
311 | return minor; |
312 | } |
313 | |
/* Release a minor obtained with drm_minor_acquire(): drops the device
 * reference taken there. The minor pointer must not be used afterwards. */
314 | void drm_minor_release(struct drm_minor *minor) |
315 | { |
316 | drm_dev_put(minor->dev); |
317 | } |
318 | |
319 | /** |
320 | * DOC: driver instance overview |
321 | * |
322 | * A device instance for a drm driver is represented by &struct drm_device. This |
323 | * is allocated and initialized with devm_drm_dev_alloc(), usually from |
324 | * bus-specific ->probe() callbacks implemented by the driver. The driver then |
325 | * needs to initialize all the various subsystems for the drm device like memory |
326 | * management, vblank handling, modesetting support and initial output |
327 | * configuration plus obviously initialize all the corresponding hardware bits. |
328 | * Finally when everything is up and running and ready for userspace the device |
329 | * instance can be published using drm_dev_register(). |
330 | * |
331 | * There is also deprecated support for initializing device instances using |
332 | * bus-specific helpers and the &drm_driver.load callback. But due to |
333 | * backwards-compatibility needs the device instance have to be published too |
334 | * early, which requires unpretty global locking to make safe and is therefore |
335 | * only support for existing drivers not yet converted to the new scheme. |
336 | * |
337 | * When cleaning up a device instance everything needs to be done in reverse: |
338 | * First unpublish the device instance with drm_dev_unregister(). Then clean up |
339 | * any other resources allocated at device initialization and drop the driver's |
340 | * reference to &drm_device using drm_dev_put(). |
341 | * |
342 | * Note that any allocation or resource which is visible to userspace must be |
343 | * released only when the final drm_dev_put() is called, and not when the |
344 | * driver is unbound from the underlying physical struct &device. Best to use |
345 | * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and |
346 | * related functions. |
347 | * |
348 | * devres managed resources like devm_kmalloc() can only be used for resources |
349 | * directly related to the underlying hardware device, and only used in code |
350 | * paths fully protected by drm_dev_enter() and drm_dev_exit(). |
351 | * |
352 | * Display driver example |
353 | * ~~~~~~~~~~~~~~~~~~~~~~ |
354 | * |
355 | * The following example shows a typical structure of a DRM display driver. |
356 | * The example focus on the probe() function and the other functions that is |
357 | * almost always present and serves as a demonstration of devm_drm_dev_alloc(). |
358 | * |
359 | * .. code-block:: c |
360 | * |
361 | * struct driver_device { |
362 | * struct drm_device drm; |
363 | * void *userspace_facing; |
364 | * struct clk *pclk; |
365 | * }; |
366 | * |
367 | * static const struct drm_driver driver_drm_driver = { |
368 | * [...] |
369 | * }; |
370 | * |
371 | * static int driver_probe(struct platform_device *pdev) |
372 | * { |
373 | * struct driver_device *priv; |
374 | * struct drm_device *drm; |
375 | * int ret; |
376 | * |
377 | * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver, |
378 | * struct driver_device, drm); |
379 | * if (IS_ERR(priv)) |
380 | * return PTR_ERR(priv); |
381 | * drm = &priv->drm; |
382 | * |
383 | * ret = drmm_mode_config_init(drm); |
384 | * if (ret) |
385 | * return ret; |
386 | * |
387 | * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL); |
388 | * if (!priv->userspace_facing) |
389 | * return -ENOMEM; |
390 | * |
391 | * priv->pclk = devm_clk_get(dev, "PCLK"); |
392 | * if (IS_ERR(priv->pclk)) |
393 | * return PTR_ERR(priv->pclk); |
394 | * |
395 | * // Further setup, display pipeline etc |
396 | * |
397 | * platform_set_drvdata(pdev, drm); |
398 | * |
399 | * drm_mode_config_reset(drm); |
400 | * |
401 | * ret = drm_dev_register(drm); |
402 | * if (ret) |
403 | * return ret; |
404 | * |
405 | * drm_fbdev_generic_setup(drm, 32); |
406 | * |
407 | * return 0; |
408 | * } |
409 | * |
410 | * // This function is called before the devm_ resources are released |
411 | * static int driver_remove(struct platform_device *pdev) |
412 | * { |
413 | * struct drm_device *drm = platform_get_drvdata(pdev); |
414 | * |
415 | * drm_dev_unregister(drm); |
416 | * drm_atomic_helper_shutdown(drm) |
417 | * |
418 | * return 0; |
419 | * } |
420 | * |
421 | * // This function is called on kernel restart and shutdown |
422 | * static void driver_shutdown(struct platform_device *pdev) |
423 | * { |
424 | * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); |
425 | * } |
426 | * |
427 | * static int __maybe_unused driver_pm_suspend(struct device *dev) |
428 | * { |
429 | * return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); |
430 | * } |
431 | * |
432 | * static int __maybe_unused driver_pm_resume(struct device *dev) |
433 | * { |
434 | * drm_mode_config_helper_resume(dev_get_drvdata(dev)); |
435 | * |
436 | * return 0; |
437 | * } |
438 | * |
439 | * static const struct dev_pm_ops driver_pm_ops = { |
440 | * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume) |
441 | * }; |
442 | * |
443 | * static struct platform_driver driver_driver = { |
444 | * .driver = { |
445 | * [...] |
446 | * .pm = &driver_pm_ops, |
447 | * }, |
448 | * .probe = driver_probe, |
449 | * .remove = driver_remove, |
450 | * .shutdown = driver_shutdown, |
451 | * }; |
452 | * module_platform_driver(driver_driver); |
453 | * |
454 | * Drivers that want to support device unplugging (USB, DT overlay unload) should |
455 | * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect |
456 | * regions that is accessing device resources to prevent use after they're |
457 | * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one |
458 | * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before |
459 | * drm_atomic_helper_shutdown() is called. This means that if the disable code |
460 | * paths are protected, they will not run on regular driver module unload, |
461 | * possibly leaving the hardware enabled. |
462 | */ |
463 | |
464 | /** |
465 | * drm_put_dev - Unregister and release a DRM device |
466 | * @dev: DRM device |
467 | * |
468 | * Called at module unload time or when a PCI device is unplugged. |
469 | * |
470 | * Cleans up all DRM device, calling drm_lastclose(). |
471 | * |
472 | * Note: Use of this function is deprecated. It will eventually go away |
473 | * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly |
474 | * instead to make sure that the device isn't userspace accessible any more |
475 | * while teardown is in progress, ensuring that userspace can't access an |
476 | * inconsistent state. |
477 | */ |
478 | void drm_put_dev(struct drm_device *dev) |
479 | { |
480 | DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n"); |
481 | |
/* Tolerate a NULL device: log and bail rather than crash — this path is
 * reachable from driver detach code that may run before attach finished. */
482 | if (!dev) { |
483 | DRM_ERROR("cleanup called no dev\n")__drm_err("cleanup called no dev\n"); |
484 | return; |
485 | } |
486 | |
/* Unregister first so userspace loses access, then drop the final ref
 * (which triggers drm_dev_release once the refcount hits zero). */
487 | drm_dev_unregister(dev); |
488 | drm_dev_put(dev); |
489 | } |
490 | EXPORT_SYMBOL(drm_put_dev); |
491 | |
492 | /** |
493 | * drm_dev_enter - Enter device critical section |
494 | * @dev: DRM device |
495 | * @idx: Pointer to index that will be passed to the matching drm_dev_exit() |
496 | * |
497 | * This function marks and protects the beginning of a section that should not |
498 | * be entered after the device has been unplugged. The section end is marked |
499 | * with drm_dev_exit(). Calls to this function can be nested. |
500 | * |
501 | * Returns: |
502 | * True if it is OK to enter the section, false otherwise. |
503 | */ |
/* OpenBSD: the SRCU-based unplug protection is compiled out (#ifdef notyet),
 * so this always reports that entering the section is OK. *idx is left
 * untouched in that case; drm_dev_exit() is correspondingly a no-op. */
504 | bool_Bool drm_dev_enter(struct drm_device *dev, int *idx) |
505 | { |
506 | #ifdef notyet |
507 | *idx = srcu_read_lock(&drm_unplug_srcu)0; |
508 | |
509 | if (dev->unplugged) { |
510 | srcu_read_unlock(&drm_unplug_srcu, *idx); |
511 | return false0; |
512 | } |
513 | #endif |
514 | |
515 | return true1; |
516 | } |
517 | EXPORT_SYMBOL(drm_dev_enter); |
518 | |
519 | /** |
520 | * drm_dev_exit - Exit device critical section |
521 | * @idx: index returned from drm_dev_enter() |
522 | * |
523 | * This function marks the end of a section that should not be entered after |
524 | * the device has been unplugged. |
525 | */ |
/* Counterpart of drm_dev_enter(); a no-op on OpenBSD while the SRCU
 * unplug machinery remains under #ifdef notyet. */
526 | void drm_dev_exit(int idx) |
527 | { |
528 | #ifdef notyet |
529 | srcu_read_unlock(&drm_unplug_srcu, idx); |
530 | #endif |
531 | } |
532 | EXPORT_SYMBOL(drm_dev_exit); |
533 | |
534 | /** |
535 | * drm_dev_unplug - unplug a DRM device |
536 | * @dev: DRM device |
537 | * |
538 | * This unplugs a hotpluggable DRM device, which makes it inaccessible to |
539 | * userspace operations. Entry-points can use drm_dev_enter() and |
540 | * drm_dev_exit() to protect device resources in a race free manner. This |
541 | * essentially unregisters the device like drm_dev_unregister(), but can be |
542 | * called while there are still open users of @dev. |
543 | */ |
/* Not implemented on OpenBSD: prints a "stub" message and returns. The
 * Linux implementation (kept under #ifdef notyet for reference) would mark
 * the device unplugged, synchronize SRCU readers, unregister the device
 * and unmap userspace CPU mappings. */
544 | void drm_dev_unplug(struct drm_device *dev) |
545 | { |
546 | STUB()do { printf("%s: stub\n", __func__); } while(0); |
547 | #ifdef notyet |
548 | /* |
549 | * After synchronizing any critical read section is guaranteed to see |
550 | * the new value of ->unplugged, and any critical section which might |
551 | * still have seen the old value of ->unplugged is guaranteed to have |
552 | * finished. |
553 | */ |
554 | dev->unplugged = true1; |
555 | synchronize_srcu(&drm_unplug_srcu); |
556 | |
557 | drm_dev_unregister(dev); |
558 | |
559 | /* Clear all CPU mappings pointing to this device */ |
560 | unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1); |
561 | #endif |
562 | } |
563 | EXPORT_SYMBOL(drm_dev_unplug); |
564 | |
565 | #ifdef __linux__ |
566 | /* |
567 | * DRM internal mount |
568 | * We want to be able to allocate our own "struct address_space" to control |
569 | * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow |
570 | * stand-alone address_space objects, so we need an underlying inode. As there |
571 | * is no way to allocate an independent inode easily, we need a fake internal |
572 | * VFS mount-point. |
573 | * |
574 | * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() |
575 | * frees it again. You are allowed to use iget() and iput() to get references to |
576 | * the inode. But each drm_fs_inode_new() call must be paired with exactly one |
577 | * drm_fs_inode_free() call (which does not have to be the last iput()). |
578 | * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it |
579 | * between multiple inode-users. You could, technically, call |
580 | * iget() + drm_fs_inode_free() directly after alloc and sometime later do an |
581 | * iput(), but this way you'd end up with a new vfsmount for each inode. |
582 | */ |
583 | |
584 | static int drm_fs_cnt; |
585 | static struct vfsmount *drm_fs_mnt; |
586 | |
/* fs_context init hook for the internal "drm" pseudo-filesystem; the magic
 * number 0x010203ff identifies the superblock. Returns -ENOMEM on failure. */
587 | static int drm_fs_init_fs_context(struct fs_context *fc) |
588 | { |
589 | return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM12; |
590 | } |
591 | |
/* Filesystem type backing the internal VFS mount used to mint anonymous
 * inodes (see the "DRM internal mount" comment above). Linux-only. */
592 | static struct file_system_type drm_fs_type = { |
593 | .name = "drm", |
594 | .owner = THIS_MODULE((void *)0), |
595 | .init_fs_context = drm_fs_init_fs_context, |
596 | .kill_sb = kill_anon_super, |
597 | }; |
598 | |
/* Pin the internal drm mount and allocate one anonymous inode on it.
 * On inode allocation failure the mount pin is released again, so callers
 * only ever pair a successful return with drm_fs_inode_free().
 * Returns the inode or an ERR_PTR. */
599 | static struct inode *drm_fs_inode_new(void) |
600 | { |
601 | struct inode *inode; |
602 | int r; |
603 | |
604 | r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); |
605 | if (r < 0) { |
606 | DRM_ERROR("Cannot mount pseudo fs: %d\n", r)__drm_err("Cannot mount pseudo fs: %d\n", r); |
607 | return ERR_PTR(r); |
608 | } |
609 | |
610 | inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); |
611 | if (IS_ERR(inode)) |
612 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); |
613 | |
614 | return inode; |
615 | } |
616 | |
/* Release an inode from drm_fs_inode_new(): drop the inode reference and
 * unpin the internal mount. Safe to call with NULL (no-op). */
617 | static void drm_fs_inode_free(struct inode *inode) |
618 | { |
619 | if (inode) { |
620 | iput(inode); |
621 | simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); |
622 | } |
623 | } |
624 | |
625 | #endif /* __linux__ */ |
626 | |
627 | /** |
628 | * DOC: component helper usage recommendations |
629 | * |
630 | * DRM drivers that drive hardware where a logical device consists of a pile of |
631 | * independent hardware blocks are recommended to use the :ref:`component helper |
632 | * library<component>`. For consistency and better options for code reuse the |
633 | * following guidelines apply: |
634 | * |
635 | * - The entire device initialization procedure should be run from the |
636 | * &component_master_ops.master_bind callback, starting with |
637 | * devm_drm_dev_alloc(), then binding all components with |
638 | * component_bind_all() and finishing with drm_dev_register(). |
639 | * |
640 | * - The opaque pointer passed to all components through component_bind_all() |
641 | * should point at &struct drm_device of the device instance, not some driver |
642 | * specific private structure. |
643 | * |
644 | * - The component helper fills the niche where further standardization of |
645 | * interfaces is not practical. When there already is, or will be, a |
646 | * standardized interface like &drm_bridge or &drm_panel, providing its own |
647 | * functions to find such components at driver load time, like |
648 | * drm_of_find_panel_or_bridge(), then the component helper should not be |
649 | * used. |
650 | */ |
651 | |
/* Managed-resource release callback registered by drm_dev_init(): tears
 * down, in reverse order of initialization, the legacy state, the anon
 * inode / device reference (Linux), and the device mutexes. */
652 | static void drm_dev_init_release(struct drm_device *dev, void *res) |
653 | { |
654 | drm_legacy_ctxbitmap_cleanup(dev); |
655 | drm_legacy_remove_map_hash(dev); |
656 | #ifdef __linux__ |
657 | drm_fs_inode_free(dev->anon_inode); |
658 | |
659 | put_device(dev->dev); |
660 | #endif |
661 | /* Prevent use-after-free in drm_managed_release when debugging is |
662 | * enabled. Slightly awkward, but can't really be helped. */ |
663 | dev->dev = NULL((void *)0); |
664 | mutex_destroy(&dev->master_mutex); |
665 | mutex_destroy(&dev->clientlist_mutex); |
666 | mutex_destroy(&dev->filelist_mutex); |
667 | mutex_destroy(&dev->struct_mutex); |
668 | drm_legacy_destroy_members(dev); |
669 | } |
670 | |
671 | #ifdef notyet |
672 | |
/* Initialize an already-allocated drm_device (Linux path, currently under
 * #ifdef notyet on OpenBSD): takes a reference on @parent, sets up lists,
 * locks, minors, the legacy map hash and GEM, and registers
 * drm_dev_init_release() so all of it unwinds via the managed-resource
 * machinery. On any error after that registration, drm_managed_release()
 * performs the partial teardown. Returns 0 or a negative errno. */
673 | static int drm_dev_init(struct drm_device *dev, |
674 | const struct drm_driver *driver, |
675 | struct device *parent) |
676 | { |
677 | struct inode *inode; |
678 | int ret; |
679 | |
680 | if (!drm_core_init_complete) { |
681 | DRM_ERROR("DRM core is not initialized\n")__drm_err("DRM core is not initialized\n"); |
682 | return -ENODEV19; |
683 | } |
684 | |
685 | if (WARN_ON(!parent)({ int __ret = !!(!parent); if (__ret) printf("WARNING %s failed at %s:%d\n" , "!parent", "/usr/src/sys/dev/pci/drm/drm_drv.c", 685); __builtin_expect (!!(__ret), 0); })) |
686 | return -EINVAL22; |
687 | |
688 | kref_init(&dev->ref); |
689 | dev->dev = get_device(parent); |
690 | dev->driver = driver; |
691 | |
692 | INIT_LIST_HEAD(&dev->managed.resources); |
693 | spin_lock_init(&dev->managed.lock); |
694 | |
695 | /* no per-device feature limits by default */ |
696 | dev->driver_features = ~0u; |
697 | |
698 | drm_legacy_init_members(dev); |
699 | INIT_LIST_HEAD(&dev->filelist); |
700 | INIT_LIST_HEAD(&dev->filelist_internal); |
701 | INIT_LIST_HEAD(&dev->clientlist); |
702 | INIT_LIST_HEAD(&dev->vblank_event_list); |
703 | |
704 | spin_lock_init(&dev->event_lock); |
705 | mutex_init(&dev->struct_mutex); |
706 | mutex_init(&dev->filelist_mutex); |
707 | mutex_init(&dev->clientlist_mutex); |
708 | mutex_init(&dev->master_mutex); |
709 | |
/* From here on, failures unwind through drm_dev_init_release(). */
710 | ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL((void *)0)); |
711 | if (ret) |
712 | return ret; |
713 | |
714 | inode = drm_fs_inode_new(); |
715 | if (IS_ERR(inode)) { |
716 | ret = PTR_ERR(inode); |
717 | DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret)__drm_err("Cannot allocate anonymous inode: %d\n", ret); |
718 | goto err; |
719 | } |
720 | |
721 | dev->anon_inode = inode; |
722 | |
723 | if (drm_core_check_feature(dev, DRIVER_RENDER)) { |
724 | ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); |
725 | if (ret) |
726 | goto err; |
727 | } |
728 | |
729 | ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); |
730 | if (ret) |
731 | goto err; |
732 | |
733 | ret = drm_legacy_create_map_hash(dev); |
734 | if (ret) |
735 | goto err; |
736 | |
737 | drm_legacy_ctxbitmap_init(dev); |
738 | |
739 | if (drm_core_check_feature(dev, DRIVER_GEM)) { |
740 | ret = drm_gem_init(dev); |
741 | if (ret) { |
742 | DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n")__drm_err("Cannot initialize graphics execution manager (GEM)\n" ); |
743 | goto err; |
744 | } |
745 | } |
746 | |
747 | ret = drm_dev_set_unique(dev, dev_name(parent)""); |
748 | if (ret) |
749 | goto err; |
750 | |
751 | return 0; |
752 | |
753 | err: |
754 | drm_managed_release(dev); |
755 | |
756 | return ret; |
757 | } |
758 | |
/* devres action: drop the drm_device reference when the parent struct
 * device is unbound. @data is the struct drm_device pointer. */
759 | static void devm_drm_dev_init_release(void *data) |
760 | { |
761 | drm_dev_put(data); |
762 | } |
763 | |
/* devres-managed wrapper around drm_dev_init(): on success, arranges for
 * devm_drm_dev_init_release() to drop the device reference automatically
 * when @parent is unbound. Returns 0 or a negative errno. */
764 | static int devm_drm_dev_init(struct device *parent, |
765 | struct drm_device *dev, |
766 | const struct drm_driver *driver) |
767 | { |
768 | int ret; |
769 | |
770 | ret = drm_dev_init(dev, driver, parent); |
771 | if (ret) |
772 | return ret; |
773 | |
774 | return devm_add_action_or_reset(parent, |
775 | devm_drm_dev_init_release, dev); |
776 | } |
777 | |
778 | #endif |
779 | |
780 | void *__devm_drm_dev_alloc(struct device *parent, |
781 | const struct drm_driver *driver, |
782 | size_t size, size_t offset) |
783 | { |
784 | void *container; |
785 | struct drm_device *drm; |
786 | #ifdef notyet |
787 | int ret; |
788 | #endif |
789 | |
790 | container = kzalloc(size, GFP_KERNEL(0x0001 | 0x0004)); |
791 | if (!container) |
792 | return ERR_PTR(-ENOMEM12); |
793 | |
794 | drm = container + offset; |
Value stored to 'drm' is never read | |
795 | #ifdef notyet |
796 | ret = devm_drm_dev_init(parent, drm, driver); |
797 | if (ret) { |
798 | kfree(container); |
799 | return ERR_PTR(ret); |
800 | } |
801 | drmm_add_final_kfree(drm, container); |
802 | #endif |
803 | |
804 | return container; |
805 | } |
806 | EXPORT_SYMBOL(__devm_drm_dev_alloc); |
807 | |
808 | #ifdef notyet |
809 | |
810 | /** |
811 | * drm_dev_alloc - Allocate new DRM device |
812 | * @driver: DRM driver to allocate device for |
813 | * @parent: Parent device object |
814 | * |
815 | * This is the deprecated version of devm_drm_dev_alloc(), which does not support |
816 | * subclassing through embedding the struct &drm_device in a driver private |
817 | * structure, and which does not support automatic cleanup through devres. |
818 | * |
819 | * RETURNS: |
820 | * Pointer to new DRM device, or ERR_PTR on failure. |
821 | */ |
822 | struct drm_device *drm_dev_alloc(const struct drm_driver *driver, |
823 | struct device *parent) |
824 | { |
825 | struct drm_device *dev; |
826 | int ret; |
827 | |
828 | dev = kzalloc(sizeof(*dev), GFP_KERNEL(0x0001 | 0x0004)); |
829 | if (!dev) |
830 | return ERR_PTR(-ENOMEM12); |
831 | |
832 | ret = drm_dev_init(dev, driver, parent); |
833 | if (ret) { |
834 | kfree(dev); |
835 | return ERR_PTR(ret); |
836 | } |
837 | |
/* Once registered as the final kfree target, the allocation is owned by
 * the managed-resource machinery and freed in drm_dev_release(). */
838 | drmm_add_final_kfree(dev, dev); |
839 | |
840 | return dev; |
841 | } |
842 | EXPORT_SYMBOL(drm_dev_alloc); |
843 | |
844 | #endif |
845 | |
846 | static void drm_dev_release(struct kref *ref) |
847 | { |
848 | struct drm_device *dev = container_of(ref, struct drm_device, ref)({ const __typeof( ((struct drm_device *)0)->ref ) *__mptr = (ref); (struct drm_device *)( (char *)__mptr - __builtin_offsetof (struct drm_device, ref) );}); |
849 | |
850 | if (dev->driver->release) |
851 | dev->driver->release(dev); |
852 | |
853 | drm_managed_release(dev); |
854 | |
855 | kfree(dev->managed.final_kfree); |
856 | } |
857 | |
858 | /** |
859 | * drm_dev_get - Take reference of a DRM device |
860 | * @dev: device to take reference of or NULL |
861 | * |
862 | * This increases the ref-count of @dev by one. You *must* already own a |
863 | * reference when calling this. Use drm_dev_put() to drop this reference |
864 | * again. |
865 | * |
866 | * This function never fails. However, this function does not provide *any* |
867 | * guarantee whether the device is alive or running. It only provides a |
868 | * reference to the object and the memory associated with it. |
869 | */ |
870 | void drm_dev_get(struct drm_device *dev) |
871 | { |
872 | if (dev) |
873 | kref_get(&dev->ref); |
874 | } |
875 | EXPORT_SYMBOL(drm_dev_get); |
876 | |
877 | /** |
878 | * drm_dev_put - Drop reference of a DRM device |
879 | * @dev: device to drop reference of or NULL |
880 | * |
881 | * This decreases the ref-count of @dev by one. The device is destroyed if the |
882 | * ref-count drops to zero. |
883 | */ |
884 | void drm_dev_put(struct drm_device *dev) |
885 | { |
886 | if (dev) |
887 | kref_put(&dev->ref, drm_dev_release); |
888 | } |
889 | EXPORT_SYMBOL(drm_dev_put); |
890 | |
891 | static int create_compat_control_link(struct drm_device *dev) |
892 | { |
893 | struct drm_minor *minor; |
894 | char *name; |
895 | int ret; |
896 | |
897 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
898 | return 0; |
899 | |
900 | minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); |
901 | if (!minor) |
902 | return 0; |
903 | |
904 | /* |
905 | * Some existing userspace out there uses the existing of the controlD* |
906 | * sysfs files to figure out whether it's a modeset driver. It only does |
907 | * readdir, hence a symlink is sufficient (and the least confusing |
908 | * option). Otherwise controlD* is entirely unused. |
909 | * |
910 | * Old controlD chardev have been allocated in the range |
911 | * 64-127. |
912 | */ |
913 | name = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "controlD%d", minor->index + 64); |
914 | if (!name) |
915 | return -ENOMEM12; |
916 | |
917 | ret = sysfs_create_link(minor->kdev->kobj.parent,0 |
918 | &minor->kdev->kobj,0 |
919 | name)0; |
920 | |
921 | kfree(name); |
922 | |
923 | return ret; |
924 | } |
925 | |
926 | static void remove_compat_control_link(struct drm_device *dev) |
927 | { |
928 | struct drm_minor *minor; |
929 | char *name; |
930 | |
931 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
932 | return; |
933 | |
934 | minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY); |
935 | if (!minor) |
936 | return; |
937 | |
938 | name = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "controlD%d", minor->index + 64); |
939 | if (!name) |
940 | return; |
941 | |
942 | sysfs_remove_link(minor->kdev->kobj.parent, name); |
943 | |
944 | kfree(name); |
945 | } |
946 | |
947 | /** |
948 | * drm_dev_register - Register DRM device |
949 | * @dev: Device to register |
950 | * @flags: Flags passed to the driver's .load() function |
951 | * |
952 | * Register the DRM device @dev with the system, advertise device to user-space |
953 | * and start normal device operation. @dev must be initialized via drm_dev_init() |
954 | * previously. |
955 | * |
956 | * Never call this twice on any device! |
957 | * |
958 | * NOTE: To ensure backward compatibility with existing drivers method this |
959 | * function calls the &drm_driver.load method after registering the device |
960 | * nodes, creating race conditions. Usage of the &drm_driver.load methods is |
961 | * therefore deprecated, drivers must perform all initialization before calling |
962 | * drm_dev_register(). |
963 | * |
964 | * RETURNS: |
965 | * 0 on success, negative error code on failure. |
966 | */ |
967 | int drm_dev_register(struct drm_device *dev, unsigned long flags) |
968 | { |
969 | const struct drm_driver *driver = dev->driver; |
970 | int ret; |
971 | |
972 | if (!driver->load) |
973 | drm_mode_config_validate(dev); |
974 | |
975 | WARN_ON(!dev->managed.final_kfree)({ int __ret = !!(!dev->managed.final_kfree); if (__ret) printf ("WARNING %s failed at %s:%d\n", "!dev->managed.final_kfree" , "/usr/src/sys/dev/pci/drm/drm_drv.c", 975); __builtin_expect (!!(__ret), 0); }); |
976 | |
977 | if (drm_dev_needs_global_mutex(dev)) |
978 | mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex); |
979 | |
980 | ret = drm_minor_register(dev, DRM_MINOR_RENDER); |
981 | if (ret) |
982 | goto err_minors; |
983 | |
984 | ret = drm_minor_register(dev, DRM_MINOR_PRIMARY); |
985 | if (ret) |
986 | goto err_minors; |
987 | |
988 | ret = create_compat_control_link(dev); |
989 | if (ret) |
990 | goto err_minors; |
991 | |
992 | dev->registered = true1; |
993 | |
994 | if (dev->driver->load) { |
995 | ret = dev->driver->load(dev, flags); |
996 | if (ret) |
997 | goto err_minors; |
998 | } |
999 | |
1000 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1001 | drm_modeset_register_all(dev); |
1002 | |
1003 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n" , driver->name, driver->major, driver->minor, driver ->patchlevel, driver->date, dev->dev ? "" : "virtual device" , dev->primary->index) |
1004 | driver->name, driver->major, driver->minor,printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n" , driver->name, driver->major, driver->minor, driver ->patchlevel, driver->date, dev->dev ? "" : "virtual device" , dev->primary->index) |
1005 | driver->patchlevel, driver->date,printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n" , driver->name, driver->major, driver->minor, driver ->patchlevel, driver->date, dev->dev ? "" : "virtual device" , dev->primary->index) |
1006 | dev->dev ? dev_name(dev->dev) : "virtual device",printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n" , driver->name, driver->major, driver->minor, driver ->patchlevel, driver->date, dev->dev ? "" : "virtual device" , dev->primary->index) |
1007 | dev->primary->index)printk("\0016" "[" "drm" "] " "Initialized %s %d.%d.%d %s for %s on minor %d\n" , driver->name, driver->major, driver->minor, driver ->patchlevel, driver->date, dev->dev ? "" : "virtual device" , dev->primary->index); |
1008 | |
1009 | goto out_unlock; |
1010 | |
1011 | err_minors: |
1012 | remove_compat_control_link(dev); |
1013 | drm_minor_unregister(dev, DRM_MINOR_PRIMARY); |
1014 | drm_minor_unregister(dev, DRM_MINOR_RENDER); |
1015 | out_unlock: |
1016 | if (drm_dev_needs_global_mutex(dev)) |
1017 | mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex); |
1018 | return ret; |
1019 | } |
1020 | EXPORT_SYMBOL(drm_dev_register); |
1021 | |
1022 | /** |
1023 | * drm_dev_unregister - Unregister DRM device |
1024 | * @dev: Device to unregister |
1025 | * |
1026 | * Unregister the DRM device from the system. This does the reverse of |
1027 | * drm_dev_register() but does not deallocate the device. The caller must call |
1028 | * drm_dev_put() to drop their final reference. |
1029 | * |
1030 | * A special form of unregistering for hotpluggable devices is drm_dev_unplug(), |
1031 | * which can be called while there are still open users of @dev. |
1032 | * |
1033 | * This should be called first in the device teardown code to make sure |
1034 | * userspace can't access the device instance any more. |
1035 | */ |
1036 | void drm_dev_unregister(struct drm_device *dev) |
1037 | { |
1038 | if (drm_core_check_feature(dev, DRIVER_LEGACY)) |
1039 | drm_lastclose(dev); |
1040 | |
1041 | dev->registered = false0; |
1042 | |
1043 | drm_client_dev_unregister(dev); |
1044 | |
1045 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1046 | drm_modeset_unregister_all(dev); |
1047 | |
1048 | if (dev->driver->unload) |
1049 | dev->driver->unload(dev); |
1050 | |
1051 | drm_legacy_pci_agp_destroy(dev); |
1052 | drm_legacy_rmmaps(dev); |
1053 | |
1054 | remove_compat_control_link(dev); |
1055 | drm_minor_unregister(dev, DRM_MINOR_PRIMARY); |
1056 | drm_minor_unregister(dev, DRM_MINOR_RENDER); |
1057 | } |
1058 | EXPORT_SYMBOL(drm_dev_unregister); |
1059 | |
1060 | /** |
1061 | * drm_dev_set_unique - Set the unique name of a DRM device |
1062 | * @dev: device of which to set the unique name |
1063 | * @name: unique name |
1064 | * |
1065 | * Sets the unique name of a DRM device using the specified string. This is |
1066 | * already done by drm_dev_init(), drivers should only override the default |
1067 | * unique name for backwards compatibility reasons. |
1068 | * |
1069 | * Return: 0 on success or a negative error code on failure. |
1070 | */ |
1071 | int drm_dev_set_unique(struct drm_device *dev, const char *name) |
1072 | { |
1073 | drmm_kfree(dev, dev->unique); |
1074 | dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL(0x0001 | 0x0004)); |
1075 | |
1076 | return dev->unique ? 0 : -ENOMEM12; |
1077 | } |
1078 | EXPORT_SYMBOL(drm_dev_set_unique); |
1079 | |
1080 | /* |
1081 | * DRM Core |
1082 | * The DRM core module initializes all global DRM objects and makes them |
1083 | * available to drivers. Once setup, drivers can probe their respective |
1084 | * devices. |
1085 | * Currently, core management includes: |
1086 | * - The "DRM-Global" key/value database |
1087 | * - Global ID management for connectors |
1088 | * - DRM major number allocation |
1089 | * - DRM minor management |
1090 | * - DRM sysfs class |
1091 | * - DRM debugfs root |
1092 | * |
1093 | * Furthermore, the DRM core provides dynamic char-dev lookups. For each |
1094 | * interface registered on a DRM device, you can request minor numbers from DRM |
1095 | * core. DRM core takes care of major-number management and char-dev |
1096 | * registration. A stub ->open() callback forwards any open() requests to the |
1097 | * registered minor. |
1098 | */ |
1099 | |
1100 | #ifdef __linux__ |
1101 | static int drm_stub_open(struct inode *inode, struct file *filp) |
1102 | { |
1103 | const struct file_operations *new_fops; |
1104 | struct drm_minor *minor; |
1105 | int err; |
1106 | |
1107 | DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n"); |
1108 | |
1109 | minor = drm_minor_acquire(iminor(inode)); |
1110 | if (IS_ERR(minor)) |
1111 | return PTR_ERR(minor); |
1112 | |
1113 | new_fops = fops_get(minor->dev->driver->fops); |
1114 | if (!new_fops) { |
1115 | err = -ENODEV19; |
1116 | goto out; |
1117 | } |
1118 | |
1119 | replace_fops(filp, new_fops); |
1120 | if (filp->f_op->open) |
1121 | err = filp->f_op->open(inode, filp); |
1122 | else |
1123 | err = 0; |
1124 | |
1125 | out: |
1126 | drm_minor_release(minor); |
1127 | |
1128 | return err; |
1129 | } |
1130 | |
1131 | static const struct file_operations drm_stub_fops = { |
1132 | .owner = THIS_MODULE((void *)0), |
1133 | .open = drm_stub_open, |
1134 | .llseek = noop_llseek, |
1135 | }; |
1136 | #endif /* __linux__ */ |
1137 | |
/*
 * Tear down the global DRM core state set up by drm_core_init():
 * privacy-screen lookups, (on Linux) the chardev/debugfs/sysfs
 * registrations, the minor IDR, and the connector IDA.
 */
static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
#ifdef __linux__
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
#endif
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}
1149 | |
1150 | static int __init drm_core_init(void) |
1151 | { |
1152 | #ifdef __linux__ |
1153 | int ret; |
1154 | #endif |
1155 | |
1156 | drm_connector_ida_init(); |
1157 | idr_init(&drm_minors_idr); |
1158 | drm_memcpy_init_early(); |
1159 | |
1160 | #ifdef __linux__ |
1161 | ret = drm_sysfs_init(); |
1162 | if (ret < 0) { |
1163 | DRM_ERROR("Cannot create DRM class: %d\n", ret)__drm_err("Cannot create DRM class: %d\n", ret); |
1164 | goto error; |
1165 | } |
1166 | |
1167 | drm_debugfs_root = debugfs_create_dir("dri", NULL)ERR_PTR(-78); |
1168 | |
1169 | ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); |
1170 | if (ret < 0) |
1171 | goto error; |
1172 | #endif |
1173 | |
1174 | drm_privacy_screen_lookup_init(); |
1175 | |
1176 | drm_core_init_complete = true1; |
1177 | |
1178 | DRM_DEBUG("Initialized\n")___drm_dbg(((void *)0), DRM_UT_CORE, "Initialized\n"); |
1179 | return 0; |
1180 | #ifdef __linux__ |
1181 | error: |
1182 | drm_core_exit(); |
1183 | return ret; |
1184 | #endif |
1185 | } |
1186 | |
1187 | #ifdef __linux__ |
1188 | module_init(drm_core_init); |
1189 | module_exit(drm_core_exit); |
1190 | #endif |
1191 | |
1192 | void |
1193 | drm_attach_platform(struct drm_driver *driver, bus_space_tag_t iot, |
1194 | bus_dma_tag_t dmat, struct device *dev, struct drm_device *drm) |
1195 | { |
1196 | struct drm_attach_args arg; |
1197 | |
1198 | memset(&arg, 0, sizeof(arg))__builtin_memset((&arg), (0), (sizeof(arg))); |
1199 | arg.driver = driver; |
1200 | arg.bst = iot; |
1201 | arg.dmat = dmat; |
1202 | arg.drm = drm; |
1203 | |
1204 | arg.busid = dev->dv_xname; |
1205 | arg.busid_len = strlen(dev->dv_xname) + 1; |
1206 | config_found_sm(dev, &arg, drmprint, drmsubmatch); |
1207 | } |
1208 | |
1209 | struct drm_device * |
1210 | drm_attach_pci(const struct drm_driver *driver, struct pci_attach_args *pa, |
1211 | int is_agp, int primary, struct device *dev, struct drm_device *drm) |
1212 | { |
1213 | struct drm_attach_args arg; |
1214 | struct drm_softc *sc; |
1215 | |
1216 | arg.drm = drm; |
1217 | arg.driver = driver; |
1218 | arg.dmat = pa->pa_dmat; |
1219 | arg.bst = pa->pa_memt; |
1220 | arg.is_agp = is_agp; |
1221 | arg.primary = primary; |
1222 | arg.pa = pa; |
1223 | |
1224 | arg.busid_len = 20; |
1225 | arg.busid = malloc(arg.busid_len + 1, M_DRM145, M_NOWAIT0x0002); |
1226 | if (arg.busid == NULL((void *)0)) { |
1227 | printf("%s: no memory for drm\n", dev->dv_xname); |
1228 | return (NULL((void *)0)); |
1229 | } |
1230 | snprintf(arg.busid, arg.busid_len, "pci:%04x:%02x:%02x.%1x", |
1231 | pa->pa_domain, pa->pa_bus, pa->pa_device, pa->pa_function); |
1232 | |
1233 | sc = (struct drm_softc *)config_found_sm(dev, &arg, drmprint, drmsubmatch); |
1234 | if (sc == NULL((void *)0)) |
1235 | return NULL((void *)0); |
1236 | |
1237 | return sc->sc_drm; |
1238 | } |
1239 | |
1240 | int |
1241 | drmprint(void *aux, const char *pnp) |
1242 | { |
1243 | if (pnp != NULL((void *)0)) |
1244 | printf("drm at %s", pnp); |
1245 | return (UNCONF1); |
1246 | } |
1247 | |
/*
 * autoconf submatch function: restrict attachment under this parent to
 * the drm driver itself, then defer to its normal match function.
 */
int
drmsubmatch(struct device *parent, void *match, void *aux)
{
	extern struct cfdriver drm_cd;
	struct cfdata *cf = match;

	/* only allow drm to attach */
	if (cf->cf_driver == &drm_cd)
		return ((*cf->cf_attach->ca_match)(parent, match, aux));
	return (0);
}
1259 | |
1260 | int |
1261 | drm_pciprobe(struct pci_attach_args *pa, const struct pci_device_id *idlist) |
1262 | { |
1263 | const struct pci_device_id *id_entry; |
1264 | |
1265 | id_entry = drm_find_description(PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff), |
1266 | PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff), idlist); |
1267 | if (id_entry != NULL((void *)0)) |
1268 | return 1; |
1269 | |
1270 | return 0; |
1271 | } |
1272 | |
1273 | int |
1274 | drm_probe(struct device *parent, void *match, void *aux) |
1275 | { |
1276 | struct cfdata *cf = match; |
1277 | struct drm_attach_args *da = aux; |
1278 | |
1279 | if (cf->drmdevcf_primarycf_loc[0] != DRMDEVCF_PRIMARY_UNK-1) { |
1280 | /* |
1281 | * If primary-ness of device specified, either match |
1282 | * exactly (at high priority), or fail. |
1283 | */ |
1284 | if (cf->drmdevcf_primarycf_loc[0] != 0 && da->primary != 0) |
1285 | return (10); |
1286 | else |
1287 | return (0); |
1288 | } |
1289 | |
1290 | /* If primary-ness unspecified, it wins. */ |
1291 | return (1); |
1292 | } |
1293 | |
1294 | int drm_buddy_module_init(void); |
1295 | void drm_buddy_module_exit(void); |
1296 | |
1297 | void |
1298 | drm_attach(struct device *parent, struct device *self, void *aux) |
1299 | { |
1300 | struct drm_softc *sc = (struct drm_softc *)self; |
1301 | struct drm_attach_args *da = aux; |
1302 | struct drm_device *dev = da->drm; |
1303 | int ret; |
1304 | |
1305 | if (drm_refcnt == 0) { |
1306 | drm_linux_init(); |
1307 | drm_core_init(); |
1308 | drm_buddy_module_init(); |
1309 | } |
1310 | drm_refcnt++; |
1311 | |
1312 | if (dev == NULL((void *)0)) { |
1313 | dev = malloc(sizeof(struct drm_device), M_DRM145, |
1314 | M_WAITOK0x0001 | M_ZERO0x0008); |
1315 | sc->sc_allocated = 1; |
1316 | } |
1317 | |
1318 | sc->sc_drm = dev; |
1319 | |
1320 | kref_init(&dev->ref); |
1321 | dev->dev = self; |
1322 | dev->dev_private = parent; |
1323 | dev->driver = da->driver; |
1324 | |
1325 | INIT_LIST_HEAD(&dev->managed.resources); |
1326 | mtx_init(&dev->managed.lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> managed.lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9 ) ? 0x9 : ((0x9)))); } while (0); |
1327 | |
1328 | /* no per-device feature limits by default */ |
1329 | dev->driver_features = ~0u; |
1330 | |
1331 | dev->dmat = da->dmat; |
1332 | dev->bst = da->bst; |
1333 | dev->unique = da->busid; |
1334 | |
1335 | if (da->pa) { |
1336 | struct pci_attach_args *pa = da->pa; |
1337 | pcireg_t subsys; |
1338 | |
1339 | subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, |
1340 | PCI_SUBSYS_ID_REG0x2c); |
1341 | |
1342 | dev->pdev = &dev->_pdev; |
1343 | dev->pdev->vendor = PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff); |
1344 | dev->pdev->device = PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff); |
1345 | dev->pdev->subsystem_vendor = PCI_VENDOR(subsys)(((subsys) >> 0) & 0xffff); |
1346 | dev->pdev->subsystem_device = PCI_PRODUCT(subsys)(((subsys) >> 16) & 0xffff); |
1347 | dev->pdev->revision = PCI_REVISION(pa->pa_class)(((pa->pa_class) >> 0) & 0xff); |
1348 | dev->pdev->class = (PCI_CLASS(pa->pa_class)(((pa->pa_class) >> 24) & 0xff) << 16) | |
1349 | (PCI_SUBCLASS(pa->pa_class)(((pa->pa_class) >> 16) & 0xff) << 8) | |
1350 | PCI_INTERFACE(pa->pa_class)(((pa->pa_class) >> 8) & 0xff); |
1351 | |
1352 | dev->pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function)((pa->pa_device) << 3 | (pa->pa_function)); |
1353 | dev->pdev->bus = &dev->pdev->_bus; |
1354 | dev->pdev->bus->pc = pa->pa_pc; |
1355 | dev->pdev->bus->number = pa->pa_bus; |
1356 | dev->pdev->bus->domain_nr = pa->pa_domain; |
1357 | dev->pdev->bus->bridgetag = pa->pa_bridgetag; |
1358 | |
1359 | if (pa->pa_bridgetag != NULL((void *)0)) { |
1360 | dev->pdev->bus->self = malloc(sizeof(struct pci_dev), |
1361 | M_DRM145, M_WAITOK0x0001 | M_ZERO0x0008); |
1362 | dev->pdev->bus->self->pc = pa->pa_pc; |
1363 | dev->pdev->bus->self->tag = *pa->pa_bridgetag; |
1364 | } |
1365 | |
1366 | dev->pdev->pc = pa->pa_pc; |
1367 | dev->pdev->tag = pa->pa_tag; |
1368 | dev->pdev->pci = (struct pci_softc *)parent->dv_parent; |
1369 | |
1370 | #ifdef CONFIG_ACPI1 |
1371 | dev->pdev->dev.node = acpi_find_pci(pa->pa_pc, pa->pa_tag); |
1372 | aml_register_notify(dev->pdev->dev.node, NULL((void *)0), |
1373 | drm_linux_acpi_notify, NULL((void *)0), ACPIDEV_NOPOLL0); |
1374 | #endif |
1375 | } |
1376 | |
1377 | mtx_init(&dev->quiesce_mtx, IPL_NONE)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> quiesce_mtx), ((((0x0)) > 0x0 && ((0x0)) < 0x9) ? 0x9 : ((0x0)))); } while (0); |
1378 | mtx_init(&dev->event_lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&dev-> event_lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ? 0x9 : ((0x9)))); } while (0); |
1379 | rw_init(&dev->struct_mutex, "drmdevlk")_rw_init_flags(&dev->struct_mutex, "drmdevlk", 0, ((void *)0)); |
1380 | rw_init(&dev->filelist_mutex, "drmflist")_rw_init_flags(&dev->filelist_mutex, "drmflist", 0, (( void *)0)); |
1381 | rw_init(&dev->clientlist_mutex, "drmclist")_rw_init_flags(&dev->clientlist_mutex, "drmclist", 0, ( (void *)0)); |
1382 | rw_init(&dev->master_mutex, "drmmast")_rw_init_flags(&dev->master_mutex, "drmmast", 0, ((void *)0)); |
1383 | |
1384 | ret = drmm_add_action(dev, drm_dev_init_release, NULL((void *)0)); |
1385 | if (ret) |
1386 | goto error; |
1387 | |
1388 | SPLAY_INIT(&dev->files)do { (&dev->files)->sph_root = ((void *)0); } while (0); |
1389 | INIT_LIST_HEAD(&dev->filelist_internal); |
1390 | INIT_LIST_HEAD(&dev->clientlist); |
1391 | INIT_LIST_HEAD(&dev->vblank_event_list); |
1392 | |
1393 | if (drm_core_check_feature(dev, DRIVER_RENDER)) { |
1394 | ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); |
1395 | if (ret) |
1396 | goto error; |
1397 | } |
1398 | |
1399 | ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); |
1400 | if (ret) |
1401 | goto error; |
1402 | |
1403 | #ifdef CONFIG_DRM_LEGACY |
1404 | if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { |
1405 | #if IS_ENABLED(CONFIG_AGP)1 |
1406 | if (da->is_agp) |
1407 | dev->agp = drm_agp_init(); |
1408 | #endif |
1409 | if (dev->agp != NULL((void *)0)) { |
1410 | if (drm_mtrr_add(dev->agp->info.ai_aperture_base, |
1411 | dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1)) == 0) |
1412 | dev->agp->mtrr = 1; |
1413 | } |
1414 | } |
1415 | #endif |
1416 | |
1417 | if (dev->driver->gem_size > 0) { |
1418 | KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object))((dev->driver->gem_size >= sizeof(struct drm_gem_object )) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/drm_drv.c" , 1418, "dev->driver->gem_size >= sizeof(struct drm_gem_object)" )); |
1419 | /* XXX unique name */ |
1420 | pool_init(&dev->objpl, dev->driver->gem_size, 0, IPL_NONE0x0, 0, |
1421 | "drmobjpl", NULL((void *)0)); |
1422 | } |
1423 | |
1424 | if (drm_core_check_feature(dev, DRIVER_GEM)) { |
1425 | ret = drm_gem_init(dev); |
1426 | if (ret) { |
1427 | DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n")__drm_err("Cannot initialize graphics execution manager (GEM)\n" ); |
1428 | goto error; |
1429 | } |
1430 | } |
1431 | |
1432 | drmm_add_final_kfree(dev, dev); |
1433 | |
1434 | printf("\n"); |
1435 | return; |
1436 | |
1437 | error: |
1438 | drm_managed_release(dev); |
1439 | dev->dev_private = NULL((void *)0); |
1440 | } |
1441 | |
1442 | int |
1443 | drm_detach(struct device *self, int flags) |
1444 | { |
1445 | struct drm_softc *sc = (struct drm_softc *)self; |
1446 | struct drm_device *dev = sc->sc_drm; |
1447 | |
1448 | drm_refcnt--; |
1449 | if (drm_refcnt == 0) { |
1450 | drm_buddy_module_exit(); |
1451 | drm_core_exit(); |
1452 | drm_linux_exit(); |
1453 | } |
1454 | |
1455 | drm_lastclose(dev); |
1456 | |
1457 | if (drm_core_check_feature(dev, DRIVER_GEM)) { |
1458 | if (dev->driver->gem_size > 0) |
1459 | pool_destroy(&dev->objpl); |
1460 | } |
1461 | |
1462 | #ifdef CONFIG_DRM_LEGACY |
1463 | if (dev->agp && dev->agp->mtrr) { |
1464 | int retcode; |
1465 | |
1466 | retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base, |
1467 | dev->agp->info.ai_aperture_size, DRM_MTRR_WC(1<<1)); |
1468 | DRM_DEBUG("mtrr_del = %d", retcode)___drm_dbg(((void *)0), DRM_UT_CORE, "mtrr_del = %d", retcode ); |
1469 | } |
1470 | |
1471 | free(dev->agp, M_DRM145, 0); |
1472 | #endif |
1473 | if (dev->pdev && dev->pdev->bus) |
1474 | free(dev->pdev->bus->self, M_DRM145, sizeof(struct pci_dev)); |
1475 | |
1476 | if (sc->sc_allocated) |
1477 | free(dev, M_DRM145, sizeof(struct drm_device)); |
1478 | |
1479 | return 0; |
1480 | } |
1481 | |
1482 | void |
1483 | drm_quiesce(struct drm_device *dev) |
1484 | { |
1485 | mtx_enter(&dev->quiesce_mtx); |
1486 | dev->quiesce = 1; |
1487 | while (dev->quiesce_count > 0) { |
1488 | msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx, |
1489 | PZERO22, "drmqui", INFSLP0xffffffffffffffffULL); |
1490 | } |
1491 | mtx_leave(&dev->quiesce_mtx); |
1492 | } |
1493 | |
/*
 * Resume path: clear the quiesce flag and wake anyone sleeping on
 * &dev->quiesce (presumably entry points that blocked while the
 * device was quiescing — sleepers live elsewhere in the driver).
 */
void
drm_wakeup(struct drm_device *dev)
{
	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce = 0;
	wakeup(&dev->quiesce);
	mtx_leave(&dev->quiesce_mtx);
}
1502 | |
1503 | int |
1504 | drm_activate(struct device *self, int act) |
1505 | { |
1506 | struct drm_softc *sc = (struct drm_softc *)self; |
1507 | struct drm_device *dev = sc->sc_drm; |
1508 | |
1509 | switch (act) { |
1510 | case DVACT_QUIESCE2: |
1511 | drm_quiesce(dev); |
1512 | break; |
1513 | case DVACT_WAKEUP5: |
1514 | drm_wakeup(dev); |
1515 | break; |
1516 | } |
1517 | |
1518 | return (0); |
1519 | } |
1520 | |
/* autoconf attachment glue for drm(4). */
const struct cfattach drm_ca = {
	sizeof(struct drm_softc), drm_probe, drm_attach,
	drm_detach, drm_activate
};
1525 | |
/* autoconf driver descriptor; unit softcs live in drm_cd.cd_devs[]. */
struct cfdriver drm_cd = {
	0, "drm", DV_DULL
};
1529 | |
1530 | const struct pci_device_id * |
1531 | drm_find_description(int vendor, int device, const struct pci_device_id *idlist) |
1532 | { |
1533 | int i = 0; |
1534 | |
1535 | for (i = 0; idlist[i].vendor != 0; i++) { |
1536 | if ((idlist[i].vendor == vendor) && |
1537 | (idlist[i].device == device || |
1538 | idlist[i].device == PCI_ANY_ID(uint16_t) (~0U)) && |
1539 | (idlist[i].subvendor == PCI_ANY_ID(uint16_t) (~0U)) && |
1540 | (idlist[i].subdevice == PCI_ANY_ID(uint16_t) (~0U))) |
1541 | return &idlist[i]; |
1542 | } |
1543 | return NULL((void *)0); |
1544 | } |
1545 | |
1546 | int |
1547 | drm_file_cmp(struct drm_file *f1, struct drm_file *f2) |
1548 | { |
1549 | return (f1->fminor < f2->fminor ? -1 : f1->fminor > f2->fminor); |
1550 | } |
1551 | |
1552 | SPLAY_GENERATE(drm_file_tree, drm_file, link, drm_file_cmp)struct drm_file * drm_file_tree_SPLAY_INSERT(struct drm_file_tree *head, struct drm_file *elm) { if (((head)->sph_root == ( (void *)0))) { (elm)->link.spe_left = (elm)->link.spe_right = ((void *)0); } else { int __comp; drm_file_tree_SPLAY(head , elm); __comp = (drm_file_cmp)(elm, (head)->sph_root); if (__comp < 0) { (elm)->link.spe_left = ((head)->sph_root )->link.spe_left; (elm)->link.spe_right = (head)->sph_root ; ((head)->sph_root)->link.spe_left = ((void *)0); } else if (__comp > 0) { (elm)->link.spe_right = ((head)-> sph_root)->link.spe_right; (elm)->link.spe_left = (head )->sph_root; ((head)->sph_root)->link.spe_right = (( void *)0); } else return ((head)->sph_root); } (head)-> sph_root = (elm); return (((void *)0)); } struct drm_file * drm_file_tree_SPLAY_REMOVE (struct drm_file_tree *head, struct drm_file *elm) { struct drm_file *__tmp; if (((head)->sph_root == ((void *)0))) return ((( void *)0)); drm_file_tree_SPLAY(head, elm); if ((drm_file_cmp )(elm, (head)->sph_root) == 0) { if (((head)->sph_root) ->link.spe_left == ((void *)0)) { (head)->sph_root = (( head)->sph_root)->link.spe_right; } else { __tmp = ((head )->sph_root)->link.spe_right; (head)->sph_root = ((head )->sph_root)->link.spe_left; drm_file_tree_SPLAY(head, elm ); ((head)->sph_root)->link.spe_right = __tmp; } return (elm); } return (((void *)0)); } void drm_file_tree_SPLAY(struct drm_file_tree *head, struct drm_file *elm) { struct drm_file __node, *__left, *__right, *__tmp; int __comp; (&__node) ->link.spe_left = (&__node)->link.spe_right = ((void *)0); __left = __right = &__node; while ((__comp = (drm_file_cmp )(elm, (head)->sph_root))) { if (__comp < 0) { __tmp = ( (head)->sph_root)->link.spe_left; if (__tmp == ((void * )0)) break; if ((drm_file_cmp)(elm, __tmp) < 0){ do { ((head )->sph_root)->link.spe_left = (__tmp)->link.spe_right ; (__tmp)->link.spe_right = (head)->sph_root; (head)-> sph_root = __tmp; } while (0); if 
(((head)->sph_root)-> link.spe_left == ((void *)0)) break; } do { (__right)->link .spe_left = (head)->sph_root; __right = (head)->sph_root ; (head)->sph_root = ((head)->sph_root)->link.spe_left ; } while (0); } else if (__comp > 0) { __tmp = ((head)-> sph_root)->link.spe_right; if (__tmp == ((void *)0)) break ; if ((drm_file_cmp)(elm, __tmp) > 0){ do { ((head)->sph_root )->link.spe_right = (__tmp)->link.spe_left; (__tmp)-> link.spe_left = (head)->sph_root; (head)->sph_root = __tmp ; } while (0); if (((head)->sph_root)->link.spe_right == ((void *)0)) break; } do { (__left)->link.spe_right = (head )->sph_root; __left = (head)->sph_root; (head)->sph_root = ((head)->sph_root)->link.spe_right; } while (0); } } do { (__left)->link.spe_right = ((head)->sph_root)-> link.spe_left; (__right)->link.spe_left = ((head)->sph_root )->link.spe_right; ((head)->sph_root)->link.spe_left = (&__node)->link.spe_right; ((head)->sph_root)-> link.spe_right = (&__node)->link.spe_left; } while (0) ; } void drm_file_tree_SPLAY_MINMAX(struct drm_file_tree *head , int __comp) { struct drm_file __node, *__left, *__right, *__tmp ; (&__node)->link.spe_left = (&__node)->link.spe_right = ((void *)0); __left = __right = &__node; while (1) { if (__comp < 0) { __tmp = ((head)->sph_root)->link.spe_left ; if (__tmp == ((void *)0)) break; if (__comp < 0){ do { ( (head)->sph_root)->link.spe_left = (__tmp)->link.spe_right ; (__tmp)->link.spe_right = (head)->sph_root; (head)-> sph_root = __tmp; } while (0); if (((head)->sph_root)-> link.spe_left == ((void *)0)) break; } do { (__right)->link .spe_left = (head)->sph_root; __right = (head)->sph_root ; (head)->sph_root = ((head)->sph_root)->link.spe_left ; } while (0); } else if (__comp > 0) { __tmp = ((head)-> sph_root)->link.spe_right; if (__tmp == ((void *)0)) break ; if (__comp > 0) { do { ((head)->sph_root)->link.spe_right = (__tmp)->link.spe_left; (__tmp)->link.spe_left = (head )->sph_root; (head)->sph_root = __tmp; } while (0); if ( 
((head)->sph_root)->link.spe_right == ((void *)0)) break ; } do { (__left)->link.spe_right = (head)->sph_root; __left = (head)->sph_root; (head)->sph_root = ((head)->sph_root )->link.spe_right; } while (0); } } do { (__left)->link .spe_right = ((head)->sph_root)->link.spe_left; (__right )->link.spe_left = ((head)->sph_root)->link.spe_right ; ((head)->sph_root)->link.spe_left = (&__node)-> link.spe_right; ((head)->sph_root)->link.spe_right = (& __node)->link.spe_left; } while (0); }; |
1553 | |
1554 | struct drm_file * |
1555 | drm_find_file_by_minor(struct drm_device *dev, int minor) |
1556 | { |
1557 | struct drm_file key; |
1558 | |
1559 | key.fminor = minor; |
1560 | return (SPLAY_FIND(drm_file_tree, &dev->files, &key)drm_file_tree_SPLAY_FIND(&dev->files, &key)); |
1561 | } |
1562 | |
1563 | struct drm_device * |
1564 | drm_get_device_from_kdev(dev_t kdev) |
1565 | { |
1566 | int unit = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)) & ((1 << CLONE_SHIFT8) - 1); |
1567 | /* render */ |
1568 | if (unit >= 128) |
1569 | unit -= 128; |
1570 | struct drm_softc *sc; |
1571 | |
1572 | if (unit < drm_cd.cd_ndevs) { |
1573 | sc = (struct drm_softc *)drm_cd.cd_devs[unit]; |
1574 | if (sc) |
1575 | return sc->sc_drm; |
1576 | } |
1577 | |
1578 | return NULL((void *)0); |
1579 | } |
1580 | |
1581 | void |
1582 | filt_drmdetach(struct knote *kn) |
1583 | { |
1584 | struct drm_device *dev = kn->kn_hook; |
1585 | int s; |
1586 | |
1587 | s = spltty()splraise(0x9); |
1588 | klist_remove_locked(&dev->note, kn); |
1589 | splx(s)spllower(s); |
1590 | } |
1591 | |
1592 | int |
1593 | filt_drmkms(struct knote *kn, long hint) |
1594 | { |
1595 | if (kn->kn_sfflags & hint) |
1596 | kn->kn_fflagskn_kevent.fflags |= hint; |
1597 | return (kn->kn_fflagskn_kevent.fflags != 0); |
1598 | } |
1599 | |
1600 | void |
1601 | filt_drmreaddetach(struct knote *kn) |
1602 | { |
1603 | struct drm_file *file_priv = kn->kn_hook; |
1604 | int s; |
1605 | |
1606 | s = spltty()splraise(0x9); |
1607 | klist_remove_locked(&file_priv->rsel.si_note, kn); |
1608 | splx(s)spllower(s); |
1609 | } |
1610 | |
/*
 * filt_drmread: EVFILT_READ event filter -- the fd is readable when
 * the drm_file has pending events queued.  A NOTE_SUBMIT hint means
 * the caller already holds event_lock, so only take it otherwise.
 */
1611 | int
1612 | filt_drmread(struct knote *kn, long hint)
1613 | {
1614 | 	struct drm_file *file_priv = kn->kn_hook;
1615 | 	int val = 0;
1616 | 
1617 | 	if ((hint & NOTE_SUBMIT0x01000000) == 0)
1618 | 		mtx_enter(&file_priv->minor->dev->event_lock);
1619 | 	val = !list_empty(&file_priv->event_list);
1620 | 	if ((hint & NOTE_SUBMIT0x01000000) == 0)
1621 | 		mtx_leave(&file_priv->minor->dev->event_lock);
1622 | 	return (val);
1623 | }
1624 | |
/* Filter ops for EVFILT_DEVICE notes hooked to the drm_device. */
1625 | const struct filterops drm_filtops = {
1626 | 	.f_flags = FILTEROP_ISFD0x00000001,
1627 | 	.f_attach = NULL((void *)0),
1628 | 	.f_detach = filt_drmdetach,
1629 | 	.f_event = filt_drmkms,
1630 | };
1631 | |
/* Filter ops for EVFILT_READ notes hooked to a drm_file's event queue. */
1632 | const struct filterops drmread_filtops = {
1633 | 	.f_flags = FILTEROP_ISFD0x00000001,
1634 | 	.f_attach = NULL((void *)0),
1635 | 	.f_detach = filt_drmreaddetach,
1636 | 	.f_event = filt_drmread,
1637 | };
1638 | |
/*
 * drmkqfilter: kqueue attach entry point for the drm character device.
 * EVFILT_READ notes attach to the opening drm_file's read klist
 * (file looked up by minor under struct_mutex); EVFILT_DEVICE notes
 * attach to the device-wide klist.  Returns ENXIO when the device or
 * file cannot be found, EINVAL for unsupported filters.
 */
1639 | int
1640 | drmkqfilter(dev_t kdev, struct knote *kn)
1641 | {
1642 | 	struct drm_device *dev = NULL((void *)0);
1643 | 	struct drm_file *file_priv = NULL((void *)0);
1644 | 	int s;
1645 | 
1646 | 	dev = drm_get_device_from_kdev(kdev);
1647 | 	if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0))
1648 | 		return (ENXIO6);
1649 | 
1650 | 	switch (kn->kn_filterkn_kevent.filter) {
1651 | 	case EVFILT_READ(-1):
1652 | 		mutex_lock(&dev->struct_mutex)rw_enter_write(&dev->struct_mutex);
1653 | 		file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)));
1654 | 		mutex_unlock(&dev->struct_mutex)rw_exit_write(&dev->struct_mutex);
1655 | 		if (file_priv == NULL((void *)0))
1656 | 			return (ENXIO6);
1657 | 
1658 | 		kn->kn_fop = &drmread_filtops;
1659 | 		kn->kn_hook = file_priv;
1660 | 
		/* klist insertion is serialized by spltty, matching the detach side. */
1661 | 		s = spltty()splraise(0x9);
1662 | 		klist_insert_locked(&file_priv->rsel.si_note, kn);
1663 | 		splx(s)spllower(s);
1664 | 		break;
1665 | 	case EVFILT_DEVICE(-8):
1666 | 		kn->kn_fop = &drm_filtops;
1667 | 		kn->kn_hook = dev;
1668 | 
1669 | 		s = spltty()splraise(0x9);
1670 | 		klist_insert_locked(&dev->note, kn);
1671 | 		splx(s)spllower(s);
1672 | 		break;
1673 | 	default:
1674 | 		return (EINVAL22);
1675 | 	}
1676 | 
1677 | 	return (0);
1678 | }
1679 | |
/*
 * drmopen: open entry point for the drm character device.
 *
 * Resolves the device from the minor, refuses exclusive opens, bumps
 * open_count (first opener triggers legacy setup), classifies the
 * minor (0-63 primary node, 128-191 render node, anything else ENXIO),
 * allocates a drm_file for this open and inserts it into dev->files
 * keyed by minor.  A primary-node first opener becomes DRM master.
 * Errors unwind through out_file_free/err, dropping the open_count
 * reference and the global mutex if it was taken.
 */
1680 | int
1681 | drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
1682 | {
1683 | 	struct drm_device *dev = NULL((void *)0);
1684 | 	struct drm_file *file_priv;
1685 | 	struct drm_minor *dm;
1686 | 	int ret = 0;
1687 | 	int dminor, realminor, minor_type;
1688 | 	int need_setup = 0;
1689 | 
1690 | 	dev = drm_get_device_from_kdev(kdev);
1691 | 	if (dev == NULL((void *)0) || dev->dev_private == NULL((void *)0))
1692 | 		return (ENXIO6);
1693 | 
1694 | 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))___drm_dbg(((void *)0), DRM_UT_CORE, "open_count = %d\n", ({ typeof (*(&dev->open_count)) __tmp = *(volatile typeof(*(& dev->open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer (); __tmp; }));
1695 | 
1696 | 	if (flags & O_EXCL0x0800)
1697 | 		return (EBUSY16);	/* No exclusive opens */
1698 | 
1699 | 	if (drm_dev_needs_global_mutex(dev))
1700 | 		mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex);
1701 | 
	/* Transition 0 -> 1 openers: remember to run one-time legacy setup. */
1702 | 	if (!atomic_fetch_inc(&dev->open_count)__sync_fetch_and_add(&dev->open_count, 1))
1703 | 		need_setup = 1;
1704 | 
1705 | 	dminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8));
1706 | 	realminor =  dminor & ((1 << CLONE_SHIFT8) - 1);
1707 | 	if (realminor < 64)
1708 | 		minor_type = DRM_MINOR_PRIMARY;
1709 | 	else if (realminor >= 128 && realminor < 192)
1710 | 		minor_type = DRM_MINOR_RENDER;
1711 | 	else {
1712 | 		ret = ENXIO6;
1713 | 		goto err;
1714 | 	}
1715 | 
1716 | 	dm = *drm_minor_get_slot(dev, minor_type);
1717 | 	if (dm == NULL((void *)0)) {
1718 | 		ret = ENXIO6;
1719 | 		goto err;
1720 | 	}
1721 | 	dm->index = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8));
1722 | 
1723 | 	file_priv = drm_file_alloc(dm);
1724 | 	if (IS_ERR(file_priv)) {
1725 | 		ret = ENOMEM12;
1726 | 		goto err;
1727 | 	}
1728 | 
1729 | 	/* first opener automatically becomes master */
1730 | 	if (drm_is_primary_client(file_priv)) {
1731 | 		ret = drm_master_open(file_priv);
1732 | 		if (ret != 0)
1733 | 			goto out_file_free;
1734 | 	}
1735 | 
1736 | 	file_priv->filp = (void *)file_priv;
1737 | 	file_priv->fminor = minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8));
1738 | 
1739 | 	mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1740 | 	SPLAY_INSERT(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_INSERT(&dev->files, file_priv);
1741 | 	mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1742 | 
1743 | 	if (need_setup) {
1744 | 		ret = drm_legacy_setup(dev);
1745 | 		if (ret)
		/*
		 * NOTE(review): at this point file_priv is already in
		 * dev->files, but out_file_free does not SPLAY_REMOVE it
		 * before freeing -- verify drm_file_free (or a caller)
		 * unlinks it, otherwise the tree keeps a stale entry.
		 */
1746 | 			goto out_file_free;
1747 | 	}
1748 | 
1749 | 	if (drm_dev_needs_global_mutex(dev))
1750 | 		mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1751 | 
1752 | 	return 0;
1753 | 
1754 | out_file_free:
1755 | 	drm_file_free(file_priv);
1756 | err:
1757 | 	atomic_dec(&dev->open_count)__sync_fetch_and_sub(&dev->open_count, 1);
1758 | 	if (drm_dev_needs_global_mutex(dev))
1759 | 		mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1760 | 	return (ret);
1761 | }
1762 | |
/*
 * drmclose: close entry point for the drm character device.
 * Finds the drm_file for this minor under filelist_mutex, removes it
 * from dev->files and frees it.  The open_count reference is dropped
 * even on lookup failure (the open bumped it); the last closer runs
 * drm_lastclose().
 */
1763 | int
1764 | drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
1765 | {
1766 | 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
1767 | 	struct drm_file *file_priv;
1768 | 	int retcode = 0;
1769 | 
1770 | 	if (dev == NULL((void *)0))
1771 | 		return (ENXIO6);
1772 | 
1773 | 	if (drm_dev_needs_global_mutex(dev))
1774 | 		mutex_lock(&drm_global_mutex)rw_enter_write(&drm_global_mutex);
1775 | 
1776 | 	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count))___drm_dbg(((void *)0), DRM_UT_CORE, "open_count = %d\n", ({ typeof (*(&dev->open_count)) __tmp = *(volatile typeof(*(& dev->open_count)) *)&(*(&dev->open_count)); membar_datadep_consumer (); __tmp; }));
1777 | 
1778 | 	mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1779 | 	file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)));
1780 | 	if (file_priv == NULL((void *)0)) {
1781 | 		DRM_ERROR("can't find authenticator\n")__drm_err("can't find authenticator\n");
1782 | 		retcode = EINVAL22;
1783 | 		mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1784 | 		goto done;
1785 | 	}
1786 | 
1787 | 	SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv)drm_file_tree_SPLAY_REMOVE(&dev->files, file_priv);
1788 | 	mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1789 | 	drm_file_free(file_priv);
1790 | done:
	/* Last closer (count hits 0) tears down device state. */
1791 | 	if (atomic_dec_and_test(&dev->open_count)(__sync_sub_and_fetch((&dev->open_count), 1) == 0))
1792 | 		drm_lastclose(dev);
1793 | 
1794 | 	if (drm_dev_needs_global_mutex(dev))
1795 | 		mutex_unlock(&drm_global_mutex)rw_exit_write(&drm_global_mutex);
1796 | 
1797 | 	return (retcode);
1798 | }
1799 | |
/*
 * drmread: read entry point -- delivers queued drm events to
 * userland.  Blocks (interruptibly) until at least one event is
 * queued unless IO_NDELAY is set (then EAGAIN).  Events are copied
 * out whole: drm_dequeue_event only hands back an event that fits in
 * the remaining read buffer, and it releases event_lock before we
 * sleep in uiomove, so the lock is re-taken at the loop bottom.
 */
1800 | int
1801 | drmread(dev_t kdev, struct uio *uio, int ioflag)
1802 | {
1803 | 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
1804 | 	struct drm_file *file_priv;
1805 | 	struct drm_pending_event *ev;
1806 | 	int error = 0;
1807 | 
1808 | 	if (dev == NULL((void *)0))
1809 | 		return (ENXIO6);
1810 | 
1811 | 	mutex_lock(&dev->filelist_mutex)rw_enter_write(&dev->filelist_mutex);
1812 | 	file_priv = drm_find_file_by_minor(dev, minor(kdev)((unsigned)((kdev) & 0xff) | (((kdev) & 0xffff0000) >> 8)));
1813 | 	mutex_unlock(&dev->filelist_mutex)rw_exit_write(&dev->filelist_mutex);
1814 | 	if (file_priv == NULL((void *)0))
1815 | 		return (ENXIO6);
1816 | 
1817 | 	/*
1818 | 	 * The semantics are a little weird here. We will wait until we
1819 | 	 * have events to process, but as soon as we have events we will
1820 | 	 * only deliver as many as we have.
1821 | 	 * Note that events are atomic, if the read buffer will not fit in
1822 | 	 * a whole event, we won't read any of it out.
1823 | 	 */
1824 | 	mtx_enter(&dev->event_lock);
1825 | 	while (error == 0 && list_empty(&file_priv->event_list)) {
1826 | 		if (ioflag & IO_NDELAY0x10) {
1827 | 			mtx_leave(&dev->event_lock);
1828 | 			return (EAGAIN35);
1829 | 		}
1830 | 		error = msleep_nsec(&file_priv->event_wait, &dev->event_lock,
1831 | 		    PWAIT32 | PCATCH0x100, "drmread", INFSLP0xffffffffffffffffULL);
1832 | 	}
1833 | 	if (error) {
1834 | 		mtx_leave(&dev->event_lock);
1835 | 		return (error);
1836 | 	}
1837 | 	while (drm_dequeue_event(dev, file_priv, uio->uio_resid, &ev)) {
1838 | 		MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p held in %s" , (&dev->event_lock), __func__); } while (0);
1839 | 		/* XXX we always destroy the event on error. */
1840 | 		error = uiomove(ev->event, ev->event->length, uio);
1841 | 		kfree(ev);
1842 | 		if (error)
1843 | 			break;
1844 | 		mtx_enter(&dev->event_lock);
1845 | 	}
1846 | 	MUTEX_ASSERT_UNLOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner == ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p held in %s" , (&dev->event_lock), __func__); } while (0);
1847 | 
1848 | 	return (error);
1849 | }
1850 | |
1851 | /*
1852 |  * Dequeue an event from the file priv in question, returning 1 if an
1853 |  * event was found. We take the resid from the read as a parameter because
1854 |  * we will only dequeue an event if the read buffer has space to fit the
1855 |  * entire thing.
1856 |  *
1857 |  * We are called locked, but we will *unlock* the queue on return so that
1858 |  * we may sleep to copyout the event.
1859 |  */
/*
 * See block comment above: returns 1 with *out set when the head
 * event fits in `resid`, else 0.  Entered with event_lock held,
 * ALWAYS exits with it released (both paths fall through to the
 * mtx_leave at `out`), so the caller can sleep during copyout.
 */
1860 | int
1861 | drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
1862 |     size_t resid, struct drm_pending_event **out)
1863 | {
1864 | 	struct drm_pending_event *e = NULL((void *)0);
1865 | 	int gotone = 0;
1866 | 
1867 | 	MUTEX_ASSERT_LOCKED(&dev->event_lock)do { if (((&dev->event_lock)->mtx_owner != ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })) && !(panicstr || db_active)) panic("mutex %p not held in %s" , (&dev->event_lock), __func__); } while (0);
1868 | 
1869 | 	*out = NULL((void *)0);
1870 | 	if (list_empty(&file_priv->event_list))
1871 | 		goto out;
1872 | 	e = list_first_entry(&file_priv->event_list,({ const __typeof( ((struct drm_pending_event *)0)->link ) *__mptr = ((&file_priv->event_list)->next); (struct drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct drm_pending_event, link) );})
1873 | 	    struct drm_pending_event, link)({ const __typeof( ((struct drm_pending_event *)0)->link ) *__mptr = ((&file_priv->event_list)->next); (struct drm_pending_event *)( (char *)__mptr - __builtin_offsetof(struct drm_pending_event, link) );});
	/* Event would not fit whole in the read buffer: leave it queued. */
1874 | 	if (e->event->length > resid)
1875 | 		goto out;
1876 | 
	/* Unlink and give the event's space back to the file's quota. */
1877 | 	file_priv->event_space += e->event->length;
1878 | 	list_del(&e->link);
1879 | 	*out = e;
1880 | 	gotone = 1;
1881 | 
1882 | out:
1883 | 	mtx_leave(&dev->event_lock);
1884 | 
1885 | 	return (gotone);
1886 | }
1887 | |
/*
 * drmmmap: mmap of the drm character device is not supported;
 * always returns -1 (no page to map).
 */
1888 | paddr_t
1889 | drmmmap(dev_t kdev, off_t offset, int prot)
1890 | {
1891 | 	return -1;
1892 | }
1893 | |
/*
 * drm_dmamem_alloc: allocate and map a DMA-able memory chunk.
 * Creates a DMA map, allocates zeroed segments, maps them into
 * kernel virtual space (mem->kva) and loads the map.  Returns NULL
 * on any failure, unwinding the partially built state through the
 * goto ladder (unmap/free/destroy/strfree) in reverse order.
 * The returned drm_dmamem is freed with drm_dmamem_free().
 */
1894 | struct drm_dmamem *
1895 | drm_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t alignment,
1896 |     int nsegments, bus_size_t maxsegsz, int mapflags, int loadflags)
1897 | {
1898 | 	struct drm_dmamem *mem;
1899 | 	size_t strsize;
1900 | 	/*
1901 | 	 * segs is the last member of the struct since we modify the size
1902 | 	 * to allow extra segments if more than one are allowed.
1903 | 	 */
1904 | 	strsize = sizeof(*mem) + (sizeof(bus_dma_segment_t) * (nsegments - 1));
1905 | 	mem = malloc(strsize, M_DRM145, M_NOWAIT0x0002 | M_ZERO0x0008);
1906 | 	if (mem == NULL((void *)0))
1907 | 		return (NULL((void *)0));
1908 | 
1909 | 	mem->size = size;
1910 | 
1911 | 	if (bus_dmamap_create(dmat, size, nsegments, maxsegsz, 0,(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz ), (0), (0x0001 | 0x0002), (&mem->map))
1912 | 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mem->map)(*(dmat)->_dmamap_create)((dmat), (size), (nsegments), (maxsegsz ), (0), (0x0001 | 0x0002), (&mem->map)) != 0)
1913 | 		goto strfree;
1914 | 
1915 | 	if (bus_dmamem_alloc(dmat, size, alignment, 0, mem->segs, nsegments,(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0), (mem->segs), (nsegments), (&mem->nsegs), (0x0001 | 0x1000))
1916 | 	    &mem->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(dmat)->_dmamem_alloc)((dmat), (size), (alignment), (0), (mem->segs), (nsegments), (&mem->nsegs), (0x0001 | 0x1000)) != 0)
1917 | 		goto destroy;
1918 | 
1919 | 	if (bus_dmamem_map(dmat, mem->segs, mem->nsegs, size,(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs ), (size), (&mem->kva), (0x0001 | mapflags))
1920 | 	    &mem->kva, BUS_DMA_NOWAIT | mapflags)(*(dmat)->_dmamem_map)((dmat), (mem->segs), (mem->nsegs ), (size), (&mem->kva), (0x0001 | mapflags)) != 0)
1921 | 		goto free;
1922 | 
1923 | 	if (bus_dmamap_load(dmat, mem->map, mem->kva, size,(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva ), (size), (((void *)0)), (0x0001 | loadflags))
1924 | 	    NULL, BUS_DMA_NOWAIT | loadflags)(*(dmat)->_dmamap_load)((dmat), (mem->map), (mem->kva ), (size), (((void *)0)), (0x0001 | loadflags)) != 0)
1925 | 		goto unmap;
1926 | 
1927 | 	return (mem);
1928 | 
1929 | unmap:
1930 | 	bus_dmamem_unmap(dmat, mem->kva, size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (size));
1931 | free:
1932 | 	bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs ));
1933 | destroy:
1934 | 	bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map));
1935 | strfree:
1936 | 	free(mem, M_DRM145, 0);
1937 | 
1938 | 	return (NULL((void *)0));
1939 | }
1940 | |
/*
 * drm_dmamem_free: release everything drm_dmamem_alloc set up, in
 * reverse order (unload, unmap, free segments, destroy map, free the
 * struct).  NULL is accepted and ignored.
 */
1941 | void
1942 | drm_dmamem_free(bus_dma_tag_t dmat, struct drm_dmamem *mem)
1943 | {
1944 | 	if (mem == NULL((void *)0))
1945 | 		return;
1946 | 
1947 | 	bus_dmamap_unload(dmat, mem->map)(*(dmat)->_dmamap_unload)((dmat), (mem->map));
1948 | 	bus_dmamem_unmap(dmat, mem->kva, mem->size)(*(dmat)->_dmamem_unmap)((dmat), (mem->kva), (mem->size ));
1949 | 	bus_dmamem_free(dmat, mem->segs, mem->nsegs)(*(dmat)->_dmamem_free)((dmat), (mem->segs), (mem->nsegs ));
1950 | 	bus_dmamap_destroy(dmat, mem->map)(*(dmat)->_dmamap_destroy)((dmat), (mem->map));
1951 | 	free(mem, M_DRM145, 0);
1952 | }
1953 | |
/*
 * drm_pci_alloc: Linux-compat contiguous DMA allocation.  Wraps
 * drm_dmamem_alloc with a single segment (nsegments=1, maxsegsz=size)
 * mapped uncached (BUS_DMA_NOCACHE), and exposes the bus address,
 * size and kva through a drm_dma_handle.  Returns NULL on failure.
 * Pair with drm_pci_free().
 */
1954 | struct drm_dma_handle *
1955 | drm_pci_alloc(struct drm_device *dev, size_t size, size_t align)
1956 | {
1957 | 	struct drm_dma_handle *dmah;
1958 | 
1959 | 	dmah = malloc(sizeof(*dmah), M_DRM145, M_WAITOK0x0001);
1960 | 	dmah->mem = drm_dmamem_alloc(dev->dmat, size, align, 1, size,
1961 | 	    BUS_DMA_NOCACHE0x0800, 0);
1962 | 	if (dmah->mem == NULL((void *)0)) {
1963 | 		free(dmah, M_DRM145, sizeof(*dmah));
1964 | 		return NULL((void *)0);
1965 | 	}
1966 | 	dmah->busaddr = dmah->mem->segs[0].ds_addr;
1967 | 	dmah->size = dmah->mem->size;
1968 | 	dmah->vaddr = dmah->mem->kva;
1969 | 	return (dmah);
1970 | }
1971 | |
/*
 * drm_pci_free: release a handle from drm_pci_alloc.
 * NULL is accepted and ignored.
 */
1972 | void
1973 | drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah)
1974 | {
1975 | 	if (dmah == NULL((void *)0))
1976 | 		return;
1977 | 
1978 | 	drm_dmamem_free(dev->dmat, dmah->mem);
1979 | 	free(dmah, M_DRM145, sizeof(*dmah));
1980 | }
1981 | |
/*
 * Compute order: the smallest exponent such that (1UL << order) >= size,
 * i.e. ceil(log2(size)).  drm_order(0) and drm_order(1) both return 0.
 * Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* order = floor(log2(size)): count how often size halves to 0. */
	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/*
	 * Round up when size is not an exact power of two.  Shift 1UL,
	 * not int 1: for sizes with bit 31 or higher set, order reaches
	 * 31..63 and shifting a plain int that far is undefined
	 * behavior (C11 6.5.7).
	 */
	if (size & ~(1UL << order))
		++order;

	return order;
}
1999 | |
/*
 * drm_getpciinfo: ioctl handler filling a drm_pciinfo from the
 * device's PCI identity (domain/bus/slot/function and the vendor,
 * device and subsystem IDs).  Returns -ENOTTY for non-PCI devices.
 * Note: revision_id is hardcoded to 0 here, not read from PCI
 * config space.
 */
2000 | int
2001 | drm_getpciinfo(struct drm_device *dev, void *data, struct drm_file *file_priv)
2002 | {
2003 | 	struct drm_pciinfo *info = data;
2004 | 
2005 | 	if (dev->pdev == NULL((void *)0))
2006 | 		return -ENOTTY25;
2007 | 
2008 | 	info->domain = dev->pdev->bus->domain_nr;
2009 | 	info->bus = dev->pdev->bus->number;
2010 | 	info->dev = PCI_SLOT(dev->pdev->devfn)((dev->pdev->devfn) >> 3);
2011 | 	info->func = PCI_FUNC(dev->pdev->devfn)((dev->pdev->devfn) & 0x7);
2012 | 	info->vendor_id = dev->pdev->vendor;
2013 | 	info->device_id = dev->pdev->device;
2014 | 	info->subvendor_id = dev->pdev->subsystem_vendor;
2015 | 	info->subdevice_id = dev->pdev->subsystem_device;
2016 | 	info->revision_id = 0;
2017 | 
2018 | 	return 0;
2019 | }