File: dev/pci/drm/i915/gem/i915_gem_object.h
Warning: line 130, column 17: Access to field 'contended' results in a dereference of a null pointer (loaded from variable 'ww')
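The flagged store is at header line 130 below: __i915_gem_object_lock() writes ww->contended on the -EDEADLK branch without checking ww, yet the function is also reachable with ww == NULL (e.g. the i915_gem_object_lock(obj, NULL) call in i915_gem_suspend_late() below). The path is very likely infeasible in practice: dma_resv_lock()/dma_resv_lock_interruptible() can only return -EDEADLK when given a ww_acquire_ctx, i.e. exactly when ww is non-NULL, an invariant the analyzer cannot see. A reduced, hypothetical sketch of the reported path (not code from either file):

	struct i915_gem_ww_ctx *ww = NULL;		/* caller passed no acquire context */
	int ret;

	ret = dma_resv_lock(obj->base.resv, NULL);	/* ww ? &ww->ctx : NULL evaluates to NULL */
	if (ret == -EDEADLK)				/* analyzer assumes this branch can be taken */
		ww->contended = obj;			/* reported NULL dereference (line 130, column 17) */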
File: dev/pci/drm/i915/gem/i915_gem_pm.c (path inferred from the functions shown)

  1 | /*
  2 |  * SPDX-License-Identifier: MIT
  3 |  *
  4 |  * Copyright © 2019 Intel Corporation
  5 |  */
  6 |
  7 | #include "gem/i915_gem_pm.h"
  8 | #include "gt/intel_gt.h"
  9 | #include "gt/intel_gt_pm.h"
 10 | #include "gt/intel_gt_requests.h"
 11 |
 12 | #include "i915_drv.h"
 13 |
 14 | void i915_gem_suspend(struct drm_i915_private *i915)
 15 | {
 16 | 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 17 |
 18 | 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
 19 | 	flush_workqueue(i915->wq);
 20 |
 21 | 	/*
 22 | 	 * We have to flush all the executing contexts to main memory so
 23 | 	 * that they can be saved in the hibernation image. To ensure the last
 24 | 	 * context image is coherent, we have to switch away from it. That
 25 | 	 * leaves the i915->kernel_context still active when
 26 | 	 * we actually suspend, and its image in memory may not match the GPU
 27 | 	 * state. Fortunately, the kernel_context is disposable and we do
 28 | 	 * not rely on its state.
 29 | 	 */
 30 | 	intel_gt_suspend_prepare(&i915->gt);
 31 |
 32 | 	i915_gem_drain_freed_objects(i915);
 33 | }
 34 |
 35 | static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
 36 | {
 37 | 	return list_first_entry_or_null(list,
 38 | 					struct drm_i915_gem_object,
 39 | 					mm.link);
 40 | }
 41 |
 42 | void i915_gem_suspend_late(struct drm_i915_private *i915)
 43 | {
 44 | 	struct drm_i915_gem_object *obj;
 45 | 	struct list_head *phases[] = {
 46 | 		&i915->mm.shrink_list,
 47 | 		&i915->mm.purge_list,
 48 | 		NULL
 49 | 	}, **phase;
 50 | 	unsigned long flags;
 51 |
 52 | 	/*
 53 | 	 * Neither the BIOS, ourselves nor any other kernel
 54 | 	 * expects the system to be in execlists mode on startup,
 55 | 	 * so we need to reset the GPU back to legacy mode. And the only
 56 | 	 * known way to disable logical contexts is through a GPU reset.
 57 | 	 *
 58 | 	 * So in order to leave the system in a known default configuration,
 59 | 	 * always reset the GPU upon unload and suspend. Afterwards we then
 60 | 	 * clean up the GEM state tracking, flushing off the requests and
 61 | 	 * leaving the system in a known idle state.
 62 | 	 *
 63 | 	 * Note that it is of the utmost importance that the GPU is idle and
 64 | 	 * all stray writes are flushed *before* we dismantle the backing
 65 | 	 * storage for the pinned objects.
 66 | 	 *
 67 | 	 * However, since we are uncertain that resetting the GPU on older
 68 | 	 * machines is a good idea, we don't - just in case it leaves the
 69 | 	 * machine in an unusable condition.
 70 | 	 */
 71 |
 72 | 	intel_gt_suspend_late(&i915->gt);
 73 |
 74 | 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 75 | 	for (phase = phases; *phase; phase++) {
 76 | 		DRM_LIST_HEAD(keep);
 77 |
 78 | 		while ((obj = first_mm_object(*phase))) {
 79 | 			list_move_tail(&obj->mm.link, &keep);
 80 |
 81 | 			/* Beware the background _i915_gem_free_objects */
 82 | 			if (!kref_get_unless_zero(&obj->base.refcount))
 83 | 				continue;
 84 |
 85 | 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 86 |
 87 | 			i915_gem_object_lock(obj, NULL);
 88 | 			drm_WARN_ON(&i915->drm,
 89 | 				    i915_gem_object_set_to_gtt_domain(obj, false));
 90 | 			i915_gem_object_unlock(obj);
 91 | 			i915_gem_object_put(obj);
 92 |
 93 | 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 94 | 		}
 95 |
 96 | 		list_splice_tail(&keep, *phase);
 97 | 	}
 98 | 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 99 | }
100 |
101 | void i915_gem_resume(struct drm_i915_private *i915)
102 | {
103 | 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
104 |
105 | 	/*
106 | 	 * As we didn't flush the kernel context before suspend, we cannot
107 | 	 * guarantee that the context image is complete. So let's just reset
108 | 	 * it and start again.
109 | 	 */
110 | 	intel_gt_resume(&i915->gt);
111 | }
File: dev/pci/drm/i915/gem/i915_gem_object.h

  1 | /*
  2 |  * SPDX-License-Identifier: MIT
  3 |  *
  4 |  * Copyright © 2016 Intel Corporation
  5 |  */
  6 |
  7 | #ifndef __I915_GEM_OBJECT_H__
  8 | #define __I915_GEM_OBJECT_H__
  9 |
 10 | #include <drm/drm_gem.h>
 11 | #include <drm/drm_file.h>
 12 | #include <drm/drm_device.h>
 13 |
 14 | #include "display/intel_frontbuffer.h"
 15 | #include "i915_gem_object_types.h"
 16 | #include "i915_gem_gtt.h"
 17 | #include "i915_vma_types.h"
 18 |
 19 | void i915_gem_init__objects(struct drm_i915_private *i915);
 20 |
 21 | struct drm_i915_gem_object *i915_gem_object_alloc(void);
 22 | void i915_gem_object_free(struct drm_i915_gem_object *obj);
 23 |
 24 | void i915_gem_object_init(struct drm_i915_gem_object *obj,
 25 | 			  const struct drm_i915_gem_object_ops *ops,
 26 | 			  struct lock_class_key *key);
 27 | struct drm_i915_gem_object *
 28 | i915_gem_object_create_shmem(struct drm_i915_private *i915,
 29 | 			     resource_size_t size);
 30 | struct drm_i915_gem_object *
 31 | i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
 32 | 				       const void *data, resource_size_t size);
 33 |
 34 | extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 35 | void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 36 | 				     struct sg_table *pages,
 37 | 				     bool needs_clflush);
 38 |
 39 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
 40 |
 41 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 42 | void i915_gem_free_object(struct drm_gem_object *obj);
 43 |
 44 | void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 45 |
 46 | struct sg_table *
 47 | __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
 48 | void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 49 |
 50 | /**
 51 |  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 52 |  * @file: DRM file private data
 53 |  * @handle: userspace handle
 54 |  *
 55 |  * Returns:
 56 |  *
 57 |  * A pointer to the object named by the handle if such exists on @file, NULL
 58 |  * otherwise. This object is only valid whilst under the RCU read lock, and
 59 |  * note carefully the object may be in the process of being destroyed.
 60 |  */
 61 | static inline struct drm_i915_gem_object *
 62 | i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
 63 | {
 64 | #ifdef CONFIG_LOCKDEP
 65 | 	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
 66 | #endif
 67 | 	return idr_find(&file->object_idr, handle);
 68 | }
 69 |
 70 | static inline struct drm_i915_gem_object *
 71 | i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
 72 | {
 73 | 	if (obj && !kref_get_unless_zero(&obj->base.refcount))
 74 | 		obj = NULL;
 75 |
 76 | 	return obj;
 77 | }
 78 |
 79 | static inline struct drm_i915_gem_object *
 80 | i915_gem_object_lookup(struct drm_file *file, u32 handle)
 81 | {
 82 | 	struct drm_i915_gem_object *obj;
 83 |
 84 | 	rcu_read_lock();
 85 | 	obj = i915_gem_object_lookup_rcu(file, handle);
 86 | 	obj = i915_gem_object_get_rcu(obj);
 87 | 	rcu_read_unlock();
 88 |
 89 | 	return obj;
 90 | }
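The three helpers above make up the usual handle-to-object pattern: resolve the handle under rcu_read_lock(), convert the unstable pointer into a real reference with kref_get_unless_zero(), and drop the reference when finished. A minimal sketch of a caller; the function name and error handling are illustrative, not from this header:

	static int example_op(struct drm_file *file, u32 handle)
	{
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_lookup(file, handle);
		if (!obj)
			return -ENOENT;		/* stale or bogus userspace handle */

		/* ... use obj; it cannot be freed while we hold the reference ... */

		i915_gem_object_put(obj);
		return 0;
	}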
 91 |
 92 | __deprecated
 93 | struct drm_gem_object *
 94 | drm_gem_object_lookup(struct drm_file *file, u32 handle);
 95 |
 96 | __attribute__((nonnull))
 97 | static inline struct drm_i915_gem_object *
 98 | i915_gem_object_get(struct drm_i915_gem_object *obj)
 99 | {
100 | 	drm_gem_object_get(&obj->base);
101 | 	return obj;
102 | }
103 |
104 | __attribute__((nonnull))
105 | static inline void
106 | i915_gem_object_put(struct drm_i915_gem_object *obj)
107 | {
108 | 	__drm_gem_object_put(&obj->base);
109 | }
110 |
111 | #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
112 |
113 | static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
114 | 					 struct i915_gem_ww_ctx *ww,
115 | 					 bool intr)
116 | {
117 | 	int ret;
118 |
119 | 	if (intr)
120 | 		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
121 | 	else
122 | 		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
123 |
124 | 	if (!ret && ww)
125 | 		list_add_tail(&obj->obj_link, &ww->obj_list);
126 | 	if (ret == -EALREADY)
127 | 		ret = 0;
128 |
129 | 	if (ret == -EDEADLK)
130 | 		ww->contended = obj;
131 |
132 | 	return ret;
133 | }
134 |
135 | static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
136 | 				       struct i915_gem_ww_ctx *ww)
137 | {
138 | 	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
139 | }
140 |
141 | static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
142 | 						      struct i915_gem_ww_ctx *ww)
143 | {
144 | 	WARN_ON(ww && !ww->intr);
145 | 	return __i915_gem_object_lock(obj, ww, true);
146 | }
147 |
148 | static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
149 | {
150 | 	return dma_resv_trylock(obj->base.resv);
151 | }
152 |
153 | static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
154 | {
155 | 	dma_resv_unlock(obj->base.resv);
156 | }
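These lock helpers are designed for wound/wait acquisition of multiple objects: on contention dma_resv_lock() returns -EDEADLK, the contended object is remembered in ww->contended (the store flagged at line 130), and the caller is expected to back off and retry. A sketch of the conventional retry loop, assuming the i915_gem_ww_ctx_init()/i915_gem_ww_ctx_backoff()/i915_gem_ww_ctx_fini() helpers declared elsewhere in this driver:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* intr = true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* ... obj->base.resv is held; do the work ... */
	}
	if (err == -EDEADLK) {
		/* backoff drops the held locks, then relocks ww.contended */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);		/* also unlocks everything on ww.obj_list */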
157 |
158 | struct dma_fence *
159 | i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
160 | void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
161 | 				  struct dma_fence *fence);
162 |
163 | static inline void
164 | i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
165 | {
166 | 	obj->flags |= I915_BO_READONLY;
167 | }
168 |
169 | static inline bool
170 | i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
171 | {
172 | 	return obj->flags & I915_BO_READONLY;
173 | }
174 |
175 | static inline bool
176 | i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
177 | {
178 | 	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
179 | }
180 |
181 | static inline bool
182 | i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
183 | {
184 | 	return obj->flags & I915_BO_ALLOC_VOLATILE;
185 | }
186 |
187 | static inline void
188 | i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
189 | {
190 | 	obj->flags |= I915_BO_ALLOC_VOLATILE;
191 | }
192 |
193 | static inline bool
194 | i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
195 | 			 unsigned long flags)
196 | {
197 | 	return obj->ops->flags & flags;
198 | }
199 |
200 | static inline bool
201 | i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
202 | {
203 | 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
204 | }
205 |
206 | static inline bool
207 | i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
208 | {
209 | 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
210 | }
211 |
212 | static inline bool
213 | i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
214 | {
215 | 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
216 | }
217 |
218 | static inline bool
219 | i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
220 | {
221 | 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
222 | }
223 |
224 | static inline bool
225 | i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
226 | {
227 | 	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
228 | }
229 |
230 | static inline bool
231 | i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
232 | {
233 | 	return READ_ONCE(obj->frontbuffer);
234 | }
235 |
236 | static inline unsigned int
237 | i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
238 | {
239 | 	return obj->tiling_and_stride & TILING_MASK;
240 | }
241 |
242 | static inline bool
243 | i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
244 | {
245 | 	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
246 | }
247 |
248 | static inline unsigned int
249 | i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
250 | {
251 | 	return obj->tiling_and_stride & STRIDE_MASK;
252 | }
253 |
254 | static inline unsigned int
255 | i915_gem_tile_height(unsigned int tiling)
256 | {
257 | 	GEM_BUG_ON(!tiling);
258 | 	return tiling == I915_TILING_Y ? 32 : 8;
259 | }
260 |
261 | static inline unsigned int
262 | i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
263 | {
264 | 	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
265 | }
266 |
267 | static inline unsigned int
268 | i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
269 | {
270 | 	return (i915_gem_object_get_stride(obj) *
271 | 		i915_gem_object_get_tile_height(obj));
272 | }
273 |
274 | int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
275 | 			       unsigned int tiling, unsigned int stride);
276 |
277 | struct scatterlist *
278 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
279 | 		       unsigned int n, unsigned int *offset);
280 |
281 | struct vm_page *
282 | i915_gem_object_get_page(struct drm_i915_gem_object *obj,
283 | 			 unsigned int n);
284 |
285 | struct vm_page *
286 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
287 | 			       unsigned int n);
288 |
289 | dma_addr_t
290 | i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
291 | 				    unsigned long n,
292 | 				    unsigned int *len);
293 |
294 | dma_addr_t
295 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
296 | 				unsigned long n);
297 |
298 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
299 | 				 struct sg_table *pages,
300 | 				 unsigned int sg_page_sizes);
301 |
302 | int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
303 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
304 |
305 | enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
306 | 	I915_MM_NORMAL = 0,
307 | 	/*
308 | 	 * Only used by struct_mutex, when called "recursively" from
309 | 	 * direct-reclaim-esque paths. Safe because there is only ever one
310 | 	 * struct_mutex in the entire system.
311 | 	 */
312 | 	I915_MM_SHRINKER = 1,
313 | 	/*
314 | 	 * Used for obj->mm.lock when allocating pages. Safe because the object
315 | 	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
316 | 	 * it. As soon as the object has pages, obj->mm.lock nests within
317 | 	 * fs_reclaim.
318 | 	 */
319 | 	I915_MM_GET_PAGES = 1,
320 | };
321 |
322 | static inline int __must_check
323 | i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
324 | {
325 | 	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
326 |
327 | 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
328 | 		return 0;
329 |
330 | 	return __i915_gem_object_get_pages(obj);
331 | }
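i915_gem_object_pin_pages() is the fast path: if the pages are already pinned it only bumps pages_pin_count, and only a 0 -> 1 transition goes through __i915_gem_object_get_pages() to instantiate the backing store. Callers bracket any access to the pages; a hypothetical caller:

	static int example_touch_pages(struct drm_i915_gem_object *obj)
	{
		int err;

		err = i915_gem_object_pin_pages(obj);	/* may allocate on first pin */
		if (err)
			return err;

		/* ... pages stay resident; e.g. i915_gem_object_get_page(obj, 0) ... */

		i915_gem_object_unpin_pages(obj);	/* allow the shrinker at them again */
		return 0;
	}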
332 |
333 | static inline bool
334 | i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
335 | {
336 | 	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
337 | }
338 |
339 | static inline void
340 | __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
341 | {
342 | 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
343 |
344 | 	atomic_inc(&obj->mm.pages_pin_count);
345 | }
346 |
347 | static inline bool
348 | i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
349 | {
350 | 	return atomic_read(&obj->mm.pages_pin_count);
351 | }
352 |
353 | static inline void
354 | __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
355 | {
356 | 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
357 | 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
358 |
359 | 	atomic_dec(&obj->mm.pages_pin_count);
360 | }
361 |
362 | static inline void
363 | i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
364 | {
365 | 	__i915_gem_object_unpin_pages(obj);
366 | }
367 |
368 | int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
369 | void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
370 | void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
371 |
372 | enum i915_map_type {
373 | 	I915_MAP_WB = 0,
374 | 	I915_MAP_WC,
375 | #define I915_MAP_OVERRIDE BIT(31)
376 | 	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
377 | 	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
378 | };
379 |
380 | /**
381 |  * i915_gem_object_pin_map - return a contiguous mapping of the entire object
382 |  * @obj: the object to map into kernel address space
383 |  * @type: the type of mapping, used to select pgprot_t
384 |  *
385 |  * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
386 |  * pages and then returns a contiguous mapping of the backing storage into
387 |  * the kernel address space. Based on the @type of mapping, the PTE will be
388 |  * set to either WriteBack or WriteCombine (via pgprot_t).
389 |  *
390 |  * The caller is responsible for calling i915_gem_object_unpin_map() when the
391 |  * mapping is no longer required.
392 |  *
393 |  * Returns the pointer through which to access the mapped object, or an
394 |  * ERR_PTR() on error.
395 |  */
396 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
397 | 					   enum i915_map_type type);
398 |
399 | void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
400 | 				 unsigned long offset,
401 | 				 unsigned long size);
402 | static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
403 | {
404 | 	__i915_gem_object_flush_map(obj, 0, obj->base.size);
405 | }
406 |
407 | /**
408 |  * i915_gem_object_unpin_map - releases an earlier mapping
409 |  * @obj: the object to unmap
410 |  *
411 |  * After pinning the object and mapping its pages, once you are finished
412 |  * with your access, call i915_gem_object_unpin_map() to release the pin
413 |  * upon the mapping. Once the pin count reaches zero, that mapping may be
414 |  * removed.
415 |  */
416 | static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
417 | {
418 | 	i915_gem_object_unpin_pages(obj);
419 | }
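Putting the two kerneldoc comments above together, the expected sequence is pin_map -> access -> flush (if needed) -> unpin_map. A small, hypothetical example of filling an object through a write-back kernel mapping, modelled on how the driver copies data into shmem objects:

	static int example_fill(struct drm_i915_gem_object *obj,
				const void *data, size_t len)
	{
		void *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		memcpy(vaddr, data, len);
		__i915_gem_object_flush_map(obj, 0, len);	/* flush CPU caches if not coherent */
		i915_gem_object_unpin_map(obj);
		return 0;
	}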
420 |
421 | void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
422 |
423 | void
424 | i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
425 | 				   unsigned int flush_domains);
426 |
427 | int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
428 | 				 unsigned int *needs_clflush);
429 | int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
430 | 				  unsigned int *needs_clflush);
431 | #define CLFLUSH_BEFORE	BIT(0)
432 | #define CLFLUSH_AFTER	BIT(1)
433 | #define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
434 |
435 | static inline void
436 | i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
437 | {
438 | 	i915_gem_object_unpin_pages(obj);
439 | }
440 |
441 | static inline struct intel_engine_cs *
442 | i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
443 | {
444 | 	struct intel_engine_cs *engine = NULL;
445 | 	struct dma_fence *fence;
446 |
447 | 	rcu_read_lock();
448 | 	fence = dma_resv_get_excl_rcu(obj->base.resv);
449 | 	rcu_read_unlock();
450 |
451 | 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
452 | 		engine = to_request(fence)->engine;
453 | 	dma_fence_put(fence);
454 |
455 | 	return engine;
456 | }
457 |
458 | void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
459 | 					 unsigned int cache_level);
460 | void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
461 | void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
462 |
463 | int __must_check
464 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
465 | int __must_check
466 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
467 | int __must_check
468 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
469 | struct i915_vma * __must_check
470 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
471 | 				     u32 alignment,
472 | 				     const struct i915_ggtt_view *view,
473 | 				     unsigned int flags);
474 |
475 | void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
476 | void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
477 | void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
478 |
479 | static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
480 | {
481 | 	if (obj->cache_dirty)
482 | 		return false;
483 |
484 | 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
485 | 		return true;
486 |
487 | 	/* Currently in use by HW (display engine)? Keep flushed. */
488 | 	return i915_gem_object_is_framebuffer(obj);
489 | }
490 |
491 | static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
492 | {
493 | 	obj->read_domains = I915_GEM_DOMAIN_CPU;
494 | 	obj->write_domain = I915_GEM_DOMAIN_CPU;
495 | 	if (cpu_write_needs_clflush(obj))
496 | 		obj->cache_dirty = true;
497 | }
498 |
499 | int i915_gem_object_wait(struct drm_i915_gem_object *obj,
500 | 			 unsigned int flags,
501 | 			 long timeout);
502 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
503 | 				  unsigned int flags,
504 | 				  const struct i915_sched_attr *attr);
505 |
506 | void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
507 | 					 enum fb_op_origin origin);
508 | void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
509 | 					      enum fb_op_origin origin);
510 |
511 | static inline void
512 | i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
513 | 				  enum fb_op_origin origin)
514 | {
515 | 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
516 | 		__i915_gem_object_flush_frontbuffer(obj, origin);
517 | }
518 |
519 | static inline void
520 | i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
521 | 				       enum fb_op_origin origin)
522 | {
523 | 	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
524 | 		__i915_gem_object_invalidate_frontbuffer(obj, origin);
525 | }
526 |
527 | #endif