File: dev/pci/drm/i915/gem/i915_gem_object.h
Warning: line 130, column 17: Access to field 'contended' results in a dereference of a null pointer (loaded from variable 'ww')
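The flagged statement is "ww->contended = obj;" at line 130 of the i915_gem_object.h listing further down; the report first shows the tiling ioctl source (gem/i915_gem_tiling.c, judging by its content) that includes the header. The analyzer assumes dma_resv_lock() can return -EDEADLK even when __i915_gem_object_lock() was called with a NULL ww context, in which case the assignment would dereference NULL. Since -EDEADLK should only be produced when a ww acquire context is actually passed in, this looks like a false positive; still, a defensive guard along the following lines (a sketch, not the upstream fix) would satisfy the analyzer:

	if (ret == -EDEADLK && ww)
		ww->contended = obj;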
1 | /* | ||||
2 | * SPDX-License-Identifier: MIT | ||||
3 | * | ||||
4 | * Copyright © 2008 Intel Corporation | ||||
5 | */ | ||||
6 | |||||
7 | #include <linux/string.h> | ||||
8 | #include <linux/bitops.h> | ||||
9 | |||||
10 | #include "i915_drv.h" | ||||
11 | #include "i915_gem.h" | ||||
12 | #include "i915_gem_ioctls.h" | ||||
13 | #include "i915_gem_mman.h" | ||||
14 | #include "i915_gem_object.h" | ||||
15 | |||||
16 | /** | ||||
17 | * DOC: buffer object tiling | ||||
18 | * | ||||
19 | * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the userspace | ||||
20 | * interface to declare fence register requirements. | ||||
21 | * | ||||
22 | * In principle GEM doesn't care at all about the internal data layout of an | ||||
23 | * object, and hence it also doesn't care about tiling or swizzling. There are two | ||||
24 | * exceptions: | ||||
25 | * | ||||
26 | * - For X and Y tiling the hardware provides detilers for CPU access, so-called | ||||
27 | * fences. Since there is only a limited number of them, the kernel must manage | ||||
28 | * these, and therefore userspace must tell the kernel the object tiling if it | ||||
29 | * wants to use fences for detiling. | ||||
30 | * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which | ||||
31 | * depends upon the physical page frame number. When swapping such objects the | ||||
32 | * page frame number might change and the kernel must be able to fix this up | ||||
33 | * and hence must know the tiling. Note that on a subset of platforms with | ||||
34 | * asymmetric memory channel population the swizzling pattern changes in an | ||||
35 | * unknown way, and for those the kernel simply forbids swapping completely. | ||||
36 | * | ||||
37 | * Since neither of these applies to new tiling layouts on modern platforms like | ||||
38 | * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled. | ||||
39 | * Anything else can be handled in userspace entirely without the kernel's | ||||
40 | * involvement. | ||||
41 | */ | ||||
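For illustration, the userspace side of the interface described above might declare X tiling for an existing GEM handle roughly as follows. This is a sketch only: set_x_tiling, the fd and the stride value are hypothetical, and real callers typically go through libdrm's drmIoctl() wrapper rather than a raw ioctl().

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Hypothetical helper: declare X tiling for an existing GEM handle. */
	static int set_x_tiling(int drm_fd, uint32_t handle, uint32_t stride)
	{
		struct drm_i915_gem_set_tiling arg = {
			.handle = handle,
			.tiling_mode = I915_TILING_X,
			.stride = stride,	/* must satisfy the kernel's i915_tiling_ok() checks */
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) == -1)
			return -errno;

		/* The kernel reports the bit-6 swizzling it will apply for this object. */
		return (int)arg.swizzle_mode;
	}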
42 | |||||
43 | /** | ||||
44 | * i915_gem_fence_size - required global GTT size for a fence | ||||
45 | * @i915: i915 device | ||||
46 | * @size: object size | ||||
47 | * @tiling: tiling mode | ||||
48 | * @stride: tiling stride | ||||
49 | * | ||||
50 | * Return the required global GTT size for a fence (view of a tiled object), | ||||
51 | * taking into account potential fence register mapping. | ||||
52 | */ | ||||
53 | u32 i915_gem_fence_size(struct drm_i915_privateinteldrm_softc *i915, | ||||
54 | u32 size, unsigned int tiling, unsigned int stride) | ||||
55 | { | ||||
56 | u32 ggtt_size; | ||||
57 | |||||
58 | GEM_BUG_ON(!size)((void)0); | ||||
59 | |||||
60 | if (tiling == I915_TILING_NONE0) | ||||
61 | return size; | ||||
62 | |||||
63 | GEM_BUG_ON(!stride)((void)0); | ||||
64 | |||||
65 | if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 4) { | ||||
66 | stride *= i915_gem_tile_height(tiling); | ||||
67 | GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE))((void)0); | ||||
68 | return roundup(size, stride)((((size)+((stride)-1))/(stride))*(stride)); | ||||
69 | } | ||||
70 | |||||
71 | /* Previous chips need a power-of-two fence region when tiling */ | ||||
72 | if (IS_GEN(i915, 3)(0 + (&(i915)->__info)->gen == (3))) | ||||
73 | ggtt_size = 1024*1024; | ||||
74 | else | ||||
75 | ggtt_size = 512*1024; | ||||
76 | |||||
77 | while (ggtt_size < size) | ||||
78 | ggtt_size <<= 1; | ||||
79 | |||||
80 | return ggtt_size; | ||||
81 | } | ||||
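For example, under the arithmetic above a 700 KiB X-tiled object needs a 1 MiB fence region on both gen2 (the 512 KiB starting size doubles once) and gen3 (the 1 MiB starting size already covers it), whereas on gen4+ the size is only rounded up to a whole tile row (stride times tile height).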
82 | |||||
83 | /** | ||||
84 | * i915_gem_fence_alignment - required global GTT alignment for a fence | ||||
85 | * @i915: i915 device | ||||
86 | * @size: object size | ||||
87 | * @tiling: tiling mode | ||||
88 | * @stride: tiling stride | ||||
89 | * | ||||
90 | * Return the required global GTT alignment for a fence (a view of a tiled | ||||
91 | * object), taking into account potential fence register mapping. | ||||
92 | */ | ||||
93 | u32 i915_gem_fence_alignment(struct drm_i915_privateinteldrm_softc *i915, u32 size, | ||||
94 | unsigned int tiling, unsigned int stride) | ||||
95 | { | ||||
96 | GEM_BUG_ON(!size)((void)0); | ||||
97 | |||||
98 | /* | ||||
99 | * Minimum alignment is 4k (GTT page size), but might be greater | ||||
100 | * if a fence register is needed for the object. | ||||
101 | */ | ||||
102 | if (tiling == I915_TILING_NONE0) | ||||
103 | return I915_GTT_MIN_ALIGNMENT(1ULL << (12)); | ||||
104 | |||||
105 | if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 4) | ||||
106 | return I965_FENCE_PAGE4096UL; | ||||
107 | |||||
108 | /* | ||||
109 | * Previous chips need to be aligned to the size of the smallest | ||||
110 | * fence register that can contain the object. | ||||
111 | */ | ||||
112 | return i915_gem_fence_size(i915, size, tiling, stride); | ||||
113 | } | ||||
114 | |||||
115 | /* Check pitch constraints for all chips & tiling formats */ | ||||
116 | static bool_Bool | ||||
117 | i915_tiling_ok(struct drm_i915_gem_object *obj, | ||||
118 | unsigned int tiling, unsigned int stride) | ||||
119 | { | ||||
120 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev); | ||||
121 | unsigned int tile_width; | ||||
122 | |||||
123 | /* Linear is always fine */ | ||||
124 | if (tiling == I915_TILING_NONE0) | ||||
125 | return true1; | ||||
126 | |||||
127 | if (tiling > I915_TILING_LAST2) | ||||
128 | return false0; | ||||
129 | |||||
130 | /* check maximum stride & object size */ | ||||
131 | /* i965+ stores the end address of the gtt mapping in the fence | ||||
132 | * reg, so don't bother to check the size */ | ||||
133 | if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 7) { | ||||
134 | if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL0x0800) | ||||
135 | return false0; | ||||
136 | } else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 4) { | ||||
137 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL0x0400) | ||||
138 | return false0; | ||||
139 | } else { | ||||
140 | if (stride > 8192) | ||||
141 | return false0; | ||||
142 | |||||
143 | if (!is_power_of_2(stride)(((stride) != 0) && (((stride) - 1) & (stride)) == 0)) | ||||
144 | return false0; | ||||
145 | } | ||||
146 | |||||
147 | if (IS_GEN(i915, 2)(0 + (&(i915)->__info)->gen == (2)) || | ||||
148 | (tiling == I915_TILING_Y2 && HAS_128_BYTE_Y_TILING(i915)(!(0 + (&(i915)->__info)->gen == (2)) && !( IS_PLATFORM(i915, INTEL_I915G) || IS_PLATFORM(i915, INTEL_I915GM ))))) | ||||
149 | tile_width = 128; | ||||
150 | else | ||||
151 | tile_width = 512; | ||||
152 | |||||
153 | if (!stride || !IS_ALIGNED(stride, tile_width)(((stride) & ((tile_width) - 1)) == 0)) | ||||
154 | return false0; | ||||
155 | |||||
156 | return true1; | ||||
157 | } | ||||
158 | |||||
159 | static bool_Bool i915_vma_fence_prepare(struct i915_vma *vma, | ||||
160 | int tiling_mode, unsigned int stride) | ||||
161 | { | ||||
162 | struct drm_i915_privateinteldrm_softc *i915 = vma->vm->i915; | ||||
163 | u32 size, alignment; | ||||
164 | |||||
165 | if (!i915_vma_is_map_and_fenceable(vma)) | ||||
166 | return true1; | ||||
167 | |||||
168 | size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride); | ||||
169 | if (vma->node.size < size) | ||||
170 | return false0; | ||||
171 | |||||
172 | alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride); | ||||
173 | if (!IS_ALIGNED(vma->node.start, alignment)(((vma->node.start) & ((alignment) - 1)) == 0)) | ||||
174 | return false0; | ||||
175 | |||||
176 | return true1; | ||||
177 | } | ||||
178 | |||||
179 | /* Make the current GTT allocation valid for the change in tiling. */ | ||||
180 | static int | ||||
181 | i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, | ||||
182 | int tiling_mode, unsigned int stride) | ||||
183 | { | ||||
184 | struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; | ||||
185 | struct i915_vma *vma, *vn; | ||||
186 | DRM_LIST_HEAD(unbind)struct list_head unbind = { &(unbind), &(unbind) }; | ||||
187 | int ret = 0; | ||||
188 | |||||
189 | if (tiling_mode == I915_TILING_NONE0) | ||||
190 | return 0; | ||||
191 | |||||
192 | mutex_lock(&ggtt->vm.mutex)rw_enter_write(&ggtt->vm.mutex); | ||||
193 | |||||
194 | spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock); | ||||
195 | for_each_ggtt_vma(vma, obj)for (vma = ({ const __typeof( ((__typeof(*vma) *)0)->obj_link ) *__mptr = ((&(obj)->vma.list)->next); (__typeof( *vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma), obj_link) );}); &vma->obj_link != (&(obj)->vma .list); vma = ({ const __typeof( ((__typeof(*vma) *)0)->obj_link ) *__mptr = (vma->obj_link.next); (__typeof(*vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma), obj_link) );}) ) if (!i915_vma_is_ggtt(vma)) break; else { | ||||
196 | GEM_BUG_ON(vma->vm != &ggtt->vm)((void)0); | ||||
197 | |||||
198 | if (i915_vma_fence_prepare(vma, tiling_mode, stride)) | ||||
199 | continue; | ||||
200 | |||||
201 | list_move(&vma->vm_link, &unbind); | ||||
202 | } | ||||
203 | spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock); | ||||
204 | |||||
205 | list_for_each_entry_safe(vma, vn, &unbind, vm_link)for (vma = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link ) *__mptr = ((&unbind)->next); (__typeof(*vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma), vm_link) );}), vn = ({ const __typeof( ((__typeof(*vma) *)0)->vm_link ) * __mptr = (vma->vm_link.next); (__typeof(*vma) *)( (char *) __mptr - __builtin_offsetof(__typeof(*vma), vm_link) );}); & vma->vm_link != (&unbind); vma = vn, vn = ({ const __typeof ( ((__typeof(*vn) *)0)->vm_link ) *__mptr = (vn->vm_link .next); (__typeof(*vn) *)( (char *)__mptr - __builtin_offsetof (__typeof(*vn), vm_link) );})) { | ||||
206 | ret = __i915_vma_unbind(vma); | ||||
207 | if (ret) { | ||||
208 | /* Restore the remaining vma on an error */ | ||||
209 | list_splice(&unbind, &ggtt->vm.bound_list); | ||||
210 | break; | ||||
211 | } | ||||
212 | } | ||||
213 | |||||
214 | mutex_unlock(&ggtt->vm.mutex)rw_exit_write(&ggtt->vm.mutex); | ||||
215 | |||||
216 | return ret; | ||||
217 | } | ||||
218 | |||||
219 | int | ||||
220 | i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, | ||||
221 | unsigned int tiling, unsigned int stride) | ||||
222 | { | ||||
223 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev); | ||||
224 | struct i915_vma *vma; | ||||
225 | int err; | ||||
226 | |||||
227 | /* Make sure we don't cross-contaminate obj->tiling_and_stride */ | ||||
228 | BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK)extern char _ctassert[(!(2 & (~(128 - 1)))) ? 1 : -1 ] __attribute__ ((__unused__)); | ||||
229 | |||||
230 | GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride))((void)0); | ||||
231 | GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE))((void)0); | ||||
232 | |||||
233 | if ((tiling | stride) == obj->tiling_and_stride) | ||||
234 | return 0; | ||||
235 | |||||
236 | if (i915_gem_object_is_framebuffer(obj)) | ||||
237 | return -EBUSY16; | ||||
238 | |||||
239 | /* We need to rebind the object if its current allocation | ||||
240 | * no longer meets the alignment restrictions for its new | ||||
241 | * tiling mode. Otherwise we can just leave it alone, but | ||||
242 | * need to ensure that any fence register is updated before | ||||
243 | * the next fenced (either through the GTT or by the BLT unit | ||||
244 | * on older GPUs) access. | ||||
245 | * | ||||
246 | * After updating the tiling parameters, we then flag whether | ||||
247 | * we need to update an associated fence register. Note this | ||||
248 | * has to also include the unfenced register the GPU uses | ||||
249 | * whilst executing a fenced command for an untiled object. | ||||
250 | */ | ||||
251 | |||||
252 | i915_gem_object_lock(obj, NULL((void *)0)); | ||||
253 | if (i915_gem_object_is_framebuffer(obj)) { | ||||
254 | i915_gem_object_unlock(obj); | ||||
255 | return -EBUSY16; | ||||
256 | } | ||||
257 | |||||
258 | err = i915_gem_object_fence_prepare(obj, tiling, stride); | ||||
259 | if (err) { | ||||
260 | i915_gem_object_unlock(obj); | ||||
261 | return err; | ||||
262 | } | ||||
263 | |||||
264 | /* If the memory has unknown (i.e. varying) swizzling, we pin the | ||||
265 | * pages to prevent them being swapped out and causing corruption | ||||
266 | * due to the change in swizzling. | ||||
267 | */ | ||||
268 | mutex_lock(&obj->mm.lock)rw_enter_write(&obj->mm.lock); | ||||
269 | if (i915_gem_object_has_pages(obj) && | ||||
270 | obj->mm.madv == I915_MADV_WILLNEED0 && | ||||
271 | i915->quirks & QUIRK_PIN_SWIZZLED_PAGES(1<<5)) { | ||||
272 | if (tiling == I915_TILING_NONE0) { | ||||
273 | GEM_BUG_ON(!obj->mm.quirked)((void)0); | ||||
274 | __i915_gem_object_unpin_pages(obj); | ||||
275 | obj->mm.quirked = false0; | ||||
276 | } | ||||
277 | if (!i915_gem_object_is_tiled(obj)) { | ||||
278 | GEM_BUG_ON(obj->mm.quirked)((void)0); | ||||
279 | __i915_gem_object_pin_pages(obj); | ||||
280 | obj->mm.quirked = true1; | ||||
281 | } | ||||
282 | } | ||||
283 | mutex_unlock(&obj->mm.lock)rw_exit_write(&obj->mm.lock); | ||||
284 | |||||
285 | spin_lock(&obj->vma.lock)mtx_enter(&obj->vma.lock); | ||||
286 | for_each_ggtt_vma(vma, obj)for (vma = ({ const __typeof( ((__typeof(*vma) *)0)->obj_link ) *__mptr = ((&(obj)->vma.list)->next); (__typeof( *vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma), obj_link) );}); &vma->obj_link != (&(obj)->vma .list); vma = ({ const __typeof( ((__typeof(*vma) *)0)->obj_link ) *__mptr = (vma->obj_link.next); (__typeof(*vma) *)( (char *)__mptr - __builtin_offsetof(__typeof(*vma), obj_link) );}) ) if (!i915_vma_is_ggtt(vma)) break; else { | ||||
287 | vma->fence_size = | ||||
288 | i915_gem_fence_size(i915, vma->size, tiling, stride); | ||||
289 | vma->fence_alignment = | ||||
290 | i915_gem_fence_alignment(i915, | ||||
291 | vma->size, tiling, stride); | ||||
292 | |||||
293 | if (vma->fence) | ||||
294 | vma->fence->dirty = true1; | ||||
295 | } | ||||
296 | spin_unlock(&obj->vma.lock)mtx_leave(&obj->vma.lock); | ||||
297 | |||||
298 | obj->tiling_and_stride = tiling | stride; | ||||
299 | i915_gem_object_unlock(obj); | ||||
300 | |||||
301 | /* Force the fence to be reacquired for GTT access */ | ||||
302 | i915_gem_object_release_mmap_gtt(obj); | ||||
303 | |||||
304 | /* Try to preallocate memory required to save swizzling on put-pages */ | ||||
305 | if (i915_gem_object_needs_bit17_swizzle(obj)) { | ||||
306 | if (!obj->bit_17) { | ||||
307 | obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT12, | ||||
308 | GFP_KERNEL(0x0001 | 0x0004)); | ||||
309 | } | ||||
310 | } else { | ||||
311 | bitmap_free(obj->bit_17); | ||||
312 | obj->bit_17 = NULL((void *)0); | ||||
313 | } | ||||
314 | |||||
315 | return 0; | ||||
316 | } | ||||
317 | |||||
318 | /** | ||||
319 | * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode | ||||
320 | * @dev: DRM device | ||||
321 | * @data: data pointer for the ioctl | ||||
322 | * @file: DRM file for the ioctl call | ||||
323 | * | ||||
324 | * Sets the tiling mode of an object, returning the required swizzling of | ||||
325 | * bit 6 of addresses in the object. | ||||
326 | * | ||||
327 | * Called by the user via ioctl. | ||||
328 | * | ||||
329 | * Returns: | ||||
330 | * Zero on success, negative errno on failure. | ||||
331 | */ | ||||
332 | int | ||||
333 | i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||||
334 | struct drm_file *file) | ||||
335 | { | ||||
336 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(dev); | ||||
337 | struct drm_i915_gem_set_tiling *args = data; | ||||
338 | struct drm_i915_gem_object *obj; | ||||
339 | int err; | ||||
340 | |||||
341 | if (!dev_priv->ggtt.num_fences) | ||||
| |||||
342 | return -EOPNOTSUPP45; | ||||
343 | |||||
344 | obj = i915_gem_object_lookup(file, args->handle); | ||||
345 | if (!obj) | ||||
346 | return -ENOENT2; | ||||
347 | |||||
348 | /* | ||||
349 | * The tiling mode of proxy objects is handled by its generator, and | ||||
350 | * not allowed to be changed by userspace. | ||||
351 | */ | ||||
352 | if (i915_gem_object_is_proxy(obj)) { | ||||
353 | err = -ENXIO6; | ||||
354 | goto err; | ||||
355 | } | ||||
356 | |||||
357 | if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) { | ||||
358 | err = -EINVAL22; | ||||
359 | goto err; | ||||
360 | } | ||||
361 | |||||
362 | if (args->tiling_mode == I915_TILING_NONE0) { | ||||
363 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE0; | ||||
364 | args->stride = 0; | ||||
365 | } else { | ||||
366 | if (args->tiling_mode == I915_TILING_X1) | ||||
367 | args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x; | ||||
368 | else | ||||
369 | args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y; | ||||
370 | |||||
371 | /* Hide bit 17 swizzling from the user. This prevents old Mesa | ||||
372 | * from aborting the application on sw fallbacks to bit 17, | ||||
373 | * and we use the pread/pwrite bit17 paths to swizzle for it. | ||||
374 | * If there was a user that was relying on the swizzle | ||||
375 | * information for drm_intel_bo_map()ed reads/writes this would | ||||
376 | * break it, but we don't have any of those. | ||||
377 | */ | ||||
378 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_176) | ||||
379 | args->swizzle_mode = I915_BIT_6_SWIZZLE_91; | ||||
380 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_177) | ||||
381 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_102; | ||||
382 | |||||
383 | /* If we can't handle the swizzling, make it untiled. */ | ||||
384 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN5) { | ||||
385 | args->tiling_mode = I915_TILING_NONE0; | ||||
386 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE0; | ||||
387 | args->stride = 0; | ||||
388 | } | ||||
389 | } | ||||
390 | |||||
391 | err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride); | ||||
392 | |||||
393 | /* We have to maintain this existing ABI... */ | ||||
394 | args->stride = i915_gem_object_get_stride(obj); | ||||
395 | args->tiling_mode = i915_gem_object_get_tiling(obj); | ||||
396 | |||||
397 | err: | ||||
398 | i915_gem_object_put(obj); | ||||
399 | return err; | ||||
400 | } | ||||
401 | |||||
402 | /** | ||||
403 | * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode | ||||
404 | * @dev: DRM device | ||||
405 | * @data: data pointer for the ioctl | ||||
406 | * @file: DRM file for the ioctl call | ||||
407 | * | ||||
408 | * Returns the current tiling mode and required bit 6 swizzling for the object. | ||||
409 | * | ||||
410 | * Called by the user via ioctl. | ||||
411 | * | ||||
412 | * Returns: | ||||
413 | * Zero on success, negative errno on failure. | ||||
414 | */ | ||||
415 | int | ||||
416 | i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||||
417 | struct drm_file *file) | ||||
418 | { | ||||
419 | struct drm_i915_gem_get_tiling *args = data; | ||||
420 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(dev); | ||||
421 | struct drm_i915_gem_object *obj; | ||||
422 | int err = -ENOENT2; | ||||
423 | |||||
424 | if (!dev_priv->ggtt.num_fences) | ||||
425 | return -EOPNOTSUPP45; | ||||
426 | |||||
427 | rcu_read_lock(); | ||||
428 | obj = i915_gem_object_lookup_rcu(file, args->handle); | ||||
429 | if (obj) { | ||||
430 | args->tiling_mode = | ||||
431 | READ_ONCE(obj->tiling_and_stride)({ typeof(obj->tiling_and_stride) __tmp = *(volatile typeof (obj->tiling_and_stride) *)&(obj->tiling_and_stride ); membar_datadep_consumer(); __tmp; }) & TILING_MASK(128 - 1); | ||||
432 | err = 0; | ||||
433 | } | ||||
434 | rcu_read_unlock(); | ||||
435 | if (unlikely(err)__builtin_expect(!!(err), 0)) | ||||
436 | return err; | ||||
437 | |||||
438 | switch (args->tiling_mode) { | ||||
439 | case I915_TILING_X1: | ||||
440 | args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x; | ||||
441 | break; | ||||
442 | case I915_TILING_Y2: | ||||
443 | args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y; | ||||
444 | break; | ||||
445 | default: | ||||
446 | case I915_TILING_NONE0: | ||||
447 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE0; | ||||
448 | break; | ||||
449 | } | ||||
450 | |||||
451 | /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ | ||||
452 | if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES(1<<5)) | ||||
453 | args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN5; | ||||
454 | else | ||||
455 | args->phys_swizzle_mode = args->swizzle_mode; | ||||
456 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_176) | ||||
457 | args->swizzle_mode = I915_BIT_6_SWIZZLE_91; | ||||
458 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_177) | ||||
459 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_102; | ||||
460 | |||||
461 | return 0; | ||||
462 | } |
1 | /* | ||||
2 | * SPDX-License-Identifier: MIT | ||||
3 | * | ||||
4 | * Copyright © 2016 Intel Corporation | ||||
5 | */ | ||||
6 | |||||
7 | #ifndef __I915_GEM_OBJECT_H__ | ||||
8 | #define __I915_GEM_OBJECT_H__ | ||||
9 | |||||
10 | #include <drm/drm_gem.h> | ||||
11 | #include <drm/drm_file.h> | ||||
12 | #include <drm/drm_device.h> | ||||
13 | |||||
14 | #include "display/intel_frontbuffer.h" | ||||
15 | #include "i915_gem_object_types.h" | ||||
16 | #include "i915_gem_gtt.h" | ||||
17 | #include "i915_vma_types.h" | ||||
18 | |||||
19 | void i915_gem_init__objects(struct drm_i915_privateinteldrm_softc *i915); | ||||
20 | |||||
21 | struct drm_i915_gem_object *i915_gem_object_alloc(void); | ||||
22 | void i915_gem_object_free(struct drm_i915_gem_object *obj); | ||||
23 | |||||
24 | void i915_gem_object_init(struct drm_i915_gem_object *obj, | ||||
25 | const struct drm_i915_gem_object_ops *ops, | ||||
26 | struct lock_class_key *key); | ||||
27 | struct drm_i915_gem_object * | ||||
28 | i915_gem_object_create_shmem(struct drm_i915_privateinteldrm_softc *i915, | ||||
29 | resource_size_t size); | ||||
30 | struct drm_i915_gem_object * | ||||
31 | i915_gem_object_create_shmem_from_data(struct drm_i915_privateinteldrm_softc *i915, | ||||
32 | const void *data, resource_size_t size); | ||||
33 | |||||
34 | extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops; | ||||
35 | void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, | ||||
36 | struct sg_table *pages, | ||||
37 | bool_Bool needs_clflush); | ||||
38 | |||||
39 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); | ||||
40 | |||||
41 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); | ||||
42 | void i915_gem_free_object(struct drm_gem_object *obj); | ||||
43 | |||||
44 | void i915_gem_flush_free_objects(struct drm_i915_privateinteldrm_softc *i915); | ||||
45 | |||||
46 | struct sg_table * | ||||
47 | __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); | ||||
48 | void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | ||||
49 | |||||
50 | /** | ||||
51 | * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle | ||||
52 | * @filp: DRM file private data | ||||
53 | * @handle: userspace handle | ||||
54 | * | ||||
55 | * Returns: | ||||
56 | * | ||||
57 | * A pointer to the object named by the handle if such exists on @filp, NULL | ||||
58 | * otherwise. This object is only valid whilst under the RCU read lock, and | ||||
59 | * note carefully the object may be in the process of being destroyed. | ||||
60 | */ | ||||
61 | static inline struct drm_i915_gem_object * | ||||
62 | i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) | ||||
63 | { | ||||
64 | #ifdef CONFIG_LOCKDEP | ||||
65 | WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map))({ int __ret = !!((debug_locks && !lock_is_held(& rcu_lock_map))); if (__ret) printf("%s", "WARN_ON(" "debug_locks && !lock_is_held(&rcu_lock_map)" ")"); __builtin_expect(!!(__ret), 0); }); | ||||
66 | #endif | ||||
67 | return idr_find(&file->object_idr, handle); | ||||
68 | } | ||||
69 | |||||
70 | static inline struct drm_i915_gem_object * | ||||
71 | i915_gem_object_get_rcu(struct drm_i915_gem_object *obj) | ||||
72 | { | ||||
73 | if (obj && !kref_get_unless_zero(&obj->base.refcount)) | ||||
74 | obj = NULL((void *)0); | ||||
75 | |||||
76 | return obj; | ||||
77 | } | ||||
78 | |||||
79 | static inline struct drm_i915_gem_object * | ||||
80 | i915_gem_object_lookup(struct drm_file *file, u32 handle) | ||||
81 | { | ||||
82 | struct drm_i915_gem_object *obj; | ||||
83 | |||||
84 | rcu_read_lock(); | ||||
85 | obj = i915_gem_object_lookup_rcu(file, handle); | ||||
86 | obj = i915_gem_object_get_rcu(obj); | ||||
87 | rcu_read_unlock(); | ||||
88 | |||||
89 | return obj; | ||||
90 | } | ||||
91 | |||||
92 | __deprecated | ||||
93 | struct drm_gem_object * | ||||
94 | drm_gem_object_lookup(struct drm_file *file, u32 handle); | ||||
95 | |||||
96 | __attribute__((nonnull)) | ||||
97 | static inline struct drm_i915_gem_object * | ||||
98 | i915_gem_object_get(struct drm_i915_gem_object *obj) | ||||
99 | { | ||||
100 | drm_gem_object_get(&obj->base); | ||||
101 | return obj; | ||||
102 | } | ||||
103 | |||||
104 | __attribute__((nonnull)) | ||||
105 | static inline void | ||||
106 | i915_gem_object_put(struct drm_i915_gem_object *obj) | ||||
107 | { | ||||
108 | __drm_gem_object_put(&obj->base); | ||||
109 | } | ||||
110 | |||||
111 | #define assert_object_held(obj)do { (void)(&((obj)->base.resv)->lock.base); } while (0) dma_resv_assert_held((obj)->base.resv)do { (void)(&((obj)->base.resv)->lock.base); } while (0) | ||||
112 | |||||
113 | static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj, | ||||
114 | struct i915_gem_ww_ctx *ww, | ||||
115 | bool_Bool intr) | ||||
116 | { | ||||
117 | int ret; | ||||
118 | |||||
119 | if (intr) | ||||
120 | ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL((void *)0)); | ||||
121 | else | ||||
122 | ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL((void *)0)); | ||||
123 | |||||
124 | if (!ret && ww) | ||||
125 | list_add_tail(&obj->obj_link, &ww->obj_list); | ||||
126 | if (ret == -EALREADY37) | ||||
127 | ret = 0; | ||||
128 | |||||
129 | if (ret == -EDEADLK11) | ||||
130 | ww->contended = obj; | ||||
131 | |||||
132 | return ret; | ||||
133 | } | ||||
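For context, the contended pointer stored on -EDEADLK is consumed by the caller's ww backoff loop. A sketched retry pattern, assuming the i915_gem_ww_ctx_init()/i915_gem_ww_ctx_backoff()/i915_gem_ww_ctx_fini() helpers declared elsewhere in the driver, looks roughly like this:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* obj->base.resv is held here; do the work */
	}
	if (err == -EDEADLK) {
		/* backoff drops the held locks and sleeps on ww.contended */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);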
134 | |||||
135 | static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj, | ||||
136 | struct i915_gem_ww_ctx *ww) | ||||
137 | { | ||||
138 | return __i915_gem_object_lock(obj, ww, ww && ww->intr); | ||||
139 | } | ||||
140 | |||||
141 | static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj, | ||||
142 | struct i915_gem_ww_ctx *ww) | ||||
143 | { | ||||
144 | WARN_ON(ww && !ww->intr)({ int __ret = !!((ww && !ww->intr)); if (__ret) printf ("%s", "WARN_ON(" "ww && !ww->intr" ")"); __builtin_expect (!!(__ret), 0); }); | ||||
145 | return __i915_gem_object_lock(obj, ww, true1); | ||||
146 | } | ||||
147 | |||||
148 | static inline bool_Bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) | ||||
149 | { | ||||
150 | return dma_resv_trylock(obj->base.resv); | ||||
151 | } | ||||
152 | |||||
153 | static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) | ||||
154 | { | ||||
155 | dma_resv_unlock(obj->base.resv); | ||||
156 | } | ||||
157 | |||||
158 | struct dma_fence * | ||||
159 | i915_gem_object_lock_fence(struct drm_i915_gem_object *obj); | ||||
160 | void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, | ||||
161 | struct dma_fence *fence); | ||||
162 | |||||
163 | static inline void | ||||
164 | i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) | ||||
165 | { | ||||
166 | obj->flags |= I915_BO_READONLY(1UL << (2)); | ||||
167 | } | ||||
168 | |||||
169 | static inline bool_Bool | ||||
170 | i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) | ||||
171 | { | ||||
172 | return obj->flags & I915_BO_READONLY(1UL << (2)); | ||||
173 | } | ||||
174 | |||||
175 | static inline bool_Bool | ||||
176 | i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj) | ||||
177 | { | ||||
178 | return obj->flags & I915_BO_ALLOC_CONTIGUOUS(1UL << (0)); | ||||
179 | } | ||||
180 | |||||
181 | static inline bool_Bool | ||||
182 | i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj) | ||||
183 | { | ||||
184 | return obj->flags & I915_BO_ALLOC_VOLATILE(1UL << (1)); | ||||
185 | } | ||||
186 | |||||
187 | static inline void | ||||
188 | i915_gem_object_set_volatile(struct drm_i915_gem_object *obj) | ||||
189 | { | ||||
190 | obj->flags |= I915_BO_ALLOC_VOLATILE(1UL << (1)); | ||||
191 | } | ||||
192 | |||||
193 | static inline bool_Bool | ||||
194 | i915_gem_object_type_has(const struct drm_i915_gem_object *obj, | ||||
195 | unsigned long flags) | ||||
196 | { | ||||
197 | return obj->ops->flags & flags; | ||||
198 | } | ||||
199 | |||||
200 | static inline bool_Bool | ||||
201 | i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) | ||||
202 | { | ||||
203 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE(1UL << (0))); | ||||
204 | } | ||||
205 | |||||
206 | static inline bool_Bool | ||||
207 | i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) | ||||
208 | { | ||||
209 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE(1UL << (2))); | ||||
210 | } | ||||
211 | |||||
212 | static inline bool_Bool | ||||
213 | i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) | ||||
214 | { | ||||
215 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY(1UL << (3))); | ||||
216 | } | ||||
217 | |||||
218 | static inline bool_Bool | ||||
219 | i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj) | ||||
220 | { | ||||
221 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP(1UL << (4))); | ||||
222 | } | ||||
223 | |||||
224 | static inline bool_Bool | ||||
225 | i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) | ||||
226 | { | ||||
227 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL(1UL << (5))); | ||||
228 | } | ||||
229 | |||||
230 | static inline bool_Bool | ||||
231 | i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) | ||||
232 | { | ||||
233 | return READ_ONCE(obj->frontbuffer)({ typeof(obj->frontbuffer) __tmp = *(volatile typeof(obj-> frontbuffer) *)&(obj->frontbuffer); membar_datadep_consumer (); __tmp; }); | ||||
234 | } | ||||
235 | |||||
236 | static inline unsigned int | ||||
237 | i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) | ||||
238 | { | ||||
239 | return obj->tiling_and_stride & TILING_MASK(128 - 1); | ||||
240 | } | ||||
241 | |||||
242 | static inline bool_Bool | ||||
243 | i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj) | ||||
244 | { | ||||
245 | return i915_gem_object_get_tiling(obj) != I915_TILING_NONE0; | ||||
246 | } | ||||
247 | |||||
248 | static inline unsigned int | ||||
249 | i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) | ||||
250 | { | ||||
251 | return obj->tiling_and_stride & STRIDE_MASK(~(128 - 1)); | ||||
252 | } | ||||
253 | |||||
254 | static inline unsigned int | ||||
255 | i915_gem_tile_height(unsigned int tiling) | ||||
256 | { | ||||
257 | GEM_BUG_ON(!tiling)((void)0); | ||||
258 | return tiling == I915_TILING_Y2 ? 32 : 8; | ||||
259 | } | ||||
260 | |||||
261 | static inline unsigned int | ||||
262 | i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) | ||||
263 | { | ||||
264 | return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); | ||||
265 | } | ||||
266 | |||||
267 | static inline unsigned int | ||||
268 | i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) | ||||
269 | { | ||||
270 | return (i915_gem_object_get_stride(obj) * | ||||
271 | i915_gem_object_get_tile_height(obj)); | ||||
272 | } | ||||
273 | |||||
274 | int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, | ||||
275 | unsigned int tiling, unsigned int stride); | ||||
276 | |||||
277 | struct scatterlist * | ||||
278 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj, | ||||
279 | unsigned int n, unsigned int *offset); | ||||
280 | |||||
281 | struct vm_page * | ||||
282 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, | ||||
283 | unsigned int n); | ||||
284 | |||||
285 | struct vm_page * | ||||
286 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, | ||||
287 | unsigned int n); | ||||
288 | |||||
289 | dma_addr_t | ||||
290 | i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, | ||||
291 | unsigned long n, | ||||
292 | unsigned int *len); | ||||
293 | |||||
294 | dma_addr_t | ||||
295 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, | ||||
296 | unsigned long n); | ||||
297 | |||||
298 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, | ||||
299 | struct sg_table *pages, | ||||
300 | unsigned int sg_page_sizes); | ||||
301 | |||||
302 | int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); | ||||
303 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); | ||||
304 | |||||
305 | enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ | ||||
306 | I915_MM_NORMAL = 0, | ||||
307 | /* | ||||
308 | * Only used by struct_mutex, when called "recursively" from | ||||
309 | * direct-reclaim-esque paths. Safe because there is only ever one | ||||
310 | * struct_mutex in the entire system. | ||||
311 | */ | ||||
312 | I915_MM_SHRINKER = 1, | ||||
313 | /* | ||||
314 | * Used for obj->mm.lock when allocating pages. Safe because the object | ||||
315 | * isn't yet on any LRU, and therefore the shrinker can't deadlock on | ||||
316 | * it. As soon as the object has pages, obj->mm.lock nests within | ||||
317 | * fs_reclaim. | ||||
318 | */ | ||||
319 | I915_MM_GET_PAGES = 1, | ||||
320 | }; | ||||
321 | |||||
322 | static inline int __must_check | ||||
323 | i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) | ||||
324 | { | ||||
325 | might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); | ||||
326 | |||||
327 | if (atomic_inc_not_zero(&obj->mm.pages_pin_count)atomic_add_unless((&obj->mm.pages_pin_count), 1, 0)) | ||||
328 | return 0; | ||||
329 | |||||
330 | return __i915_gem_object_get_pages(obj); | ||||
331 | } | ||||
332 | |||||
333 | static inline bool_Bool | ||||
334 | i915_gem_object_has_pages(struct drm_i915_gem_object *obj) | ||||
335 | { | ||||
336 | return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)({ typeof(obj->mm.pages) __tmp = *(volatile typeof(obj-> mm.pages) *)&(obj->mm.pages); membar_datadep_consumer( ); __tmp; })); | ||||
337 | } | ||||
338 | |||||
339 | static inline void | ||||
340 | __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) | ||||
341 | { | ||||
342 | GEM_BUG_ON(!i915_gem_object_has_pages(obj))((void)0); | ||||
343 | |||||
344 | atomic_inc(&obj->mm.pages_pin_count)__sync_fetch_and_add(&obj->mm.pages_pin_count, 1); | ||||
345 | } | ||||
346 | |||||
347 | static inline bool_Bool | ||||
348 | i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) | ||||
349 | { | ||||
350 | return atomic_read(&obj->mm.pages_pin_count)({ typeof(*(&obj->mm.pages_pin_count)) __tmp = *(volatile typeof(*(&obj->mm.pages_pin_count)) *)&(*(&obj ->mm.pages_pin_count)); membar_datadep_consumer(); __tmp; } ); | ||||
351 | } | ||||
352 | |||||
353 | static inline void | ||||
354 | __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | ||||
355 | { | ||||
356 | GEM_BUG_ON(!i915_gem_object_has_pages(obj))((void)0); | ||||
357 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj))((void)0); | ||||
358 | |||||
359 | atomic_dec(&obj->mm.pages_pin_count)__sync_fetch_and_sub(&obj->mm.pages_pin_count, 1); | ||||
360 | } | ||||
361 | |||||
362 | static inline void | ||||
363 | i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | ||||
364 | { | ||||
365 | __i915_gem_object_unpin_pages(obj); | ||||
366 | } | ||||
367 | |||||
368 | int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | ||||
369 | void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | ||||
370 | void i915_gem_object_writeback(struct drm_i915_gem_object *obj); | ||||
371 | |||||
372 | enum i915_map_type { | ||||
373 | I915_MAP_WB = 0, | ||||
374 | I915_MAP_WC, | ||||
375 | #define I915_MAP_OVERRIDE(1UL << (31)) BIT(31)(1UL << (31)) | ||||
376 | I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE(1UL << (31)), | ||||
377 | I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE(1UL << (31)), | ||||
378 | }; | ||||
379 | |||||
380 | /** | ||||
381 | * i915_gem_object_pin_map - return a contiguous mapping of the entire object | ||||
382 | * @obj: the object to map into kernel address space | ||||
383 | * @type: the type of mapping, used to select pgprot_t | ||||
384 | * | ||||
385 | * Calls i915_gem_object_pin_pages() to prevent reaping of the object's | ||||
386 | * pages and then returns a contiguous mapping of the backing storage into | ||||
387 | * the kernel address space. Based on the @type of mapping, the PTE will be | ||||
388 | * set to either WriteBack or WriteCombine (via pgprot_t). | ||||
389 | * | ||||
390 | * The caller is responsible for calling i915_gem_object_unpin_map() when the | ||||
391 | * mapping is no longer required. | ||||
392 | * | ||||
393 | * Returns the pointer through which to access the mapped object, or an | ||||
394 | * ERR_PTR() on error. | ||||
395 | */ | ||||
396 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, | ||||
397 | enum i915_map_type type); | ||||
398 | |||||
399 | void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, | ||||
400 | unsigned long offset, | ||||
401 | unsigned long size); | ||||
402 | static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) | ||||
403 | { | ||||
404 | __i915_gem_object_flush_map(obj, 0, obj->base.size); | ||||
405 | } | ||||
406 | |||||
407 | /** | ||||
408 | * i915_gem_object_unpin_map - releases an earlier mapping | ||||
409 | * @obj: the object to unmap | ||||
410 | * | ||||
411 | * After pinning the object and mapping its pages, once you are finished | ||||
412 | * with your access, call i915_gem_object_unpin_map() to release the pin | ||||
413 | * upon the mapping. Once the pin count reaches zero, that mapping may be | ||||
414 | * removed. | ||||
415 | */ | ||||
416 | static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) | ||||
417 | { | ||||
418 | i915_gem_object_unpin_pages(obj); | ||||
419 | } | ||||
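A minimal usage sketch of the pin_map/flush_map/unpin_map pairing documented above, assuming obj, src and len come from the surrounding kernel code:

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);		/* CPU writes through the kernel mapping */
	i915_gem_object_flush_map(obj);		/* flush CPU caches before the GPU reads the pages */
	i915_gem_object_unpin_map(obj);		/* drop the pin taken by pin_map */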
420 | |||||
421 | void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); | ||||
422 | |||||
423 | void | ||||
424 | i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, | ||||
425 | unsigned int flush_domains); | ||||
426 | |||||
427 | int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, | ||||
428 | unsigned int *needs_clflush); | ||||
429 | int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, | ||||
430 | unsigned int *needs_clflush); | ||||
431 | #define CLFLUSH_BEFORE(1UL << (0)) BIT(0)(1UL << (0)) | ||||
432 | #define CLFLUSH_AFTER(1UL << (1)) BIT(1)(1UL << (1)) | ||||
433 | #define CLFLUSH_FLAGS((1UL << (0)) | (1UL << (1))) (CLFLUSH_BEFORE(1UL << (0)) | CLFLUSH_AFTER(1UL << (1))) | ||||
434 | |||||
435 | static inline void | ||||
436 | i915_gem_object_finish_access(struct drm_i915_gem_object *obj) | ||||
437 | { | ||||
438 | i915_gem_object_unpin_pages(obj); | ||||
439 | } | ||||
440 | |||||
441 | static inline struct intel_engine_cs * | ||||
442 | i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) | ||||
443 | { | ||||
444 | struct intel_engine_cs *engine = NULL((void *)0); | ||||
445 | struct dma_fence *fence; | ||||
446 | |||||
447 | rcu_read_lock(); | ||||
448 | fence = dma_resv_get_excl_rcu(obj->base.resv); | ||||
449 | rcu_read_unlock(); | ||||
450 | |||||
451 | if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) | ||||
452 | engine = to_request(fence)->engine; | ||||
453 | dma_fence_put(fence); | ||||
454 | |||||
455 | return engine; | ||||
456 | } | ||||
457 | |||||
458 | void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, | ||||
459 | unsigned int cache_level); | ||||
460 | void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); | ||||
461 | void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); | ||||
462 | |||||
463 | int __must_check | ||||
464 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool_Bool write); | ||||
465 | int __must_check | ||||
466 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool_Bool write); | ||||
467 | int __must_check | ||||
468 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool_Bool write); | ||||
469 | struct i915_vma * __must_check | ||||
470 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | ||||
471 | u32 alignment, | ||||
472 | const struct i915_ggtt_view *view, | ||||
473 | unsigned int flags); | ||||
474 | |||||
475 | void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); | ||||
476 | void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); | ||||
477 | void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); | ||||
478 | |||||
479 | static inline bool_Bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) | ||||
480 | { | ||||
481 | if (obj->cache_dirty) | ||||
482 | return false0; | ||||
483 | |||||
484 | if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE(1UL << (1)))) | ||||
485 | return true1; | ||||
486 | |||||
487 | /* Currently in use by HW (display engine)? Keep flushed. */ | ||||
488 | return i915_gem_object_is_framebuffer(obj); | ||||
489 | } | ||||
490 | |||||
491 | static inline void __start_cpu_write(struct drm_i915_gem_object *obj) | ||||
492 | { | ||||
493 | obj->read_domains = I915_GEM_DOMAIN_CPU0x00000001; | ||||
494 | obj->write_domain = I915_GEM_DOMAIN_CPU0x00000001; | ||||
495 | if (cpu_write_needs_clflush(obj)) | ||||
496 | obj->cache_dirty = true1; | ||||
497 | } | ||||
498 | |||||
499 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, | ||||
500 | unsigned int flags, | ||||
501 | long timeout); | ||||
502 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, | ||||
503 | unsigned int flags, | ||||
504 | const struct i915_sched_attr *attr); | ||||
505 | |||||
506 | void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, | ||||
507 | enum fb_op_origin origin); | ||||
508 | void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, | ||||
509 | enum fb_op_origin origin); | ||||
510 | |||||
511 | static inline void | ||||
512 | i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, | ||||
513 | enum fb_op_origin origin) | ||||
514 | { | ||||
515 | if (unlikely(rcu_access_pointer(obj->frontbuffer))__builtin_expect(!!((obj->frontbuffer)), 0)) | ||||
516 | __i915_gem_object_flush_frontbuffer(obj, origin); | ||||
517 | } | ||||
518 | |||||
519 | static inline void | ||||
520 | i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, | ||||
521 | enum fb_op_origin origin) | ||||
522 | { | ||||
523 | if (unlikely(rcu_access_pointer(obj->frontbuffer))__builtin_expect(!!((obj->frontbuffer)), 0)) | ||||
524 | __i915_gem_object_invalidate_frontbuffer(obj, origin); | ||||
525 | } | ||||
526 | |||||
527 | #endif |