| File: | dev/pci/drm/i915/gem/i915_gem_object.h |
| Warning: | line 194, column 17: Access to field 'contended' results in a dereference of a null pointer (loaded from variable 'ww') |
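
The path behind this warning: intel_dpt_create() (first listing, line 268) locks the newly created DPT object with a NULL ww context via i915_gem_object_lock_interruptible(dpt_obj, NULL); that call funnels into __i915_gem_object_lock() (second listing, line 174), which writes ww->contended when the lock attempt returns -EDEADLK (line 194). A minimal sketch of the analyzer's reasoning, with obj as in the listings — not a verbatim excerpt from the tree:

    struct i915_gem_ww_ctx *ww = NULL;   /* intel_dpt.c:268 passes NULL */
    int ret;

    ret = dma_resv_lock_interruptible(obj->base.resv,
                                      ww ? &ww->ctx : NULL);
    if (ret == -EDEADLK) {               /* branch the analyzer assumes reachable */
            i915_gem_object_get(obj);
            ww->contended = obj;         /* line 194: NULL dereference */
    }

Note that dma_resv_lock_interruptible() is only expected to return -EDEADLK when a ww acquire context is actually in use, so whether this store is reachable with ww == NULL is debatable. The two listings follow.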
| 1 | // SPDX-License-Identifier: MIT | |||
| 2 | /* | |||
| 3 | * Copyright © 2021 Intel Corporation | |||
| 4 | */ | |||
| 5 | ||||
| 6 | #include "gem/i915_gem_domain.h" | |||
| 7 | #include "gem/i915_gem_internal.h" | |||
| 8 | #include "gt/gen8_ppgtt.h" | |||
| 9 | ||||
| 10 | #include "i915_drv.h" | |||
| 11 | #include "intel_display_types.h" | |||
| 12 | #include "intel_dpt.h" | |||
| 13 | #include "intel_fb.h" | |||
| 14 | ||||
| 15 | struct i915_dpt { | |||
| 16 | struct i915_address_space vm; | |||
| 17 | ||||
| 18 | struct drm_i915_gem_object *obj; | |||
| 19 | struct i915_vma *vma; | |||
| 20 | void __iomem *iomem; | |||
| 21 | }; | |||
| 22 | ||||
| 23 | #define i915_is_dpt(vm) ((vm)->is_dpt) | |||
| 24 | ||||
| 25 | static inline struct i915_dpt * | |||
| 26 | i915_vm_to_dpt(struct i915_address_space *vm) | |||
| 27 | { | |||
| 28 | BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); | |||
| 29 | GEM_BUG_ON(!i915_is_dpt(vm)); | |||
| 30 | return container_of(vm, struct i915_dpt, vm); | |||
| 31 | } | |||
| 32 | ||||
| 33 | #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) | |||
| 34 | ||||
| 35 | static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) | |||
| 36 | { | |||
| 37 | writeq(pte, addr); | |||
| 38 | } | |||
| 39 | ||||
| 40 | static void dpt_insert_page(struct i915_address_space *vm, | |||
| 41 | dma_addr_t addr, | |||
| 42 | u64 offset, | |||
| 43 | enum i915_cache_level level, | |||
| 44 | u32 flags) | |||
| 45 | { | |||
| 46 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 47 | gen8_pte_t __iomem *base = dpt->iomem; | |||
| 48 | ||||
| 49 | gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, | |||
| 50 | vm->pte_encode(addr, level, flags)); | |||
| 51 | } | |||
| 52 | ||||
| 53 | static void dpt_insert_entries(struct i915_address_space *vm, | |||
| 54 | struct i915_vma_resource *vma_res, | |||
| 55 | enum i915_cache_level level, | |||
| 56 | u32 flags) | |||
| 57 | { | |||
| 58 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 59 | gen8_pte_t __iomem *base = dpt->iomem; | |||
| 60 | const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); | |||
| 61 | struct sgt_iter sgt_iter; | |||
| 62 | dma_addr_t addr; | |||
| 63 | int i; | |||
| 64 | ||||
| 65 | /* | |||
| 66 | * Note that we ignore PTE_READ_ONLY here. The caller must be careful | |||
| 67 | * not to allow the user to override access to a read only page. | |||
| 68 | */ | |||
| 69 | ||||
| 70 | i = vma_res->start / I915_GTT_PAGE_SIZE; | |||
| 71 | for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages) | |||
| 72 | gen8_set_pte(&base[i++], pte_encode | addr); | |||
| 73 | } | |||
| 74 | ||||
| 75 | static void dpt_clear_range(struct i915_address_space *vm, | |||
| 76 | u64 start, u64 length) | |||
| 77 | { | |||
| 78 | } | |||
| 79 | ||||
| 80 | static void dpt_bind_vma(struct i915_address_space *vm, | |||
| 81 | struct i915_vm_pt_stash *stash, | |||
| 82 | struct i915_vma_resource *vma_res, | |||
| 83 | enum i915_cache_level cache_level, | |||
| 84 | u32 flags) | |||
| 85 | { | |||
| 86 | u32 pte_flags; | |||
| 87 | ||||
| 88 | if (vma_res->bound_flags) | |||
| 89 | return; | |||
| 90 | ||||
| 91 | /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ | |||
| 92 | pte_flags = 0; | |||
| 93 | if (vm->has_read_only && vma_res->bi.readonly) | |||
| 94 | pte_flags |= PTE_READ_ONLY; | |||
| 95 | if (vma_res->bi.lmem) | |||
| 96 | pte_flags |= PTE_LM; | |||
| 97 | ||||
| 98 | vm->insert_entries(vm, vma_res, cache_level, pte_flags); | |||
| 99 | ||||
| 100 | vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE; | |||
| 101 | ||||
| 102 | /* | |||
| 103 | * Without aliasing PPGTT there's no difference between | |||
| 104 | * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally | |||
| 105 | * upgrade to both bound if we bind either to avoid double-binding. | |||
| 106 | */ | |||
| 107 | vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; | |||
| 108 | } | |||
| 109 | ||||
| 110 | static void dpt_unbind_vma(struct i915_address_space *vm, | |||
| 111 | struct i915_vma_resource *vma_res) | |||
| 112 | { | |||
| 113 | vm->clear_range(vm, vma_res->start, vma_res->vma_size); | |||
| 114 | } | |||
| 115 | ||||
| 116 | static void dpt_cleanup(struct i915_address_space *vm) | |||
| 117 | { | |||
| 118 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 119 | ||||
| 120 | i915_gem_object_put(dpt->obj); | |||
| 121 | } | |||
| 122 | ||||
| 123 | struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) | |||
| 124 | { | |||
| 125 | struct drm_i915_private *i915 = vm->i915; | |||
| 126 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 127 | intel_wakeref_t wakeref; | |||
| 128 | struct i915_vma *vma; | |||
| 129 | void __iomem *iomem; | |||
| 130 | struct i915_gem_ww_ctx ww; | |||
| 131 | u64 pin_flags = 0; | |||
| 132 | int err; | |||
| 133 | ||||
| 134 | if (i915_gem_object_is_stolen(dpt->obj)) | |||
| 135 | pin_flags |= PIN_MAPPABLE; | |||
| 136 | ||||
| 137 | wakeref = intel_runtime_pm_get(&i915->runtime_pm); | |||
| 138 | atomic_inc(&i915->gpu_error.pending_fb_pin); | |||
| 139 | ||||
| 140 | for_i915_gem_ww(&ww, err, true) { | |||
| 141 | err = i915_gem_object_lock(dpt->obj, &ww); | |||
| 142 | if (err) | |||
| 143 | continue; | |||
| 144 | ||||
| 145 | vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096, | |||
| 146 | pin_flags); | |||
| 147 | if (IS_ERR(vma)) { | |||
| 148 | err = PTR_ERR(vma); | |||
| 149 | continue; | |||
| 150 | } | |||
| 151 | ||||
| 152 | iomem = i915_vma_pin_iomap(vma); | |||
| 153 | i915_vma_unpin(vma); | |||
| 154 | ||||
| 155 | if (IS_ERR(iomem)) { | |||
| 156 | err = PTR_ERR(iomem); | |||
| 157 | continue; | |||
| 158 | } | |||
| 159 | ||||
| 160 | dpt->vma = vma; | |||
| 161 | dpt->iomem = iomem; | |||
| 162 | ||||
| 163 | i915_vma_get(vma); | |||
| 164 | } | |||
| 165 | ||||
| 166 | dpt->obj->mm.dirty = true; | |||
| 167 | ||||
| 168 | atomic_dec(&i915->gpu_error.pending_fb_pin); | |||
| 169 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); | |||
| 170 | ||||
| 171 | return err ? ERR_PTR(err) : vma; | |||
| 172 | } | |||
| 173 | ||||
| 174 | void intel_dpt_unpin(struct i915_address_space *vm) | |||
| 175 | { | |||
| 176 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 177 | ||||
| 178 | i915_vma_unpin_iomap(dpt->vma); | |||
| 179 | i915_vma_put(dpt->vma); | |||
| 180 | } | |||
| 181 | ||||
| 182 | /** | |||
| 183 | * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume | |||
| 184 | * @i915: device instance | |||
| 185 | * | |||
| 186 | * Restore the memory mapping during system resume for all framebuffers which | |||
| 187 | * are mapped to HW via a GGTT->DPT page table. The content of these page | |||
| 188 | * tables are not stored in the hibernation image during S4 and S3RST->S4 | |||
| 189 | * transitions, so here we reprogram the PTE entries in those tables. | |||
| 190 | * | |||
| 191 | * This function must be called after the mappings in GGTT have been restored by | |||
| 192 | * calling i915_ggtt_resume(). | |||
| 193 | */ | |||
| 194 | void intel_dpt_resume(struct drm_i915_private *i915) | |||
| 195 | { | |||
| 196 | struct drm_framebuffer *drm_fb; | |||
| 197 | ||||
| 198 | if (!HAS_DISPLAY(i915)) | |||
| 199 | return; | |||
| 200 | ||||
| 201 | mutex_lock(&i915->drm.mode_config.fb_lock); | |||
| 202 | drm_for_each_fb(drm_fb, &i915->drm) { | |||
| 203 | struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); | |||
| 204 | ||||
| 205 | if (fb->dpt_vm) | |||
| 206 | i915_ggtt_resume_vm(fb->dpt_vm); | |||
| 207 | } | |||
| 208 | mutex_unlock(&i915->drm.mode_config.fb_lock); | |||
| 209 | } | |||
| 210 | ||||
| 211 | /** | |||
| 212 | * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend | |||
| 213 | * @i915: device instance | |||
| 214 | * | |||
| 215 | * Suspend the memory mapping during system suspend for all framebuffers which | |||
| 216 | * are mapped to HW via a GGTT->DPT page table. | |||
| 217 | * | |||
| 218 | * This function must be called before the mappings in GGTT are suspended by | |||
| 219 | * calling i915_ggtt_suspend(). | |||
| 220 | */ | |||
| 221 | void intel_dpt_suspend(struct drm_i915_private *i915) | |||
| 222 | { | |||
| 223 | struct drm_framebuffer *drm_fb; | |||
| 224 | ||||
| 225 | if (!HAS_DISPLAY(i915)) | |||
| 226 | return; | |||
| 227 | ||||
| 228 | mutex_lock(&i915->drm.mode_config.fb_lock); | |||
| 229 | ||||
| 230 | drm_for_each_fb(drm_fb, &i915->drm) { | |||
| 231 | struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); | |||
| 232 | ||||
| 233 | if (fb->dpt_vm) | |||
| 234 | i915_ggtt_suspend_vm(fb->dpt_vm); | |||
| 235 | } | |||
| 236 | ||||
| 237 | mutex_unlock(&i915->drm.mode_config.fb_lock); | |||
| 238 | } | |||
| 239 | ||||
| 240 | struct i915_address_space * | |||
| 241 | intel_dpt_create(struct intel_framebuffer *fb) | |||
| 242 | { | |||
| 243 | struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; | |||
| 244 | struct drm_i915_private *i915 = to_i915(obj->dev); | |||
| 245 | struct drm_i915_gem_object *dpt_obj; | |||
| 246 | struct i915_address_space *vm; | |||
| 247 | struct i915_dpt *dpt; | |||
| 248 | size_t size; | |||
| 249 | int ret; | |||
| 250 | ||||
| 251 | if (intel_fb_needs_pot_stride_remap(fb)) | |||
| 252 | size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); | |||
| 253 | else | |||
| 254 | size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); | |||
| 255 | ||||
| 256 | size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); | |||
| 257 | ||||
| 258 | dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS); | |||
| 259 | if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt)) | |||
| 260 | dpt_obj = i915_gem_object_create_stolen(i915, size); | |||
| 261 | if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) { | |||
| 262 | drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n"); | |||
| 263 | dpt_obj = i915_gem_object_create_shmem(i915, size); | |||
| 264 | } | |||
| 265 | if (IS_ERR(dpt_obj)) | |||
| 266 | return ERR_CAST(dpt_obj); | |||
| 267 | ||||
| 268 | ret = i915_gem_object_lock_interruptible(dpt_obj, NULL); | |||
| 269 | if (!ret) { | |||
| 270 | ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); | |||
| 271 | i915_gem_object_unlock(dpt_obj); | |||
| 272 | } | |||
| 273 | if (ret) { | |||
| 274 | i915_gem_object_put(dpt_obj); | |||
| 275 | return ERR_PTR(ret); | |||
| 276 | } | |||
| 277 | ||||
| 278 | dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); | |||
| 279 | if (!dpt) { | |||
| 280 | i915_gem_object_put(dpt_obj); | |||
| 281 | return ERR_PTR(-ENOMEM); | |||
| 282 | } | |||
| 283 | ||||
| 284 | vm = &dpt->vm; | |||
| 285 | ||||
| 286 | vm->gt = to_gt(i915); | |||
| 287 | vm->i915 = i915; | |||
| 288 | vm->dma = i915->drm.dev; | |||
| 289 | vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; | |||
| 290 | vm->is_dpt = true; | |||
| 291 | ||||
| 292 | i915_address_space_init(vm, VM_CLASS_DPT); | |||
| 293 | ||||
| 294 | vm->insert_page = dpt_insert_page; | |||
| 295 | vm->clear_range = dpt_clear_range; | |||
| 296 | vm->insert_entries = dpt_insert_entries; | |||
| 297 | vm->cleanup = dpt_cleanup; | |||
| 298 | ||||
| 299 | vm->vma_ops.bind_vma = dpt_bind_vma; | |||
| 300 | vm->vma_ops.unbind_vma = dpt_unbind_vma; | |||
| 301 | ||||
| 302 | vm->pte_encode = gen8_ggtt_pte_encode; | |||
| 303 | ||||
| 304 | dpt->obj = dpt_obj; | |||
| 305 | dpt->obj->is_dpt = true; | |||
| 306 | ||||
| 307 | return &dpt->vm; | |||
| 308 | } | |||
| 309 | ||||
| 310 | void intel_dpt_destroy(struct i915_address_space *vm) | |||
| 311 | { | |||
| 312 | struct i915_dpt *dpt = i915_vm_to_dpt(vm); | |||
| 313 | ||||
| 314 | dpt->obj->is_dpt = false; | |||
| 315 | i915_vm_put(&dpt->vm); | |||
| 316 | } |
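
The functions above form the DPT page-table object's lifecycle. A sketch of the expected calling sequence, assuming the caller owns an intel_framebuffer *fb and eliding error handling (an illustration, not a verbatim caller from the tree):

    struct i915_address_space *vm = intel_dpt_create(fb);  /* allocate the PT backing object */
    struct i915_vma *vma = intel_dpt_pin(vm);              /* map the PTs through the GGTT */
    /* ... scanout runs with the FB mapped to HW via the GGTT->DPT table ... */
    intel_dpt_unpin(vm);
    intel_dpt_destroy(vm);

Both intel_dpt_create() and intel_dpt_pin() take the object lock — the former with a NULL ww context, which is the entry point of the analyzer's path into the header below.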
| 1 | /* | ||||
| 2 | * SPDX-License-Identifier: MIT | ||||
| 3 | * | ||||
| 4 | * Copyright © 2016 Intel Corporation | ||||
| 5 | */ | ||||
| 6 | |||||
| 7 | #ifndef __I915_GEM_OBJECT_H__ | ||||
| 8 | #define __I915_GEM_OBJECT_H__ | ||||
| 9 | |||||
| 10 | #include <drm/drm_gem.h> | ||||
| 11 | #include <drm/drm_file.h> | ||||
| 12 | #include <drm/drm_device.h> | ||||
| 13 | |||||
| 14 | #include "display/intel_frontbuffer.h" | ||||
| 15 | #include "intel_memory_region.h" | ||||
| 16 | #include "i915_gem_object_types.h" | ||||
| 17 | #include "i915_gem_gtt.h" | ||||
| 18 | #include "i915_gem_ww.h" | ||||
| 19 | #include "i915_vma_types.h" | ||||
| 20 | |||||
| 21 | enum intel_region_id; | ||||
| 22 | |||||
| 23 | /* | ||||
| 24 | * XXX: There is a prevalence of the assumption that we fit the | ||||
| 25 | * object's page count inside a 32bit _signed_ variable. Let's document | ||||
| 26 | * this and catch if we ever need to fix it. In the meantime, if you do | ||||
| 27 | * spot such a local variable, please consider fixing! | ||||
| 28 | * | ||||
| 29 | * Aside from our own locals (for which we have no excuse!): | ||||
| 30 | * - sg_table embeds unsigned int for num_pages | ||||
| 31 | * - get_user_pages*() mixed ints with longs | ||||
| 32 | */ | ||||
| 33 | #define GEM_CHECK_SIZE_OVERFLOW(sz) \ | ||||
| 34 | GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX) | ||||
| 35 | |||||
| 36 | static inline bool i915_gem_object_size_2big(u64 size) | ||||
| 37 | { | ||||
| 38 | struct drm_i915_gem_object *obj; | ||||
| 39 | |||||
| 40 | if (GEM_CHECK_SIZE_OVERFLOW(size)) | ||||
| 41 | return true; | ||||
| 42 | |||||
| 43 | if (overflows_type(size, obj->base.size)) | ||||
| 44 | return true; | ||||
| 45 | |||||
| 46 | return false; | ||||
| 47 | } | ||||
| 48 | |||||
| 49 | void i915_gem_init__objects(struct drm_i915_private *i915); | ||||
| 50 | |||||
| 51 | void i915_objects_module_exit(void); | ||||
| 52 | int i915_objects_module_init(void); | ||||
| 53 | |||||
| 54 | struct drm_i915_gem_object *i915_gem_object_alloc(void); | ||||
| 55 | void i915_gem_object_free(struct drm_i915_gem_object *obj); | ||||
| 56 | |||||
| 57 | void i915_gem_object_init(struct drm_i915_gem_object *obj, | ||||
| 58 | const struct drm_i915_gem_object_ops *ops, | ||||
| 59 | struct lock_class_key *key, | ||||
| 60 | unsigned alloc_flags); | ||||
| 61 | |||||
| 62 | void __i915_gem_object_fini(struct drm_i915_gem_object *obj); | ||||
| 63 | |||||
| 64 | struct drm_i915_gem_object * | ||||
| 65 | i915_gem_object_create_shmem(struct drm_i915_private *i915, | ||||
| 66 | resource_size_t size); | ||||
| 67 | struct drm_i915_gem_object * | ||||
| 68 | i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, | ||||
| 69 | const void *data, resource_size_t size); | ||||
| 70 | struct drm_i915_gem_object * | ||||
| 71 | __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, | ||||
| 72 | struct intel_memory_region **placements, | ||||
| 73 | unsigned int n_placements); | ||||
| 74 | |||||
| 75 | extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops; | ||||
| 76 | |||||
| 77 | void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, | ||||
| 78 | struct sg_table *pages, | ||||
| 79 | bool needs_clflush); | ||||
| 80 | |||||
| 81 | int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj, | ||||
| 82 | const struct drm_i915_gem_pwrite *args); | ||||
| 83 | int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj, | ||||
| 84 | const struct drm_i915_gem_pread *args); | ||||
| 85 | |||||
| 86 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); | ||||
| 87 | void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, | ||||
| 88 | struct sg_table *pages); | ||||
| 89 | void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, | ||||
| 90 | struct sg_table *pages); | ||||
| 91 | |||||
| 92 | void i915_gem_flush_free_objects(struct drm_i915_private *i915); | ||||
| 93 | |||||
| 94 | struct sg_table * | ||||
| 95 | __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); | ||||
| 96 | |||||
| 97 | /** | ||||
| 98 | * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle | ||||
| 99 | * @filp: DRM file private data | ||||
| 100 | * @handle: userspace handle | ||||
| 101 | * | ||||
| 102 | * Returns: | ||||
| 103 | * | ||||
| 104 | * A pointer to the object named by the handle if such exists on @filp, NULL | ||||
| 105 | * otherwise. This object is only valid whilst under the RCU read lock, and | ||||
| 106 | * note carefully the object may be in the process of being destroyed. | ||||
| 107 | */ | ||||
| 108 | static inline struct drm_i915_gem_object * | ||||
| 109 | i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) | ||||
| 110 | { | ||||
| 111 | #ifdef CONFIG_LOCKDEP | ||||
| 112 | WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); | ||||
| 113 | #endif | ||||
| 114 | return idr_find(&file->object_idr, handle); | ||||
| 115 | } | ||||
| 116 | |||||
| 117 | static inline struct drm_i915_gem_object * | ||||
| 118 | i915_gem_object_get_rcu(struct drm_i915_gem_object *obj) | ||||
| 119 | { | ||||
| 120 | if (obj && !kref_get_unless_zero(&obj->base.refcount)) | ||||
| 121 | obj = NULL; | ||||
| 122 | |||||
| 123 | return obj; | ||||
| 124 | } | ||||
| 125 | |||||
| 126 | static inline struct drm_i915_gem_object * | ||||
| 127 | i915_gem_object_lookup(struct drm_file *file, u32 handle) | ||||
| 128 | { | ||||
| 129 | struct drm_i915_gem_object *obj; | ||||
| 130 | |||||
| 131 | rcu_read_lock(); | ||||
| 132 | obj = i915_gem_object_lookup_rcu(file, handle); | ||||
| 133 | obj = i915_gem_object_get_rcu(obj); | ||||
| 134 | rcu_read_unlock(); | ||||
| 135 | |||||
| 136 | return obj; | ||||
| 137 | } | ||||
| 138 | |||||
| 139 | __deprecated | ||||
| 140 | struct drm_gem_object * | ||||
| 141 | drm_gem_object_lookup(struct drm_file *file, u32 handle); | ||||
| 142 | |||||
| 143 | __attribute__((nonnull)) | ||||
| 144 | static inline struct drm_i915_gem_object * | ||||
| 145 | i915_gem_object_get(struct drm_i915_gem_object *obj) | ||||
| 146 | { | ||||
| 147 | drm_gem_object_get(&obj->base); | ||||
| 148 | return obj; | ||||
| 149 | } | ||||
| 150 | |||||
| 151 | __attribute__((nonnull)) | ||||
| 152 | static inline void | ||||
| 153 | i915_gem_object_put(struct drm_i915_gem_object *obj) | ||||
| 154 | { | ||||
| 155 | __drm_gem_object_put(&obj->base); | ||||
| 156 | } | ||||
| 157 | |||||
| 158 | #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) | ||||
| 159 | |||||
| 160 | /* | ||||
| 161 | * If more than one potential simultaneous locker, assert held. | ||||
| 162 | */ | ||||
| 163 | static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj) | ||||
| 164 | { | ||||
| 165 | /* | ||||
| 166 | * Note mm list lookup is protected by | ||||
| 167 | * kref_get_unless_zero(). | ||||
| 168 | */ | ||||
| 169 | if (IS_ENABLED(CONFIG_LOCKDEP) && | ||||
| 170 | kref_read(&obj->base.refcount) > 0) | ||||
| 171 | assert_object_held(obj); | ||||
| 172 | } | ||||
| 173 | |||||
| 174 | static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj, | ||||
| 175 | struct i915_gem_ww_ctx *ww, | ||||
| 176 | bool intr) | ||||
| 177 | { | ||||
| 178 | int ret; | ||||
| 179 | |||||
| 180 | if (intr) | ||||
| 181 | ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL); | ||||
| 182 | else | ||||
| 183 | ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL); | ||||
| 184 | |||||
| 185 | if (!ret && ww) { | ||||
| 186 | i915_gem_object_get(obj); | ||||
| 187 | list_add_tail(&obj->obj_link, &ww->obj_list); | ||||
| 188 | } | ||||
| 189 | if (ret == -EALREADY) | ||||
| 190 | ret = 0; | ||||
| 191 | |||||
| 192 | if (ret == -EDEADLK) { | ||||
| 193 | i915_gem_object_get(obj); | ||||
| 194 | ww->contended = obj; | ||||
| 195 | } | ||||
| 196 | |||||
| 197 | return ret; | ||||
| 198 | } | ||||
| 199 | |||||
| 200 | static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj, | ||||
| 201 | struct i915_gem_ww_ctx *ww) | ||||
| 202 | { | ||||
| 203 | return __i915_gem_object_lock(obj, ww, ww && ww->intr); | ||||
| 204 | } | ||||
| 205 | |||||
| 206 | static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj, | ||||
| 207 | struct i915_gem_ww_ctx *ww) | ||||
| 208 | { | ||||
| 209 | WARN_ON(ww && !ww->intr); | ||||
| 210 | return __i915_gem_object_lock(obj, ww, true); | ||||
| 211 | } | ||||
| 212 | |||||
| 213 | static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj, | ||||
| 214 | struct i915_gem_ww_ctx *ww) | ||||
| 215 | { | ||||
| 216 | if (!ww) | ||||
| 217 | return dma_resv_trylock(obj->base.resv); | ||||
| 218 | else | ||||
| 219 | return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx); | ||||
| 220 | } | ||||
| 221 | |||||
| 222 | static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) | ||||
| 223 | { | ||||
| 224 | if (obj->ops->adjust_lru) | ||||
| 225 | obj->ops->adjust_lru(obj); | ||||
| 226 | |||||
| 227 | dma_resv_unlock(obj->base.resv); | ||||
| 228 | } | ||||
| 229 | |||||
| 230 | static inline void | ||||
| 231 | i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) | ||||
| 232 | { | ||||
| 233 | obj->flags |= I915_BO_READONLY; | ||||
| 234 | } | ||||
| 235 | |||||
| 236 | static inline bool | ||||
| 237 | i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) | ||||
| 238 | { | ||||
| 239 | return obj->flags & I915_BO_READONLY; | ||||
| 240 | } | ||||
| 241 | |||||
| 242 | static inline bool | ||||
| 243 | i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj) | ||||
| 244 | { | ||||
| 245 | return obj->flags & I915_BO_ALLOC_CONTIGUOUS; | ||||
| 246 | } | ||||
| 247 | |||||
| 248 | static inline bool | ||||
| 249 | i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj) | ||||
| 250 | { | ||||
| 251 | return obj->flags & I915_BO_ALLOC_VOLATILE; | ||||
| 252 | } | ||||
| 253 | |||||
| 254 | static inline void | ||||
| 255 | i915_gem_object_set_volatile(struct drm_i915_gem_object *obj) | ||||
| 256 | { | ||||
| 257 | obj->flags |= I915_BO_ALLOC_VOLATILE; | ||||
| 258 | } | ||||
| 259 | |||||
| 260 | static inline bool | ||||
| 261 | i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj) | ||||
| 262 | { | ||||
| 263 | return test_bit(I915_TILING_QUIRK_BIT, &obj->flags); | ||||
| 264 | } | ||||
| 265 | |||||
| 266 | static inline void | ||||
| 267 | i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj) | ||||
| 268 | { | ||||
| 269 | set_bit(I915_TILING_QUIRK_BIT, &obj->flags); | ||||
| 270 | } | ||||
| 271 | |||||
| 272 | static inline void | ||||
| 273 | i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj) | ||||
| 274 | { | ||||
| 275 | clear_bit(I915_TILING_QUIRK_BIT, &obj->flags); | ||||
| 276 | } | ||||
| 277 | |||||
| 278 | static inline bool | ||||
| 279 | i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) | ||||
| 280 | { | ||||
| 281 | return obj->flags & I915_BO_PROTECTED; | ||||
| 282 | } | ||||
| 283 | |||||
| 284 | static inline bool | ||||
| 285 | i915_gem_object_type_has(const struct drm_i915_gem_object *obj, | ||||
| 286 | unsigned long flags) | ||||
| 287 | { | ||||
| 288 | return obj->ops->flags & flags; | ||||
| 289 | } | ||||
| 290 | |||||
| 291 | bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj); | ||||
| 292 | |||||
| 293 | bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj); | ||||
| 294 | |||||
| 295 | static inline bool | ||||
| 296 | i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) | ||||
| 297 | { | ||||
| 298 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE); | ||||
| 299 | } | ||||
| 300 | |||||
| 301 | static inline bool | ||||
| 302 | i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj) | ||||
| 303 | { | ||||
| 304 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST); | ||||
| 305 | } | ||||
| 306 | |||||
| 307 | static inline bool | ||||
| 308 | i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) | ||||
| 309 | { | ||||
| 310 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY); | ||||
| 311 | } | ||||
| 312 | |||||
| 313 | static inline bool | ||||
| 314 | i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj) | ||||
| 315 | { | ||||
| 316 | return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP); | ||||
| 317 | } | ||||
| 318 | |||||
| 319 | static inline bool | ||||
| 320 | i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) | ||||
| 321 | { | ||||
| 322 | return READ_ONCE(obj->frontbuffer) || obj->is_dpt; | ||||
| 323 | } | ||||
| 324 | |||||
| 325 | static inline unsigned int | ||||
| 326 | i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) | ||||
| 327 | { | ||||
| 328 | return obj->tiling_and_stride & TILING_MASK; | ||||
| 329 | } | ||||
| 330 | |||||
| 331 | static inline bool | ||||
| 332 | i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj) | ||||
| 333 | { | ||||
| 334 | return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; | ||||
| 335 | } | ||||
| 336 | |||||
| 337 | static inline unsigned int | ||||
| 338 | i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) | ||||
| 339 | { | ||||
| 340 | return obj->tiling_and_stride & STRIDE_MASK; | ||||
| 341 | } | ||||
| 342 | |||||
| 343 | static inline unsigned int | ||||
| 344 | i915_gem_tile_height(unsigned int tiling) | ||||
| 345 | { | ||||
| 346 | GEM_BUG_ON(!tiling); | ||||
| 347 | return tiling == I915_TILING_Y ? 32 : 8; | ||||
| 348 | } | ||||
| 349 | |||||
| 350 | static inline unsigned int | ||||
| 351 | i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) | ||||
| 352 | { | ||||
| 353 | return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); | ||||
| 354 | } | ||||
| 355 | |||||
| 356 | static inline unsigned int | ||||
| 357 | i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) | ||||
| 358 | { | ||||
| 359 | return (i915_gem_object_get_stride(obj) * | ||||
| 360 | i915_gem_object_get_tile_height(obj)); | ||||
| 361 | } | ||||
| 362 | |||||
| 363 | int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, | ||||
| 364 | unsigned int tiling, unsigned int stride); | ||||
| 365 | |||||
| 366 | struct scatterlist * | ||||
| 367 | __i915_gem_object_get_sg(struct drm_i915_gem_object *obj, | ||||
| 368 | struct i915_gem_object_page_iter *iter, | ||||
| 369 | unsigned int n, | ||||
| 370 | unsigned int *offset, bool dma); | ||||
| 371 | |||||
| 372 | static inline struct scatterlist * | ||||
| 373 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj, | ||||
| 374 | unsigned int n, | ||||
| 375 | unsigned int *offset) | ||||
| 376 | { | ||||
| 377 | return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false); | ||||
| 378 | } | ||||
| 379 | |||||
| 380 | static inline struct scatterlist * | ||||
| 381 | i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, | ||||
| 382 | unsigned int n, | ||||
| 383 | unsigned int *offset) | ||||
| 384 | { | ||||
| 385 | return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true); | ||||
| 386 | } | ||||
| 387 | |||||
| 388 | struct vm_page * | ||||
| 389 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, | ||||
| 390 | unsigned int n); | ||||
| 391 | |||||
| 392 | struct vm_page * | ||||
| 393 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, | ||||
| 394 | unsigned int n); | ||||
| 395 | |||||
| 396 | dma_addr_t | ||||
| 397 | i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, | ||||
| 398 | unsigned long n, | ||||
| 399 | unsigned int *len); | ||||
| 400 | |||||
| 401 | dma_addr_t | ||||
| 402 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, | ||||
| 403 | unsigned long n); | ||||
| 404 | |||||
| 405 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, | ||||
| 406 | struct sg_table *pages, | ||||
| 407 | unsigned int sg_page_sizes); | ||||
| 408 | |||||
| 409 | int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); | ||||
| 410 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); | ||||
| 411 | |||||
| 412 | static inline int __must_check | ||||
| 413 | i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) | ||||
| 414 | { | ||||
| 415 | assert_object_held(obj); | ||||
| 416 | |||||
| 417 | if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) | ||||
| 418 | return 0; | ||||
| 419 | |||||
| 420 | return __i915_gem_object_get_pages(obj); | ||||
| 421 | } | ||||
| 422 | |||||
| 423 | int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj); | ||||
| 424 | |||||
| 425 | static inline bool | ||||
| 426 | i915_gem_object_has_pages(struct drm_i915_gem_object *obj) | ||||
| 427 | { | ||||
| 428 | return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); | ||||
| 429 | } | ||||
| 430 | |||||
| 431 | static inline void | ||||
| 432 | __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) | ||||
| 433 | { | ||||
| 434 | GEM_BUG_ON(!i915_gem_object_has_pages(obj)); | ||||
| 435 | |||||
| 436 | atomic_inc(&obj->mm.pages_pin_count); | ||||
| 437 | } | ||||
| 438 | |||||
| 439 | static inline bool | ||||
| 440 | i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) | ||||
| 441 | { | ||||
| 442 | return atomic_read(&obj->mm.pages_pin_count); | ||||
| 443 | } | ||||
| 444 | |||||
| 445 | static inline void | ||||
| 446 | __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | ||||
| 447 | { | ||||
| 448 | GEM_BUG_ON(!i915_gem_object_has_pages(obj)); | ||||
| 449 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); | ||||
| 450 | |||||
| 451 | atomic_dec(&obj->mm.pages_pin_count); | ||||
| 452 | } | ||||
| 453 | |||||
| 454 | static inline void | ||||
| 455 | i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) | ||||
| 456 | { | ||||
| 457 | __i915_gem_object_unpin_pages(obj); | ||||
| 458 | } | ||||
| 459 | |||||
| 460 | int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | ||||
| 461 | int i915_gem_object_truncate(struct drm_i915_gem_object *obj); | ||||
| 462 | |||||
| 463 | /** | ||||
| 464 | * i915_gem_object_pin_map - return a contiguous mapping of the entire object | ||||
| 465 | * @obj: the object to map into kernel address space | ||||
| 466 | * @type: the type of mapping, used to select pgprot_t | ||||
| 467 | * | ||||
| 468 | * Calls i915_gem_object_pin_pages() to prevent reaping of the object's | ||||
| 469 | * pages and then returns a contiguous mapping of the backing storage into | ||||
| 470 | * the kernel address space. Based on the @type of mapping, the PTE will be | ||||
| 471 | * set to either WriteBack or WriteCombine (via pgprot_t). | ||||
| 472 | * | ||||
| 473 | * The caller is responsible for calling i915_gem_object_unpin_map() when the | ||||
| 474 | * mapping is no longer required. | ||||
| 475 | * | ||||
| 476 | * Returns the pointer through which to access the mapped object, or an | ||||
| 477 | * ERR_PTR() on error. | ||||
| 478 | */ | ||||
| 479 | void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, | ||||
| 480 | enum i915_map_type type); | ||||
| 481 | |||||
| 482 | void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj, | ||||
| 483 | enum i915_map_type type); | ||||
| 484 | |||||
| 485 | void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, | ||||
| 486 | unsigned long offset, | ||||
| 487 | unsigned long size); | ||||
| 488 | static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) | ||||
| 489 | { | ||||
| 490 | __i915_gem_object_flush_map(obj, 0, obj->base.size); | ||||
| 491 | } | ||||
| 492 | |||||
| 493 | /** | ||||
| 494 | * i915_gem_object_unpin_map - releases an earlier mapping | ||||
| 495 | * @obj: the object to unmap | ||||
| 496 | * | ||||
| 497 | * After pinning the object and mapping its pages, once you are finished | ||||
| 498 | * with your access, call i915_gem_object_unpin_map() to release the pin | ||||
| 499 | * upon the mapping. Once the pin count reaches zero, that mapping may be | ||||
| 500 | * removed. | ||||
| 501 | */ | ||||
| 502 | static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) | ||||
| 503 | { | ||||
| 504 | i915_gem_object_unpin_pages(obj); | ||||
| 505 | } | ||||
| 506 | |||||
| 507 | void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); | ||||
| 508 | |||||
| 509 | int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, | ||||
| 510 | unsigned int *needs_clflush); | ||||
| 511 | int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, | ||||
| 512 | unsigned int *needs_clflush); | ||||
| 513 | #define CLFLUSH_BEFORE BIT(0) | ||||
| 514 | #define CLFLUSH_AFTER BIT(1) | ||||
| 515 | #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) | ||||
| 516 | |||||
| 517 | static inline void | ||||
| 518 | i915_gem_object_finish_access(struct drm_i915_gem_object *obj) | ||||
| 519 | { | ||||
| 520 | i915_gem_object_unpin_pages(obj); | ||||
| 521 | } | ||||
| 522 | |||||
| 523 | int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj, | ||||
| 524 | struct dma_fence **fence); | ||||
| 525 | int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, | ||||
| 526 | bool intr); | ||||
| 527 | bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj); | ||||
| 528 | |||||
| 529 | void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, | ||||
| 530 | unsigned int cache_level); | ||||
| 531 | bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj); | ||||
| 532 | void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); | ||||
| 533 | void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); | ||||
| 534 | bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj); | ||||
| 535 | |||||
| 536 | int __must_check | ||||
| 537 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); | ||||
| 538 | int __must_check | ||||
| 539 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); | ||||
| 540 | int __must_check | ||||
| 541 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); | ||||
| 542 | struct i915_vma * __must_check | ||||
| 543 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | ||||
| 544 | struct i915_gem_ww_ctx *ww, | ||||
| 545 | u32 alignment, | ||||
| 546 | const struct i915_gtt_view *view, | ||||
| 547 | unsigned int flags); | ||||
| 548 | |||||
| 549 | void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); | ||||
| 550 | void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); | ||||
| 551 | void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); | ||||
| 552 | void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); | ||||
| 553 | void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); | ||||
| 554 | |||||
| 555 | static inline void __start_cpu_write(struct drm_i915_gem_object *obj) | ||||
| 556 | { | ||||
| 557 | obj->read_domains = I915_GEM_DOMAIN_CPU; | ||||
| 558 | obj->write_domain = I915_GEM_DOMAIN_CPU; | ||||
| 559 | if (i915_gem_cpu_write_needs_clflush(obj)) | ||||
| 560 | obj->cache_dirty = true; | ||||
| 561 | } | ||||
| 562 | |||||
| 563 | void i915_gem_fence_wait_priority(struct dma_fence *fence, | ||||
| 564 | const struct i915_sched_attr *attr); | ||||
| 565 | |||||
| 566 | int i915_gem_object_wait(struct drm_i915_gem_object *obj, | ||||
| 567 | unsigned int flags, | ||||
| 568 | long timeout); | ||||
| 569 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, | ||||
| 570 | unsigned int flags, | ||||
| 571 | const struct i915_sched_attr *attr); | ||||
| 572 | |||||
| 573 | void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, | ||||
| 574 | enum fb_op_origin origin); | ||||
| 575 | void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, | ||||
| 576 | enum fb_op_origin origin); | ||||
| 577 | |||||
| 578 | static inline void | ||||
| 579 | i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, | ||||
| 580 | enum fb_op_origin origin) | ||||
| 581 | { | ||||
| 582 | if (unlikely(rcu_access_pointer(obj->frontbuffer))) | ||||
| 583 | __i915_gem_object_flush_frontbuffer(obj, origin); | ||||
| 584 | } | ||||
| 585 | |||||
| 586 | static inline void | ||||
| 587 | i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj, | ||||
| 588 | enum fb_op_origin origin) | ||||
| 589 | { | ||||
| 590 | if (unlikely(rcu_access_pointer(obj->frontbuffer))) | ||||
| 591 | __i915_gem_object_invalidate_frontbuffer(obj, origin); | ||||
| 592 | } | ||||
| 593 | |||||
| 594 | int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size); | ||||
| 595 | |||||
| 596 | bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj); | ||||
| 597 | |||||
| 598 | void __i915_gem_free_object_rcu(struct rcu_head *head); | ||||
| 599 | |||||
| 600 | void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj); | ||||
| 601 | |||||
| 602 | void __i915_gem_free_object(struct drm_i915_gem_object *obj); | ||||
| 603 | |||||
| 604 | bool i915_gem_object_evictable(struct drm_i915_gem_object *obj); | ||||
| 605 | |||||
| 606 | bool i915_gem_object_migratable(struct drm_i915_gem_object *obj); | ||||
| 607 | |||||
| 608 | int i915_gem_object_migrate(struct drm_i915_gem_object *obj, | ||||
| 609 | struct i915_gem_ww_ctx *ww, | ||||
| 610 | enum intel_region_id id); | ||||
| 611 | int __i915_gem_object_migrate(struct drm_i915_gem_object *obj, | ||||
| 612 | struct i915_gem_ww_ctx *ww, | ||||
| 613 | enum intel_region_id id, | ||||
| 614 | unsigned int flags); | ||||
| 615 | |||||
| 616 | bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj, | ||||
| 617 | enum intel_region_id id); | ||||
| 618 | |||||
| 619 | int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, | ||||
| 620 | unsigned int flags); | ||||
| 621 | |||||
| 622 | bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, | ||||
| 623 | enum intel_memory_type type); | ||||
| 624 | |||||
| 625 | bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj); | ||||
| 626 | |||||
| 627 | #ifdef __linux__ | ||||
| 628 | int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, | ||||
| 629 | size_t size, struct intel_memory_region *mr, | ||||
| 630 | struct address_space *mapping, | ||||
| 631 | unsigned int max_segment); | ||||
| 632 | void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, | ||||
| 633 | bool dirty, bool backup); | ||||
| 634 | #else | ||||
| 635 | int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, | ||||
| 636 | size_t size, struct intel_memory_region *mr, | ||||
| 637 | struct address_space *mapping, | ||||
| 638 | unsigned int max_segment, | ||||
| 639 | struct drm_i915_gem_object *obj); | ||||
| 640 | void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, | ||||
| 641 | bool dirty, bool backup, | ||||
| 642 | struct drm_i915_gem_object *obj); | ||||
| 643 | #endif | ||||
| 644 | void __shmem_writeback(size_t size, struct address_space *mapping); | ||||
| 645 | |||||
| 646 | #ifdef CONFIG_MMU_NOTIFIER | ||||
| 647 | static inline bool | ||||
| 648 | i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) | ||||
| 649 | { | ||||
| 650 | return obj->userptr.notifier.mm; | ||||
| 651 | } | ||||
| 652 | |||||
| 653 | int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj); | ||||
| 654 | int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj); | ||||
| 655 | int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj); | ||||
| 656 | #else | ||||
| 657 | static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; } | ||||
| 658 | |||||
| 659 | static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; } | ||||
| 660 | static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; } | ||||
| 661 | static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; } | ||||
| 662 | |||||
| 663 | #endif | ||||
| 664 | |||||
| 665 | #endif |
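
Given the path above, a guarded version of the -EDEADLK branch in __i915_gem_object_lock() (line 192 of this header) would make the locking invariant visible to the analyzer. This is a sketch under the assumption that -EDEADLK can only be returned when a ww acquire context was supplied; it is not an upstream fix:

    if (ret == -EDEADLK) {
            GEM_WARN_ON(!ww);       /* -EDEADLK should imply a ww context */
            if (ww) {
                    i915_gem_object_get(obj);
                    ww->contended = obj;
            }
    }

If that invariant holds, the report is a false positive triggered by NULL-ww callers such as i915_gem_object_lock_interruptible(dpt_obj, NULL) in intel_dpt_create(); the guard merely encodes the assumption where the tool can see it.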