| File: | dev/pci/drm/i915/display/intel_dpt.c |
| Warning: | line 244, column 42: Access to field 'dev' results in a dereference of a null pointer (loaded from variable 'obj') |
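Analysis note: in this build intel_fb_obj() expands to a conditional expression with a NULL arm, ((&fb->base) ? to_intel_bo((&fb->base)->obj[0]) : NULL), so the analyzer assumes the `obj` initialized on line 243 may be NULL; line 244 then dereferences it through `obj->dev`. The flagged statement is marked in the listing below, and a hedged sketch of one possible guard follows the listing.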
  1  // SPDX-License-Identifier: MIT
  2  /*
  3   * Copyright © 2021 Intel Corporation
  4   */
  5  
  6  #include "gem/i915_gem_domain.h"
  7  #include "gem/i915_gem_internal.h"
  8  #include "gt/gen8_ppgtt.h"
  9  
 10  #include "i915_drv.h"
 11  #include "intel_display_types.h"
 12  #include "intel_dpt.h"
 13  #include "intel_fb.h"
 14  
 15  struct i915_dpt {
 16          struct i915_address_space vm;
 17  
 18          struct drm_i915_gem_object *obj;
 19          struct i915_vma *vma;
 20          void __iomem *iomem;
 21  };
 22  
 23  #define i915_is_dpt(vm) ((vm)->is_dpt)
 24  
 25  static inline struct i915_dpt *
 26  i915_vm_to_dpt(struct i915_address_space *vm)
 27  {
 28          BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
 29          GEM_BUG_ON(!i915_is_dpt(vm));
 30          return container_of(vm, struct i915_dpt, vm);
 31  }
 32  
 33  #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
 34  
 35  static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 36  {
 37          writeq(pte, addr);
 38  }
 39  
 40  static void dpt_insert_page(struct i915_address_space *vm,
 41                              dma_addr_t addr,
 42                              u64 offset,
 43                              enum i915_cache_level level,
 44                              u32 flags)
 45  {
 46          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
 47          gen8_pte_t __iomem *base = dpt->iomem;
 48  
 49          gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
 50                       vm->pte_encode(addr, level, flags));
 51  }
 52  
 53  static void dpt_insert_entries(struct i915_address_space *vm,
 54                                 struct i915_vma_resource *vma_res,
 55                                 enum i915_cache_level level,
 56                                 u32 flags)
 57  {
 58          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
 59          gen8_pte_t __iomem *base = dpt->iomem;
 60          const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
 61          struct sgt_iter sgt_iter;
 62          dma_addr_t addr;
 63          int i;
 64  
 65          /*
 66           * Note that we ignore PTE_READ_ONLY here. The caller must be careful
 67           * not to allow the user to override access to a read only page.
 68           */
 69  
 70          i = vma_res->start / I915_GTT_PAGE_SIZE;
 71          for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
 72                  gen8_set_pte(&base[i++], pte_encode | addr);
 73  }
 74  
 75  static void dpt_clear_range(struct i915_address_space *vm,
 76                              u64 start, u64 length)
 77  {
 78  }
 79  
 80  static void dpt_bind_vma(struct i915_address_space *vm,
 81                           struct i915_vm_pt_stash *stash,
 82                           struct i915_vma_resource *vma_res,
 83                           enum i915_cache_level cache_level,
 84                           u32 flags)
 85  {
 86          u32 pte_flags;
 87  
 88          if (vma_res->bound_flags)
 89                  return;
 90  
 91          /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 92          pte_flags = 0;
 93          if (vm->has_read_only && vma_res->bi.readonly)
 94                  pte_flags |= PTE_READ_ONLY;
 95          if (vma_res->bi.lmem)
 96                  pte_flags |= PTE_LM;
 97  
 98          vm->insert_entries(vm, vma_res, cache_level, pte_flags);
 99  
100          vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
101  
102          /*
103           * Without aliasing PPGTT there's no difference between
104           * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
105           * upgrade to both bound if we bind either to avoid double-binding.
106           */
107          vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
108  }
109  
110  static void dpt_unbind_vma(struct i915_address_space *vm,
111                             struct i915_vma_resource *vma_res)
112  {
113          vm->clear_range(vm, vma_res->start, vma_res->vma_size);
114  }
115  
116  static void dpt_cleanup(struct i915_address_space *vm)
117  {
118          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
119  
120          i915_gem_object_put(dpt->obj);
121  }
122  
123  struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
124  {
125          struct drm_i915_private *i915 = vm->i915;
126          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
127          intel_wakeref_t wakeref;
128          struct i915_vma *vma;
129          void __iomem *iomem;
130          struct i915_gem_ww_ctx ww;
131          u64 pin_flags = 0;
132          int err;
133  
134          if (i915_gem_object_is_stolen(dpt->obj))
135                  pin_flags |= PIN_MAPPABLE;
136  
137          wakeref = intel_runtime_pm_get(&i915->runtime_pm);
138          atomic_inc(&i915->gpu_error.pending_fb_pin);
139  
140          for_i915_gem_ww(&ww, err, true) {
141                  err = i915_gem_object_lock(dpt->obj, &ww);
142                  if (err)
143                          continue;
144  
145                  vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
146                                                    pin_flags);
147                  if (IS_ERR(vma)) {
148                          err = PTR_ERR(vma);
149                          continue;
150                  }
151  
152                  iomem = i915_vma_pin_iomap(vma);
153                  i915_vma_unpin(vma);
154  
155                  if (IS_ERR(iomem)) {
156                          err = PTR_ERR(iomem);
157                          continue;
158                  }
159  
160                  dpt->vma = vma;
161                  dpt->iomem = iomem;
162  
163                  i915_vma_get(vma);
164          }
165  
166          dpt->obj->mm.dirty = true;
167  
168          atomic_dec(&i915->gpu_error.pending_fb_pin);
169          intel_runtime_pm_put(&i915->runtime_pm, wakeref);
170  
171          return err ? ERR_PTR(err) : vma;
172  }
173  
174  void intel_dpt_unpin(struct i915_address_space *vm)
175  {
176          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
177  
178          i915_vma_unpin_iomap(dpt->vma);
179          i915_vma_put(dpt->vma);
180  }
181  
182  /**
183   * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
184   * @i915: device instance
185   *
186   * Restore the memory mapping during system resume for all framebuffers which
187   * are mapped to HW via a GGTT->DPT page table. The content of these page
188   * tables is not stored in the hibernation image during S4 and S3RST->S4
189   * transitions, so here we reprogram the PTE entries in those tables.
190   *
191   * This function must be called after the mappings in GGTT have been restored
192   * by calling i915_ggtt_resume().
193   */
194  void intel_dpt_resume(struct drm_i915_private *i915)
195  {
196          struct drm_framebuffer *drm_fb;
197  
198          if (!HAS_DISPLAY(i915))
199                  return;
200  
201          mutex_lock(&i915->drm.mode_config.fb_lock);
202          drm_for_each_fb(drm_fb, &i915->drm) {
203                  struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
204  
205                  if (fb->dpt_vm)
206                          i915_ggtt_resume_vm(fb->dpt_vm);
207          }
208          mutex_unlock(&i915->drm.mode_config.fb_lock);
209  }
210  
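Ordering note: a hedged sketch of the resume sequence the kernel-doc above requires, assuming a caller along the lines of i915_drm_resume() in i915_driver.c (the caller name is an assumption, not shown in this file):

        i915_ggtt_resume(to_gt(i915)->ggtt);    /* 1: restore GGTT mappings first */
        intel_dpt_resume(i915);                 /* 2: then reprogram the DPT PTEs */
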
211  /**
212   * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
213   * @i915: device instance
214   *
215   * Suspend the memory mapping during system suspend for all framebuffers which
216   * are mapped to HW via a GGTT->DPT page table.
217   *
218   * This function must be called before the mappings in GGTT are suspended
219   * by calling i915_ggtt_suspend().
220   */
221  void intel_dpt_suspend(struct drm_i915_private *i915)
222  {
223          struct drm_framebuffer *drm_fb;
224  
225          if (!HAS_DISPLAY(i915))
226                  return;
227  
228          mutex_lock(&i915->drm.mode_config.fb_lock);
229  
230          drm_for_each_fb(drm_fb, &i915->drm) {
231                  struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
232  
233                  if (fb->dpt_vm)
234                          i915_ggtt_suspend_vm(fb->dpt_vm);
235          }
236  
237          mutex_unlock(&i915->drm.mode_config.fb_lock);
238  }
239  
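Ordering note: the mirror-image sequence on suspend, again assuming an i915_drm_suspend()-style caller (only the before/after constraint comes from the kernel-doc above):

        intel_dpt_suspend(i915);                /* 1: suspend the DPT VMs first */
        i915_ggtt_suspend(to_gt(i915)->ggtt);   /* 2: then the GGTT mappings */
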
240  struct i915_address_space *
241  intel_dpt_create(struct intel_framebuffer *fb)
242  {
243          struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
244          struct drm_i915_private *i915 = to_i915(obj->dev);
245          struct drm_i915_gem_object *dpt_obj;
246          struct i915_address_space *vm;
247          struct i915_dpt *dpt;
248          size_t size;
249          int ret;
250  
251          if (intel_fb_needs_pot_stride_remap(fb))
252                  size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
253          else
254                  size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
255  
256          size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
257  
258          dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
259          if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
260                  dpt_obj = i915_gem_object_create_stolen(i915, size);
261          if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
262                  drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
263                  dpt_obj = i915_gem_object_create_shmem(i915, size);
264          }
265          if (IS_ERR(dpt_obj))
266                  return ERR_CAST(dpt_obj);
267  
268          ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
269          if (!ret) {
270                  ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
271                  i915_gem_object_unlock(dpt_obj);
272          }
273          if (ret) {
274                  i915_gem_object_put(dpt_obj);
275                  return ERR_PTR(ret);
276          }
277  
278          dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
279          if (!dpt) {
280                  i915_gem_object_put(dpt_obj);
281                  return ERR_PTR(-ENOMEM);
282          }
283  
284          vm = &dpt->vm;
285  
286          vm->gt = to_gt(i915);
287          vm->i915 = i915;
288          vm->dma = i915->drm.dev;
289          vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
290          vm->is_dpt = true;
291  
292          i915_address_space_init(vm, VM_CLASS_DPT);
293  
294          vm->insert_page = dpt_insert_page;
295          vm->clear_range = dpt_clear_range;
296          vm->insert_entries = dpt_insert_entries;
297          vm->cleanup = dpt_cleanup;
298  
299          vm->vma_ops.bind_vma = dpt_bind_vma;
300          vm->vma_ops.unbind_vma = dpt_unbind_vma;
301  
302          vm->pte_encode = gen8_ggtt_pte_encode;
303  
304          dpt->obj = dpt_obj;
305          dpt->obj->is_dpt = true;
306  
307          return &dpt->vm;
308  }
309  
310  void intel_dpt_destroy(struct i915_address_space *vm)
311  {
312          struct i915_dpt *dpt = i915_vm_to_dpt(vm);
313  
314          dpt->obj->is_dpt = false;
315          i915_vm_put(&dpt->vm);
316  }
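
About the warning: the NULL arm of the intel_fb_obj() expansion can propagate into `obj` on line 243, so the analyzer reports the `obj->dev` load on line 244 as a possible NULL dereference. Since `&fb->base` is non-NULL whenever `fb` itself is, the report is most likely a false positive triggered by the macro's defensive NULL arm; still, a minimal sketch of a guard that would satisfy the analyzer (a hypothetical local change, not an upstream fix; the `fb_obj` temporary is a name introduced here):

        struct i915_address_space *
        intel_dpt_create(struct intel_framebuffer *fb)
        {
                /* Hypothetical guard: fail cleanly instead of letting a
                 * NULL GEM object reach the obj->dev dereference. */
                struct drm_i915_gem_object *fb_obj = intel_fb_obj(&fb->base);
                struct drm_gem_object *obj;
                struct drm_i915_private *i915;

                if (!fb_obj)
                        return ERR_PTR(-EINVAL);

                obj = &fb_obj->base;
                i915 = to_i915(obj->dev);
                /* ... remainder as in the listing above ... */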