| File: | dev/pci/drm/i915/gt/intel_ggtt.c |
| Warning: | line 862, column 18 Value stored to 'pdev' during its initialization is never read |
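The flagged dead store is in the OpenBSD (#else) variant of ggtt_probe_common() at line 862 of the listing below: pdev is initialized from i915->drm.pdev, but this path resolves the BAR through pci_mapreg_type()/pci_mapreg_info() on i915->pc and i915->tag and never reads pdev again. A minimal sketch of a fix, assuming no later use of the variable is planned (a hypothetical patch, not a committed change), is simply to drop the dead initialization:

	--- intel_ggtt.c	(ggtt_probe_common, OpenBSD variant)
	+++ intel_ggtt.c
	 	struct drm_i915_private *i915 = ggtt->vm.i915;
	-	struct pci_dev *pdev = i915->drm.pdev;	/* dead store: never read on this path */
	 	phys_addr_t phys_addr;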
| 1 | // SPDX-License-Identifier: MIT |
| 2 | /* |
| 3 | * Copyright © 2020 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #include <linux/stop_machine.h> |
| 7 | |
| 8 | #include <asm/set_memory.h> |
| 9 | #include <asm/smp.h> |
| 10 | |
| 11 | #include <drm/i915_drm.h> |
| 12 | |
| 13 | #include "intel_gt.h" |
| 14 | #include "i915_drv.h" |
| 15 | #include "i915_scatterlist.h" |
| 16 | #include "i915_vgpu.h" |
| 17 | |
| 18 | #include "intel_gtt.h" |
| 19 | |
| 20 | #include <dev/pci/pcivar.h> |
| 21 | #include <dev/pci/agpvar.h> |
| 22 | |
| 23 | static int |
| 24 | i915_get_ggtt_vma_pages(struct i915_vma *vma); |
| 25 | |
| 26 | static void i915_ggtt_color_adjust(const struct drm_mm_node *node, |
| 27 | unsigned long color, |
| 28 | u64 *start, |
| 29 | u64 *end) |
| 30 | { |
| 31 | if (i915_node_color_differs(node, color)) |
| 32 | *start += I915_GTT_PAGE_SIZE; |
| 33 | |
| 34 | /* |
| 35 | * Also leave a space between the unallocated reserved node after the |
| 36 | * GTT and any objects within the GTT, i.e. we use the color adjustment |
| 37 | * to insert a guard page to prevent prefetches crossing over the |
| 38 | * GTT boundary. |
| 39 | */ |
| 40 | node = list_next_entry(node, node_list); |
| 41 | if (node->color != color) |
| 42 | *end -= I915_GTT_PAGE_SIZE; |
| 43 | } |
| 44 | |
| 45 | static int ggtt_init_hw(struct i915_ggtt *ggtt) |
| 46 | { |
| 47 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 48 | int i; |
| 49 | |
| 50 | i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); |
| 51 | |
| 52 | ggtt->vm.is_ggtt = true; |
| 53 | |
| 54 | /* Only VLV supports read-only GGTT mappings */ |
| 55 | ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); |
| 56 | |
| 57 | if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) |
| 58 | ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; |
| 59 | |
| 60 | if (ggtt->mappable_end) { |
| 61 | #ifdef __linux__ |
| 62 | if (!io_mapping_init_wc(&ggtt->iomap, |
| 63 | ggtt->gmadr.start, |
| 64 | ggtt->mappable_end)) { |
| 65 | ggtt->vm.cleanup(&ggtt->vm); |
| 66 | return -EIO; |
| 67 | } |
| 68 | |
| 69 | ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, |
| 70 | ggtt->mappable_end); |
| 71 | #else |
| 72 | /* XXX would be a lot nicer to get agp info before now */ |
| 73 | uvm_page_physload(atop(i915->ggtt.gmadr.start), |
| 74 | atop(i915->ggtt.gmadr.start + i915->ggtt.mappable_end), |
| 75 | atop(i915->ggtt.gmadr.start), |
| 76 | atop(i915->ggtt.gmadr.start + i915->ggtt.mappable_end), |
| 77 | PHYSLOAD_DEVICE); |
| 78 | /* array of vm pages that physload introduced. */ |
| 79 | i915->pgs = PHYS_TO_VM_PAGE(i915->ggtt.gmadr.start); |
| 80 | KASSERT(i915->pgs != NULL); |
| 81 | /* |
| 82 | * XXX mark all pages write combining so user mmaps get the |
| 83 | * right bits. We really need a proper MI api for doing this, |
| 84 | * but for now this allows us to use PAT where available. |
| 85 | */ |
| 86 | for (i = 0; i < atop(i915->ggtt.mappable_end); i++) |
| 87 | atomic_setbits_int(&(i915->pgs[i].pg_flags), |
| 88 | PG_PMAP_WC); |
| 89 | if (agp_init_map(i915->bst, i915->ggtt.gmadr.start, |
| 90 | i915->ggtt.mappable_end, |
| 91 | BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, |
| 92 | &i915->agph)) |
| 93 | panic("can't map aperture"); |
| 94 | #endif |
| 95 | } |
| 96 | |
| 97 | intel_ggtt_init_fences(ggtt); |
| 98 | |
| 99 | return 0; |
| 100 | } |
| 101 | |
| 102 | /** |
| 103 | * i915_ggtt_init_hw - Initialize GGTT hardware |
| 104 | * @i915: i915 device |
| 105 | */ |
| 106 | int i915_ggtt_init_hw(struct drm_i915_private *i915) |
| 107 | { |
| 108 | int ret; |
| 109 | |
| 110 | /* |
| 111 | * Note that we use page colouring to enforce a guard page at the |
| 112 | * end of the address space. This is required as the CS may prefetch |
| 113 | * beyond the end of the batch buffer, across the page boundary, |
| 114 | * and beyond the end of the GTT if we do not provide a guard. |
| 115 | */ |
| 116 | ret = ggtt_init_hw(&i915->ggtt); |
| 117 | if (ret) |
| 118 | return ret; |
| 119 | |
| 120 | return 0; |
| 121 | } |
| 122 | |
| 123 | /* |
| 124 | * Certain Gen5 chipsets require idling the GPU before |
| 125 | * unmapping anything from the GTT when VT-d is enabled. |
| 126 | */ |
| 127 | static bool needs_idle_maps(struct drm_i915_private *i915) |
| 128 | { |
| 129 | /* |
| 130 | * Query intel_iommu to see if we need the workaround. Presumably that |
| 131 | * was loaded first. |
| 132 | */ |
| 133 | return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active(); |
| 134 | } |
| 135 | |
| 136 | void i915_ggtt_suspend(struct i915_ggtt *ggtt) |
| 137 | { |
| 138 | struct i915_vma *vma, *vn; |
| 139 | int open; |
| 140 | |
| 141 | mutex_lock(&ggtt->vm.mutex); |
| 142 | |
| 143 | /* Skip rewriting PTE on VMA unbind. */ |
| 144 | open = atomic_xchg(&ggtt->vm.open, 0); |
| 145 | |
| 146 | list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { |
| 147 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
| 148 | i915_vma_wait_for_bind(vma); |
| 149 | |
| 150 | if (i915_vma_is_pinned(vma)) |
| 151 | continue; |
| 152 | |
| 153 | if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { |
| 154 | __i915_vma_evict(vma); |
| 155 | drm_mm_remove_node(&vma->node); |
| 156 | } |
| 157 | } |
| 158 | |
| 159 | ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); |
| 160 | ggtt->invalidate(ggtt); |
| 161 | atomic_set(&ggtt->vm.open, open); |
| 162 | |
| 163 | mutex_unlock(&ggtt->vm.mutex); |
| 164 | |
| 165 | intel_gt_check_and_clear_faults(ggtt->vm.gt); |
| 166 | } |
| 167 | |
| 168 | void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) |
| 169 | { |
| 170 | struct intel_uncore *uncore = ggtt->vm.gt->uncore; |
| 171 | |
| 172 | spin_lock_irq(&uncore->lock); |
| 173 | intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
| 174 | intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6); |
| 175 | spin_unlock_irq(&uncore->lock); |
| 176 | } |
| 177 | |
| 178 | static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt) |
| 179 | { |
| 180 | struct intel_uncore *uncore = ggtt->vm.gt->uncore; |
| 181 | |
| 182 | /* |
| 183 | * Note that as an uncached mmio write, this will flush the |
| 184 | * WCB of the writes into the GGTT before it triggers the invalidate. |
| 185 | */ |
| 186 | intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
| 187 | } |
| 188 | |
| 189 | static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) |
| 190 | { |
| 191 | struct intel_uncore *uncore = ggtt->vm.gt->uncore; |
| 192 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 193 | |
| 194 | gen8_ggtt_invalidate(ggtt); |
| 195 | |
| 196 | if (INTEL_GEN(i915) >= 12) |
| 197 | intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, |
| 198 | GEN12_GUC_TLB_INV_CR_INVALIDATE); |
| 199 | else |
| 200 | intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); |
| 201 | } |
| 202 | |
| 203 | static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) |
| 204 | { |
| 205 | intel_gtt_chipset_flush(); |
| 206 | } |
| 207 | |
| 208 | static u64 gen8_ggtt_pte_encode(dma_addr_t addr, |
| 209 | enum i915_cache_level level, |
| 210 | u32 flags) |
| 211 | { |
| 212 | return addr | _PAGE_PRESENT; |
| 213 | } |
| 214 | |
| 215 | static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) |
| 216 | { |
| 217 | writeq(pte, addr); |
| 218 | } |
| 219 | |
| 220 | static void gen8_ggtt_insert_page(struct i915_address_space *vm, |
| 221 | dma_addr_t addr, |
| 222 | u64 offset, |
| 223 | enum i915_cache_level level, |
| 224 | u32 unused) |
| 225 | { |
| 226 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 227 | gen8_pte_t __iomem *pte = |
| 228 | (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; |
| 229 | |
| 230 | gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0)); |
| 231 | |
| 232 | ggtt->invalidate(ggtt); |
| 233 | } |
| 234 | |
| 235 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
| 236 | struct i915_vma *vma, |
| 237 | enum i915_cache_level level, |
| 238 | u32 flags) |
| 239 | { |
| 240 | const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0); |
| 241 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 242 | gen8_pte_t __iomem *gte; |
| 243 | gen8_pte_t __iomem *end; |
| 244 | struct sgt_iter iter; |
| 245 | dma_addr_t addr; |
| 246 | |
| 247 | /* |
| 248 | * Note that we ignore PTE_READ_ONLY here. The caller must be careful |
| 249 | * not to allow the user to override access to a read only page. |
| 250 | */ |
| 251 | |
| 252 | gte = (gen8_pte_t __iomem *)ggtt->gsm; |
| 253 | gte += vma->node.start / I915_GTT_PAGE_SIZE; |
| 254 | end = gte + vma->node.size / I915_GTT_PAGE_SIZE; |
| 255 | |
| 256 | for_each_sgt_daddr(addr, iter, vma->pages) |
| 257 | gen8_set_pte(gte++, pte_encode | addr); |
| 258 | GEM_BUG_ON(gte > end); |
| 259 | |
| 260 | /* Fill the allocated but "unused" space beyond the end of the buffer */ |
| 261 | while (gte < end) |
| 262 | gen8_set_pte(gte++, vm->scratch[0]->encode); |
| 263 | |
| 264 | /* |
| 265 | * We want to flush the TLBs only after we're certain all the PTE |
| 266 | * updates have finished. |
| 267 | */ |
| 268 | ggtt->invalidate(ggtt); |
| 269 | } |
| 270 | |
| 271 | static void gen6_ggtt_insert_page(struct i915_address_space *vm, |
| 272 | dma_addr_t addr, |
| 273 | u64 offset, |
| 274 | enum i915_cache_level level, |
| 275 | u32 flags) |
| 276 | { |
| 277 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 278 | gen6_pte_t __iomem *pte = |
| 279 | (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; |
| 280 | |
| 281 | iowrite32(vm->pte_encode(addr, level, flags), pte); |
| 282 | |
| 283 | ggtt->invalidate(ggtt); |
| 284 | } |
| 285 | |
| 286 | /* |
| 287 | * Binds an object into the global gtt with the specified cache level. |
| 288 | * The object will be accessible to the GPU via commands whose operands |
| 289 | * reference offsets within the global GTT as well as accessible by the GPU |
| 290 | * through the GMADR mapped BAR (i915->mm.gtt->gtt). |
| 291 | */ |
| 292 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
| 293 | struct i915_vma *vma, |
| 294 | enum i915_cache_level level, |
| 295 | u32 flags) |
| 296 | { |
| 297 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 298 | gen6_pte_t __iomem *gte; |
| 299 | gen6_pte_t __iomem *end; |
| 300 | struct sgt_iter iter; |
| 301 | dma_addr_t addr; |
| 302 | |
| 303 | gte = (gen6_pte_t __iomem *)ggtt->gsm; |
| 304 | gte += vma->node.start / I915_GTT_PAGE_SIZE; |
| 305 | end = gte + vma->node.size / I915_GTT_PAGE_SIZE; |
| 306 | |
| 307 | for_each_sgt_daddr(addr, iter, vma->pages) |
| 308 | iowrite32(vm->pte_encode(addr, level, flags), gte++); |
| 309 | GEM_BUG_ON(gte > end); |
| 310 | |
| 311 | /* Fill the allocated but "unused" space beyond the end of the buffer */ |
| 312 | while (gte < end) |
| 313 | iowrite32(vm->scratch[0]->encode, gte++); |
| 314 | |
| 315 | /* |
| 316 | * We want to flush the TLBs only after we're certain all the PTE |
| 317 | * updates have finished. |
| 318 | */ |
| 319 | ggtt->invalidate(ggtt); |
| 320 | } |
| 321 | |
| 322 | static void nop_clear_range(struct i915_address_space *vm, |
| 323 | u64 start, u64 length) |
| 324 | { |
| 325 | } |
| 326 | |
| 327 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
| 328 | u64 start, u64 length) |
| 329 | { |
| 330 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 331 | unsigned int first_entry = start / I915_GTT_PAGE_SIZE; |
| 332 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; |
| 333 | const gen8_pte_t scratch_pte = vm->scratch[0]->encode; |
| 334 | gen8_pte_t __iomem *gtt_base = |
| 335 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; |
| 336 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; |
| 337 | int i; |
| 338 | |
| 339 | if (WARN(num_entries > max_entries, |
| 340 | "First entry = %d; Num entries = %d (max=%d)\n", |
| 341 | first_entry, num_entries, max_entries)) |
| 342 | num_entries = max_entries; |
| 343 | |
| 344 | for (i = 0; i < num_entries; i++) |
| 345 | gen8_set_pte(&gtt_base[i], scratch_pte); |
| 346 | } |
| 347 | |
| 348 | static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) |
| 349 | { |
| 350 | /* |
| 351 | * Make sure the internal GAM fifo has been cleared of all GTT |
| 352 | * writes before exiting stop_machine(). This guarantees that |
| 353 | * any aperture accesses waiting to start in another process |
| 354 | * cannot back up behind the GTT writes causing a hang. |
| 355 | * The register can be any arbitrary GAM register. |
| 356 | */ |
| 357 | intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); |
| 358 | } |
| 359 | |
| 360 | struct insert_page { |
| 361 | struct i915_address_space *vm; |
| 362 | dma_addr_t addr; |
| 363 | u64 offset; |
| 364 | enum i915_cache_level level; |
| 365 | }; |
| 366 | |
| 367 | static int bxt_vtd_ggtt_insert_page__cb(void *_arg) |
| 368 | { |
| 369 | struct insert_page *arg = _arg; |
| 370 | |
| 371 | gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); |
| 372 | bxt_vtd_ggtt_wa(arg->vm); |
| 373 | |
| 374 | return 0; |
| 375 | } |
| 376 | |
| 377 | static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, |
| 378 | dma_addr_t addr, |
| 379 | u64 offset, |
| 380 | enum i915_cache_level level, |
| 381 | u32 unused) |
| 382 | { |
| 383 | struct insert_page arg = { vm, addr, offset, level }; |
| 384 | |
| 385 | stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); |
| 386 | } |
| 387 | |
| 388 | struct insert_entries { |
| 389 | struct i915_address_space *vm; |
| 390 | struct i915_vma *vma; |
| 391 | enum i915_cache_level level; |
| 392 | u32 flags; |
| 393 | }; |
| 394 | |
| 395 | static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) |
| 396 | { |
| 397 | struct insert_entries *arg = _arg; |
| 398 | |
| 399 | gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); |
| 400 | bxt_vtd_ggtt_wa(arg->vm); |
| 401 | |
| 402 | return 0; |
| 403 | } |
| 404 | |
| 405 | static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, |
| 406 | struct i915_vma *vma, |
| 407 | enum i915_cache_level level, |
| 408 | u32 flags) |
| 409 | { |
| 410 | struct insert_entries arg = { vm, vma, level, flags }; |
| 411 | |
| 412 | stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); |
| 413 | } |
| 414 | |
| 415 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
| 416 | u64 start, u64 length) |
| 417 | { |
| 418 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 419 | unsigned int first_entry = start / I915_GTT_PAGE_SIZE; |
| 420 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; |
| 421 | gen6_pte_t scratch_pte, __iomem *gtt_base = |
| 422 | (gen6_pte_t __iomem *)ggtt->gsm + first_entry; |
| 423 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; |
| 424 | int i; |
| 425 | |
| 426 | if (WARN(num_entries > max_entries, |
| 427 | "First entry = %d; Num entries = %d (max=%d)\n", |
| 428 | first_entry, num_entries, max_entries)) |
| 429 | num_entries = max_entries; |
| 430 | |
| 431 | scratch_pte = vm->scratch[0]->encode; |
| 432 | for (i = 0; i < num_entries; i++) |
| 433 | iowrite32(scratch_pte, &gtt_base[i]); |
| 434 | } |
| 435 | |
| 436 | static void i915_ggtt_insert_page(struct i915_address_space *vm, |
| 437 | dma_addr_t addr, |
| 438 | u64 offset, |
| 439 | enum i915_cache_level cache_level, |
| 440 | u32 unused) |
| 441 | { |
| 442 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
| 443 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
| 444 | |
| 445 | intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); |
| 446 | } |
| 447 | |
| 448 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
| 449 | struct i915_vma *vma, |
| 450 | enum i915_cache_level cache_level, |
| 451 | u32 unused) |
| 452 | { |
| 453 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
| 454 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
| 455 | |
| 456 | intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, |
| 457 | flags); |
| 458 | } |
| 459 | |
| 460 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
| 461 | u64 start, u64 length) |
| 462 | { |
| 463 | intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); |
| 464 | } |
| 465 | |
| 466 | static void ggtt_bind_vma(struct i915_address_space *vm, |
| 467 | struct i915_vm_pt_stash *stash, |
| 468 | struct i915_vma *vma, |
| 469 | enum i915_cache_level cache_level, |
| 470 | u32 flags) |
| 471 | { |
| 472 | struct drm_i915_gem_object *obj = vma->obj; |
| 473 | u32 pte_flags; |
| 474 | |
| 475 | if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK)) |
| 476 | return; |
| 477 | |
| 478 | /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ |
| 479 | pte_flags = 0; |
| 480 | if (i915_gem_object_is_readonly(obj)) |
| 481 | pte_flags |= PTE_READ_ONLY; |
| 482 | |
| 483 | vm->insert_entries(vm, vma, cache_level, pte_flags); |
| 484 | vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; |
| 485 | } |
| 486 | |
| 487 | static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) |
| 488 | { |
| 489 | vm->clear_range(vm, vma->node.start, vma->size); |
| 490 | } |
| 491 | |
| 492 | static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) |
| 493 | { |
| 494 | u64 size; |
| 495 | int ret; |
| 496 | |
| 497 | if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) |
| 498 | return 0; |
| 499 | |
| 500 | GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); |
| 501 | size = ggtt->vm.total - GUC_GGTT_TOP; |
| 502 | |
| 503 | ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, |
| 504 | GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, |
| 505 | PIN_NOEVICT); |
| 506 | if (ret) |
| 507 | drm_dbg(&ggtt->vm.i915->drm, |
| 508 | "Failed to reserve top of GGTT for GuC\n"); |
| 509 | |
| 510 | return ret; |
| 511 | } |
| 512 | |
| 513 | static void ggtt_release_guc_top(struct i915_ggtt *ggtt) |
| 514 | { |
| 515 | if (drm_mm_node_allocated(&ggtt->uc_fw)) |
| 516 | drm_mm_remove_node(&ggtt->uc_fw); |
| 517 | } |
| 518 | |
| 519 | static void cleanup_init_ggtt(struct i915_ggtt *ggtt) |
| 520 | { |
| 521 | ggtt_release_guc_top(ggtt); |
| 522 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
| 523 | drm_mm_remove_node(&ggtt->error_capture); |
| 524 | mutex_destroy(&ggtt->error_mutex); |
| 525 | } |
| 526 | |
| 527 | static int init_ggtt(struct i915_ggtt *ggtt) |
| 528 | { |
| 529 | /* |
| 530 | * Let GEM Manage all of the aperture. |
| 531 | * |
| 532 | * However, leave one page at the end still bound to the scratch page. |
| 533 | * There are a number of places where the hardware apparently prefetches |
| 534 | * past the end of the object, and we've seen multiple hangs with the |
| 535 | * GPU head pointer stuck in a batchbuffer bound at the last page of the |
| 536 | * aperture. One page should be enough to keep any prefetching inside |
| 537 | * of the aperture. |
| 538 | */ |
| 539 | unsigned long hole_start, hole_end; |
| 540 | struct drm_mm_node *entry; |
| 541 | int ret; |
| 542 | |
| 543 | /* |
| 544 | * GuC requires all resources that we're sharing with it to be placed in |
| 545 | * non-WOPCM memory. If GuC is not present or not in use we still need a |
| 546 | * small bias as ring wraparound at offset 0 sometimes hangs. No idea |
| 547 | * why. |
| 548 | */ |
| 549 | ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, |
| 550 | intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); |
| 551 | |
| 552 | ret = intel_vgt_balloon(ggtt); |
| 553 | if (ret) |
| 554 | return ret; |
| 555 | |
| 556 | rw_init(&ggtt->error_mutex, "ggtter"); |
| 557 | if (ggtt->mappable_end) { |
| 558 | /* |
| 559 | * Reserve a mappable slot for our lockless error capture. |
| 560 | * |
| 561 | * We strongly prefer taking address 0x0 in order to protect |
| 562 | * other critical buffers against accidental overwrites, |
| 563 | * as writing to address 0 is a very common mistake. |
| 564 | * |
| 565 | * Since 0 may already be in use by the system (e.g. the BIOS |
| 566 | * framebuffer), we let the reservation fail quietly and hope |
| 567 | * 0 remains reserved always. |
| 568 | * |
| 569 | * If we fail to reserve 0, and then fail to find any space |
| 570 | * for an error-capture, remain silent. We can afford not |
| 571 | * to reserve an error_capture node as we have fallback |
| 572 | * paths, and we trust that 0 will remain reserved. However, |
| 573 | * the only likely reason for failure to insert is a driver |
| 574 | * bug, which we expect to cause other failures... |
| 575 | */ |
| 576 | ggtt->error_capture.size = I915_GTT_PAGE_SIZE; |
| 577 | ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; |
| 578 | if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) |
| 579 | drm_mm_insert_node_in_range(&ggtt->vm.mm, |
| 580 | &ggtt->error_capture, |
| 581 | ggtt->error_capture.size, 0, |
| 582 | ggtt->error_capture.color, |
| 583 | 0, ggtt->mappable_end, |
| 584 | DRM_MM_INSERT_LOW); |
| 585 | } |
| 586 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
| 587 | drm_dbg(&ggtt->vm.i915->drm, |
| 588 | "Reserved GGTT:[%llx, %llx] for use by error capture\n", |
| 589 | ggtt->error_capture.start, |
| 590 | ggtt->error_capture.start + ggtt->error_capture.size); |
| 591 | |
| 592 | /* |
| 593 | * The upper portion of the GuC address space has a sizeable hole |
| 594 | * (several MB) that is inaccessible by GuC. Reserve this range within |
| 595 | * GGTT as it can comfortably hold GuC/HuC firmware images. |
| 596 | */ |
| 597 | ret = ggtt_reserve_guc_top(ggtt); |
| 598 | if (ret) |
| 599 | goto err; |
| 600 | |
| 601 | /* Clear any non-preallocated blocks */ |
| 602 | drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { |
| 603 | drm_dbg(&ggtt->vm.i915->drm, |
| 604 | "clearing unused GTT space: [%lx, %lx]\n", |
| 605 | hole_start, hole_end); |
| 606 | ggtt->vm.clear_range(&ggtt->vm, hole_start, |
| 607 | hole_end - hole_start); |
| 608 | } |
| 609 | |
| 610 | /* And finally clear the reserved guard page */ |
| 611 | ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); |
| 612 | |
| 613 | return 0; |
| 614 | |
| 615 | err: |
| 616 | cleanup_init_ggtt(ggtt); |
| 617 | return ret; |
| 618 | } |
| 619 | |
| 620 | static void aliasing_gtt_bind_vma(struct i915_address_space *vm, |
| 621 | struct i915_vm_pt_stash *stash, |
| 622 | struct i915_vma *vma, |
| 623 | enum i915_cache_level cache_level, |
| 624 | u32 flags) |
| 625 | { |
| 626 | u32 pte_flags; |
| 627 | |
| 628 | /* Currently applicable only to VLV */ |
| 629 | pte_flags = 0; |
| 630 | if (i915_gem_object_is_readonly(vma->obj)) |
| 631 | pte_flags |= PTE_READ_ONLY; |
| 632 | |
| 633 | if (flags & I915_VMA_LOCAL_BIND) |
| 634 | ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, |
| 635 | stash, vma, cache_level, flags); |
| 636 | |
| 637 | if (flags & I915_VMA_GLOBAL_BIND) |
| 638 | vm->insert_entries(vm, vma, cache_level, pte_flags); |
| 639 | } |
| 640 | |
| 641 | static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, |
| 642 | struct i915_vma *vma) |
| 643 | { |
| 644 | if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) |
| 645 | vm->clear_range(vm, vma->node.start, vma->size); |
| 646 | |
| 647 | if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) |
| 648 | ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); |
| 649 | } |
| 650 | |
| 651 | static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) |
| 652 | { |
| 653 | struct i915_vm_pt_stash stash = {}; |
| 654 | struct i915_ppgtt *ppgtt; |
| 655 | int err; |
| 656 | |
| 657 | ppgtt = i915_ppgtt_create(ggtt->vm.gt); |
| 658 | if (IS_ERR(ppgtt)) |
| 659 | return PTR_ERR(ppgtt); |
| 660 | |
| 661 | if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { |
| 662 | err = -ENODEV; |
| 663 | goto err_ppgtt; |
| 664 | } |
| 665 | |
| 666 | err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); |
| 667 | if (err) |
| 668 | goto err_ppgtt; |
| 669 | |
| 670 | err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); |
| 671 | if (err) |
| 672 | goto err_stash; |
| 673 | |
| 674 | /* |
| 675 | * Note we only pre-allocate as far as the end of the global |
| 676 | * GTT. On 48b / 4-level page-tables, the difference is very, |
| 677 | * very significant! We have to preallocate as GVT/vgpu does |
| 678 | * not like the page directory disappearing. |
| 679 | */ |
| 680 | ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); |
| 681 | |
| 682 | ggtt->alias = ppgtt; |
| 683 | ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; |
| 684 | |
| 685 | GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); |
| 686 | ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; |
| 687 | |
| 688 | GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); |
| 689 | ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; |
| 690 | |
| 691 | i915_vm_free_pt_stash(&ppgtt->vm, &stash); |
| 692 | return 0; |
| 693 | |
| 694 | err_stash: |
| 695 | i915_vm_free_pt_stash(&ppgtt->vm, &stash); |
| 696 | err_ppgtt: |
| 697 | i915_vm_put(&ppgtt->vm); |
| 698 | return err; |
| 699 | } |
| 700 | |
| 701 | static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) |
| 702 | { |
| 703 | struct i915_ppgtt *ppgtt; |
| 704 | |
| 705 | ppgtt = fetch_and_zero(&ggtt->alias); |
| 706 | if (!ppgtt) |
| 707 | return; |
| 708 | |
| 709 | i915_vm_put(&ppgtt->vm); |
| 710 | |
| 711 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
| 712 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; |
| 713 | } |
| 714 | |
| 715 | int i915_init_ggtt(struct drm_i915_private *i915) |
| 716 | { |
| 717 | int ret; |
| 718 | |
| 719 | ret = init_ggtt(&i915->ggtt); |
| 720 | if (ret) |
| 721 | return ret; |
| 722 | |
| 723 | if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { |
| 724 | ret = init_aliasing_ppgtt(&i915->ggtt); |
| 725 | if (ret) |
| 726 | cleanup_init_ggtt(&i915->ggtt); |
| 727 | } |
| 728 | |
| 729 | return 0; |
| 730 | } |
| 731 | |
| 732 | static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) |
| 733 | { |
| 734 | struct i915_vma *vma, *vn; |
| 735 | |
| 736 | atomic_set(&ggtt->vm.open, 0); |
| 737 | |
| 738 | rcu_barrier(); /* flush the RCU'ed __i915_vm_release */ |
| 739 | flush_workqueue(ggtt->vm.i915->wq); |
| 740 | |
| 741 | mutex_lock(&ggtt->vm.mutex); |
| 742 | |
| 743 | list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) |
| 744 | WARN_ON(__i915_vma_unbind(vma)); |
| 745 | |
| 746 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
| 747 | drm_mm_remove_node(&ggtt->error_capture); |
| 748 | mutex_destroy(&ggtt->error_mutex); |
| 749 | |
| 750 | ggtt_release_guc_top(ggtt); |
| 751 | intel_vgt_deballoon(ggtt); |
| 752 | |
| 753 | ggtt->vm.cleanup(&ggtt->vm); |
| 754 | |
| 755 | mutex_unlock(&ggtt->vm.mutex); |
| 756 | i915_address_space_fini(&ggtt->vm); |
| 757 | |
| 758 | #ifdef notyet |
| 759 | arch_phys_wc_del(ggtt->mtrr); |
| 760 | |
| 761 | if (ggtt->iomap.size) |
| 762 | io_mapping_fini(&ggtt->iomap); |
| 763 | #endif |
| 764 | } |
| 765 | |
| 766 | /** |
| 767 | * i915_ggtt_driver_release - Clean up GGTT hardware initialization |
| 768 | * @i915: i915 device |
| 769 | */ |
| 770 | void i915_ggtt_driver_release(struct drm_i915_private *i915) |
| 771 | { |
| 772 | struct i915_ggtt *ggtt = &i915->ggtt; |
| 773 | |
| 774 | fini_aliasing_ppgtt(ggtt); |
| 775 | |
| 776 | intel_ggtt_fini_fences(ggtt); |
| 777 | ggtt_cleanup_hw(ggtt); |
| 778 | } |
| 779 | |
| 780 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
| 781 | { |
| 782 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; |
| 783 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; |
| 784 | return snb_gmch_ctl << 20; |
| 785 | } |
| 786 | |
| 787 | static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) |
| 788 | { |
| 789 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; |
| 790 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
| 791 | if (bdw_gmch_ctl) |
| 792 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
| 793 | |
| 794 | #ifdef CONFIG_X86_32 |
| 795 | /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ |
| 796 | if (bdw_gmch_ctl > 4) |
| 797 | bdw_gmch_ctl = 4; |
| 798 | #endif |
| 799 | |
| 800 | return bdw_gmch_ctl << 20; |
| 801 | } |
| 802 | |
| 803 | static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) |
| 804 | { |
| 805 | gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; |
| 806 | gmch_ctrl &= SNB_GMCH_GGMS_MASK; |
| 807 | |
| 808 | if (gmch_ctrl) |
| 809 | return 1 << (20 + gmch_ctrl); |
| 810 | |
| 811 | return 0; |
| 812 | } |
| 813 | |
| 814 | #ifdef __linux__ |
| 815 | |
| 816 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
| 817 | { |
| 818 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 819 | struct pci_dev *pdev = i915->drm.pdev; |
| 820 | phys_addr_t phys_addr; |
| 821 | int ret; |
| 822 | |
| 823 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
| 824 | phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; |
| 825 | |
| 826 | /* |
| 827 | * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range |
| 828 | * will be dropped. For WC mappings in general we have 64 byte burst |
| 829 | * writes when the WC buffer is flushed, so we can't use it, but have to |
| 830 | * resort to an uncached mapping. The WC issue is easily caught by the |
| 831 | * readback check when writing GTT PTE entries. |
| 832 | */ |
| 833 | if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) |
| 834 | ggtt->gsm = ioremap(phys_addr, size); |
| 835 | else |
| 836 | ggtt->gsm = ioremap_wc(phys_addr, size); |
| 837 | if (!ggtt->gsm) { |
| 838 | drm_err(&i915->drm, "Failed to map the ggtt page table\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to map the ggtt page table\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
| 839 | return -ENOMEM12; |
| 840 | } |
| 841 | |
| 842 | ret = setup_scratch_page(&ggtt->vm); |
| 843 | if (ret) { |
| 844 | drm_err(&i915->drm, "Scratch setup failed\n"); |
| 845 | /* iounmap will also get called at remove, but meh */ |
| 846 | iounmap(ggtt->gsm); |
| 847 | return ret; |
| 848 | } |
| 849 | |
| 850 | ggtt->vm.scratch[0]->encode = |
| 851 | ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), |
| 852 | I915_CACHE_NONE, 0); |
| 853 | |
| 854 | return 0; |
| 855 | } |
| 856 | |
| 857 | #else |
| 858 | |
| 859 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
| 860 | { |
| 861 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 862 | struct pci_dev *pdev = i915->drm.pdev; |
Value stored to 'pdev' during its initialization is never read | |
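(This OpenBSD path resolves the BAR via pci_mapreg_type()/pci_mapreg_info() on i915->pc and i915->tag, so the pdev local above is never read; see the fix sketch under the warning header.)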
| 863 | phys_addr_t phys_addr; |
| 864 | bus_addr_t addr; |
| 865 | bus_size_t len; |
| 866 | pcireg_t type; |
| 867 | int flags; |
| 868 | int ret; |
| 869 | |
| 870 | /* For Modern GENs the PTEs and register space are split in the BAR */ |
| 871 | type = pci_mapreg_type(i915->pc, i915->tag, 0x10); |
| 872 | ret = -pci_mapreg_info(i915->pc, i915->tag, 0x10, type, |
| 873 | &addr, &len, NULL); |
| 874 | if (ret) |
| 875 | return ret; |
| 876 | |
| 877 | /* |
| 878 | * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range |
| 879 | * will be dropped. For WC mappings in general we have 64 byte burst |
| 880 | * writes when the WC buffer is flushed, so we can't use it, but have to |
| 881 | * resort to an uncached mapping. The WC issue is easily caught by the |
| 882 | * readback check when writing GTT PTE entries. |
| 883 | */ |
| 884 | if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) |
| 885 | flags = 0; |
| 886 | else |
| 887 | flags = BUS_SPACE_MAP_PREFETCHABLE; |
| 888 | ret = -bus_space_map(i915->bst, addr + len / 2, size, |
| 889 | flags | BUS_SPACE_MAP_LINEAR, &ggtt->gsm_bsh); |
| 890 | if (ret) { |
| 891 | drm_err(&i915->drm, "Failed to map the ggtt page table\n"); |
| 892 | return ret; |
| 893 | } |
| 894 | ggtt->gsm = bus_space_vaddr(i915->bst, ggtt->gsm_bsh); |
| 895 | ggtt->gsm_size = size; |
| 896 | if (!ggtt->gsm) { |
| 897 | DRM_ERROR("Failed to map the ggtt page table\n"); |
| 898 | return -ENOMEM; |
| 899 | } |
| 900 | |
| 901 | ret = setup_scratch_page(&ggtt->vm); |
| 902 | if (ret) { |
| 903 | drm_err(&i915->drm, "Scratch setup failed\n"); |
| 904 | /* iounmap will also get called at remove, but meh */ |
| 905 | bus_space_unmap(i915->bst, ggtt->gsm_bsh, size); |
| 906 | return ret; |
| 907 | } |
| 908 | |
| 909 | ggtt->vm.scratch[0]->encode = |
| 910 | ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), |
| 911 | I915_CACHE_NONE, 0); |
| 912 | |
| 913 | return 0; |
| 914 | } |
| 915 | |
| 916 | #endif |
| 917 | |
| 918 | int ggtt_set_pages(struct i915_vma *vma) |
| 919 | { |
| 920 | int ret; |
| 921 | |
| 922 | GEM_BUG_ON(vma->pages); |
| 923 | |
| 924 | ret = i915_get_ggtt_vma_pages(vma); |
| 925 | if (ret) |
| 926 | return ret; |
| 927 | |
| 928 | vma->page_sizes = vma->obj->mm.page_sizes; |
| 929 | |
| 930 | return 0; |
| 931 | } |
| 932 | |
| 933 | static void gen6_gmch_remove(struct i915_address_space *vm) |
| 934 | { |
| 935 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
| 936 | |
| 937 | #ifdef __linux__ |
| 938 | iounmap(ggtt->gsm); |
| 939 | #else |
| 940 | bus_space_unmap(vm->i915->bst, ggtt->gsm_bsh, ggtt->gsm_size); |
| 941 | #endif |
| 942 | free_scratch(vm); |
| 943 | } |
| 944 | |
| 945 | #ifdef __linux__ |
| 946 | static struct resource pci_resource(struct pci_dev *pdev, int bar) |
| 947 | { |
| 948 | return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), |
| 949 | pci_resource_len(pdev, bar)); |
| 950 | } |
| 951 | #endif |
| 952 | |
| 953 | static int gen8_gmch_probe(struct i915_ggtt *ggtt) |
| 954 | { |
| 955 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 956 | struct pci_dev *pdev = i915->drm.pdev; |
| 957 | unsigned int size; |
| 958 | u16 snb_gmch_ctl; |
| 959 | |
| 960 | /* TODO: We're not aware of mappable constraints on gen8 yet */ |
| 961 | if (!IS_DGFX(i915)) { |
| 962 | #ifdef __linux__ |
| 963 | ggtt->gmadr = pci_resource(pdev, 2); |
| 964 | ggtt->mappable_end = resource_size(&ggtt->gmadr); |
| 965 | #else |
| 966 | bus_addr_t base; |
| 967 | bus_size_t sz; |
| 968 | pcireg_t type; |
| 969 | int err; |
| 970 | |
| 971 | type = pci_mapreg_type(i915->pc, i915->tag, 0x18); |
| 972 | err = -pci_mapreg_info(i915->pc, i915->tag, 0x18, type, |
| 973 | &base, &sz, NULL); |
| 974 | if (err) |
| 975 | return err; |
| 976 | ggtt->gmadr.start = base; |
| 977 | ggtt->mappable_end = sz; |
| 978 | #endif |
| 979 | } |
| 980 | |
| 981 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
| 982 | if (IS_CHERRYVIEW(i915)) |
| 983 | size = chv_get_total_gtt_size(snb_gmch_ctl); |
| 984 | else |
| 985 | size = gen8_get_total_gtt_size(snb_gmch_ctl); |
| 986 | |
| 987 | ggtt->vm.alloc_pt_dma = alloc_pt_dma; |
| 988 | |
| 989 | ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; |
| 990 | ggtt->vm.cleanup = gen6_gmch_remove; |
| 991 | ggtt->vm.insert_page = gen8_ggtt_insert_page; |
| 992 | ggtt->vm.clear_range = nop_clear_range; |
| 993 | if (intel_scanout_needs_vtd_wa(i915)) |
| 994 | ggtt->vm.clear_range = gen8_ggtt_clear_range; |
| 995 | |
| 996 | ggtt->vm.insert_entries = gen8_ggtt_insert_entries; |
| 997 | |
| 998 | /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ |
| 999 | if (intel_ggtt_update_needs_vtd_wa(i915) || |
| 1000 | IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) { |
| 1001 | ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; |
| 1002 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; |
| 1003 | ggtt->vm.bind_async_flags = |
| 1004 | I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; |
| 1005 | } |
| 1006 | |
| 1007 | ggtt->invalidate = gen8_ggtt_invalidate; |
| 1008 | |
| 1009 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
| 1010 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; |
| 1011 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; |
| 1012 | ggtt->vm.vma_ops.clear_pages = clear_pages; |
| 1013 | |
| 1014 | ggtt->vm.pte_encode = gen8_ggtt_pte_encode; |
| 1015 | |
| 1016 | setup_private_pat(ggtt->vm.gt->uncore); |
| 1017 | |
| 1018 | return ggtt_probe_common(ggtt, size); |
| 1019 | } |
| 1020 | |
| 1021 | static u64 snb_pte_encode(dma_addr_t addr, |
| 1022 | enum i915_cache_level level, |
| 1023 | u32 flags) |
| 1024 | { |
| 1025 | gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; |
| 1026 | |
| 1027 | switch (level) { |
| 1028 | case I915_CACHE_L3_LLC: |
| 1029 | case I915_CACHE_LLC: |
| 1030 | pte |= GEN6_PTE_CACHE_LLC; |
| 1031 | break; |
| 1032 | case I915_CACHE_NONE: |
| 1033 | pte |= GEN6_PTE_UNCACHED; |
| 1034 | break; |
| 1035 | default: |
| 1036 | MISSING_CASE(level); |
| 1037 | } |
| 1038 | |
| 1039 | return pte; |
| 1040 | } |
| 1041 | |
| 1042 | static u64 ivb_pte_encode(dma_addr_t addr, |
| 1043 | enum i915_cache_level level, |
| 1044 | u32 flags) |
| 1045 | { |
| 1046 | gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; |
| 1047 | |
| 1048 | switch (level) { |
| 1049 | case I915_CACHE_L3_LLC: |
| 1050 | pte |= GEN7_PTE_CACHE_L3_LLC; |
| 1051 | break; |
| 1052 | case I915_CACHE_LLC: |
| 1053 | pte |= GEN6_PTE_CACHE_LLC; |
| 1054 | break; |
| 1055 | case I915_CACHE_NONE: |
| 1056 | pte |= GEN6_PTE_UNCACHED; |
| 1057 | break; |
| 1058 | default: |
| 1059 | MISSING_CASE(level); |
| 1060 | } |
| 1061 | |
| 1062 | return pte; |
| 1063 | } |
| 1064 | |
| 1065 | static u64 byt_pte_encode(dma_addr_t addr, |
| 1066 | enum i915_cache_level level, |
| 1067 | u32 flags) |
| 1068 | { |
| 1069 | gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; |
| 1070 | |
| 1071 | if (!(flags & PTE_READ_ONLY)) |
| 1072 | pte |= BYT_PTE_WRITEABLE; |
| 1073 | |
| 1074 | if (level != I915_CACHE_NONE) |
| 1075 | pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; |
| 1076 | |
| 1077 | return pte; |
| 1078 | } |
| 1079 | |
| 1080 | static u64 hsw_pte_encode(dma_addr_t addr, |
| 1081 | enum i915_cache_level level, |
| 1082 | u32 flags) |
| 1083 | { |
| 1084 | gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; |
| 1085 | |
| 1086 | if (level != I915_CACHE_NONE) |
| 1087 | pte |= HSW_WB_LLC_AGE3; |
| 1088 | |
| 1089 | return pte; |
| 1090 | } |
| 1091 | |
| 1092 | static u64 iris_pte_encode(dma_addr_t addr, |
| 1093 | enum i915_cache_level level, |
| 1094 | u32 flags) |
| 1095 | { |
| 1096 | gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID; |
| 1097 | |
| 1098 | switch (level) { |
| 1099 | case I915_CACHE_NONE: |
| 1100 | break; |
| 1101 | case I915_CACHE_WT: |
| 1102 | pte |= HSW_WT_ELLC_LLC_AGE3; |
| 1103 | break; |
| 1104 | default: |
| 1105 | pte |= HSW_WB_ELLC_LLC_AGE3; |
| 1106 | break; |
| 1107 | } |
| 1108 | |
| 1109 | return pte; |
| 1110 | } |
| 1111 | |
| 1112 | static int gen6_gmch_probe(struct i915_ggtt *ggtt) |
| 1113 | { |
| 1114 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 1115 | struct pci_dev *pdev = i915->drm.pdev; |
| 1116 | unsigned int size; |
| 1117 | u16 snb_gmch_ctl; |
| 1118 | |
| 1119 | #ifdef __linux__ |
| 1120 | ggtt->gmadr = pci_resource(pdev, 2); |
| 1121 | ggtt->mappable_end = resource_size(&ggtt->gmadr); |
| 1122 | #else |
| 1123 | { |
| 1124 | bus_addr_t base; |
| 1125 | bus_size_t sz; |
| 1126 | pcireg_t type; |
| 1127 | int err; |
| 1128 | |
| 1129 | type = pci_mapreg_type(i915->pc, i915->tag, 0x18); |
| 1130 | err = -pci_mapreg_info(i915->pc, i915->tag, 0x18, type, |
| 1131 | &base, &sz, NULL); |
| 1132 | if (err) |
| 1133 | return err; |
| 1134 | ggtt->gmadr.start = base; |
| 1135 | ggtt->mappable_end = sz; |
| 1136 | } |
| 1137 | #endif |
| 1138 | |
| 1139 | /* |
| 1140 | * 64/512MB is the current min/max we actually know of, but this is |
| 1141 | * just a coarse sanity check. |
| 1142 | */ |
| 1143 | if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { |
| 1144 | drm_err(&i915->drm, "Unknown GMADR size (%pa)\n", |
| 1145 | &ggtt->mappable_end); |
| 1146 | return -ENXIO; |
| 1147 | } |
| 1148 | |
| 1149 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
| 1150 | |
| 1151 | size = gen6_get_total_gtt_size(snb_gmch_ctl); |
| 1152 | ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; |
| 1153 | |
| 1154 | ggtt->vm.alloc_pt_dma = alloc_pt_dma; |
| 1155 | |
| 1156 | ggtt->vm.clear_range = nop_clear_range; |
| 1157 | if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) |
| 1158 | ggtt->vm.clear_range = gen6_ggtt_clear_range; |
| 1159 | ggtt->vm.insert_page = gen6_ggtt_insert_page; |
| 1160 | ggtt->vm.insert_entries = gen6_ggtt_insert_entries; |
| 1161 | ggtt->vm.cleanup = gen6_gmch_remove; |
| 1162 | |
| 1163 | ggtt->invalidate = gen6_ggtt_invalidate; |
| 1164 | |
| 1165 | if (HAS_EDRAM(i915)) |
| 1166 | ggtt->vm.pte_encode = iris_pte_encode; |
| 1167 | else if (IS_HASWELL(i915)) |
| 1168 | ggtt->vm.pte_encode = hsw_pte_encode; |
| 1169 | else if (IS_VALLEYVIEW(i915)) |
| 1170 | ggtt->vm.pte_encode = byt_pte_encode; |
| 1171 | else if (INTEL_GEN(i915) >= 7) |
| 1172 | ggtt->vm.pte_encode = ivb_pte_encode; |
| 1173 | else |
| 1174 | ggtt->vm.pte_encode = snb_pte_encode; |
| 1175 | |
| 1176 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
| 1177 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; |
| 1178 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; |
| 1179 | ggtt->vm.vma_ops.clear_pages = clear_pages; |
| 1180 | |
| 1181 | return ggtt_probe_common(ggtt, size); |
| 1182 | } |
| 1183 | |
| 1184 | static void i915_gmch_remove(struct i915_address_space *vm) |
| 1185 | { |
| 1186 | intel_gmch_remove(); |
| 1187 | } |
| 1188 | |
| 1189 | static int i915_gmch_probe(struct i915_ggtt *ggtt) |
| 1190 | { |
| 1191 | struct drm_i915_private *i915 = ggtt->vm.i915; |
| 1192 | phys_addr_t gmadr_base; |
| 1193 | int ret; |
| 1194 | |
| 1195 | ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); |
| 1196 | if (!ret) { |
| 1197 | drm_err(&i915->drm, "failed to set up gmch\n"); |
| 1198 | return -EIO; |
| 1199 | } |
| 1200 | |
| 1201 | intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); |
| 1202 | |
| 1203 | ggtt->gmadr = |
| 1204 | (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); |
| 1205 | |
| 1206 | ggtt->vm.alloc_pt_dma = alloc_pt_dma; |
| 1207 | |
| 1208 | ggtt->do_idle_maps = needs_idle_maps(i915); |
| 1209 | ggtt->vm.insert_page = i915_ggtt_insert_page; |
| 1210 | ggtt->vm.insert_entries = i915_ggtt_insert_entries; |
| 1211 | ggtt->vm.clear_range = i915_ggtt_clear_range; |
| 1212 | ggtt->vm.cleanup = i915_gmch_remove; |
| 1213 | |
| 1214 | ggtt->invalidate = gmch_ggtt_invalidate; |
| 1215 | |
| 1216 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
| 1217 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; |
| 1218 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; |
| 1219 | ggtt->vm.vma_ops.clear_pages = clear_pages; |
| 1220 | |
| 1221 | if (unlikely(ggtt->do_idle_maps)) |
| 1222 | drm_notice(&i915->drm, |
| 1223 | "Applying Ironlake quirks for intel_iommu\n"); |
| 1224 | |
| 1225 | return 0; |
| 1226 | } |
| 1227 | |
| 1228 | static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) |
| 1229 | { |
| 1230 | struct drm_i915_private *i915 = gt->i915; |
| 1231 | int ret; |
| 1232 | |
| 1233 | ggtt->vm.gt = gt; |
| 1234 | ggtt->vm.i915 = i915; |
| 1235 | #ifdef notyet |
| 1236 | ggtt->vm.dma = &i915->drm.pdev->dev; |
| 1237 | #endif |
| 1238 | |
| 1239 | if (INTEL_GEN(i915) <= 5)
| 1240 | ret = i915_gmch_probe(ggtt);
| 1241 | else if (INTEL_GEN(i915) < 8)
| 1242 | ret = gen6_gmch_probe(ggtt); |
| 1243 | else |
| 1244 | ret = gen8_gmch_probe(ggtt); |
| 1245 | if (ret) |
| 1246 | return ret; |
| 1247 | |
| 1248 | if ((ggtt->vm.total - 1) >> 32) { |
| 1249 | drm_err(&i915->drm,
| 1250 | "We never expected a Global GTT with more than 32bits"
| 1251 | " of address space! Found %lldM!\n",
| 1252 | ggtt->vm.total >> 20);
| 1253 | ggtt->vm.total = 1ULL << 32;
| 1254 | ggtt->mappable_end =
| 1255 | min_t(u64, ggtt->mappable_end, ggtt->vm.total);
| 1256 | } |
| 1257 | |
| 1258 | if (ggtt->mappable_end > ggtt->vm.total) { |
| 1259 | drm_err(&i915->drm,
| 1260 | "mappable aperture extends past end of GGTT,"
| 1261 | " aperture=%pa, total=%llx\n",
| 1262 | &ggtt->mappable_end, ggtt->vm.total);
| 1263 | ggtt->mappable_end = ggtt->vm.total; |
| 1264 | } |
| 1265 | |
| 1266 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
| 1267 | drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
| 1268 | drm_dbg(&i915->drm, "GMADR size = %lluM\n",
| 1269 | (u64)ggtt->mappable_end >> 20);
| 1270 | drm_dbg(&i915->drm, "DSM size = %lluM\n",
| 1271 | (u64)resource_size(&intel_graphics_stolen_res) >> 20);
| 1272 | |
| 1273 | return 0; |
| 1274 | } |
| 1275 | |
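/*
 * Worked example of the overflow check in ggtt_probe_hw(): a 4 GiB
 * GGTT has vm.total == 1ULL << 32, so (vm.total - 1) >> 32 == 0 and
 * nothing is clamped; only a GGTT strictly larger than 4 GiB (which
 * the driver never expects) trips the error path and is cut back to
 * 4 GiB, with mappable_end clamped to match.
 */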
| 1276 | /** |
| 1277 | * i915_ggtt_probe_hw - Probe GGTT hardware location |
| 1278 | * @i915: i915 device |
| 1279 | */ |
| 1280 | int i915_ggtt_probe_hw(struct drm_i915_private *i915)
| 1281 | { |
| 1282 | int ret; |
| 1283 | |
| 1284 | ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); |
| 1285 | if (ret) |
| 1286 | return ret; |
| 1287 | |
| 1288 | if (intel_vtd_active()) |
| 1289 | drm_info(&i915->drm, "VT-d active for gfx access\n");
| 1290 | |
| 1291 | return 0; |
| 1292 | } |
| 1293 | |
| 1294 | int i915_ggtt_enable_hw(struct drm_i915_private *i915)
| 1295 | {
| 1296 | if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
| 1297 | return -EIO;
| 1298 | |
| 1299 | return 0; |
| 1300 | } |
| 1301 | |
| 1302 | void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) |
| 1303 | { |
| 1304 | GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
| 1305 | |
| 1306 | ggtt->invalidate = guc_ggtt_invalidate; |
| 1307 | |
| 1308 | ggtt->invalidate(ggtt); |
| 1309 | } |
| 1310 | |
| 1311 | void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) |
| 1312 | { |
| 1313 | /* XXX Temporary pardon for error unload */ |
| 1314 | if (ggtt->invalidate == gen8_ggtt_invalidate) |
| 1315 | return; |
| 1316 | |
| 1317 | /* We should only be called after i915_ggtt_enable_guc() */ |
| 1318 | GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
| 1319 | |
| 1320 | ggtt->invalidate = gen8_ggtt_invalidate; |
| 1321 | |
| 1322 | ggtt->invalidate(ggtt); |
| 1323 | } |
| 1324 | |
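/*
 * The two helpers above swap the invalidate hook as the GuC comes and
 * goes. Illustrative call order during GuC bring-up and teardown:
 *
 *	i915_ggtt_enable_guc(ggtt);	(gen8 -> guc_ggtt_invalidate)
 *	...
 *	i915_ggtt_disable_guc(ggtt);	(guc -> gen8_ggtt_invalidate)
 *
 * Each swap immediately calls ggtt->invalidate(ggtt) so no stale GGTT
 * TLB entries survive the transition.
 */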
| 1325 | void i915_ggtt_resume(struct i915_ggtt *ggtt) |
| 1326 | { |
| 1327 | struct i915_vma *vma; |
| 1328 | bool flush = false;
| 1329 | int open; |
| 1330 | |
| 1331 | intel_gt_check_and_clear_faults(ggtt->vm.gt); |
| 1332 | |
| 1333 | /* First fill our portion of the GTT with scratch pages */ |
| 1334 | ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); |
| 1335 | |
| 1336 | /* Skip rewriting PTE on VMA unbind. */ |
| 1337 | open = atomic_xchg(&ggtt->vm.open, 0); |
| 1338 | |
| 1339 | /* clflush objects bound into the GGTT and rebind them. */ |
| 1340 | list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
| 1341 | struct drm_i915_gem_object *obj = vma->obj;
| 1342 | unsigned int was_bound =
| 1343 | atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
| 1344 | |
| 1345 | GEM_BUG_ON(!was_bound);
| 1346 | vma->ops->bind_vma(&ggtt->vm, NULL, vma,
| 1347 | obj ? obj->cache_level : 0, |
| 1348 | was_bound); |
| 1349 | if (obj) { /* only used during resume => exclusive access */ |
| 1350 | flush |= fetch_and_zero(&obj->write_domain);
| 1351 | obj->read_domains |= I915_GEM_DOMAIN_GTT;
| 1352 | } |
| 1353 | } |
| 1354 | |
| 1355 | atomic_set(&ggtt->vm.open, open);
| 1356 | ggtt->invalidate(ggtt); |
| 1357 | |
| 1358 | if (flush) |
| 1359 | wbinvd_on_all_cpus(); |
| 1360 | |
| 1361 | if (INTEL_GEN(ggtt->vm.i915) >= 8)
| 1362 | setup_private_pat(ggtt->vm.gt->uncore); |
| 1363 | |
| 1364 | intel_ggtt_restore_fences(ggtt); |
| 1365 | } |
| 1366 | |
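/*
 * Resume sequence in brief: clear stale faults, reset the whole GGTT
 * to scratch, then rebind every VMA still on the bound list. vm.open
 * is exchanged to 0 around the loop so that any unbind occurring while
 * the GGTT contents are being rebuilt skips rewriting PTEs that are
 * about to be reloaded anyway, and the heavyweight wbinvd_on_all_cpus()
 * only runs if some object still had a dirty CPU write domain to flush.
 */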
| 1367 | static struct scatterlist * |
| 1368 | rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, |
| 1369 | unsigned int width, unsigned int height, |
| 1370 | unsigned int stride, |
| 1371 | struct sg_table *st, struct scatterlist *sg) |
| 1372 | { |
| 1373 | unsigned int column, row; |
| 1374 | unsigned int src_idx; |
| 1375 | |
| 1376 | for (column = 0; column < width; column++) { |
| 1377 | src_idx = stride * (height - 1) + column + offset; |
| 1378 | for (row = 0; row < height; row++) { |
| 1379 | st->nents++; |
| 1380 | /* |
| 1381 | * We don't need the pages, but need to initialize |
| 1382 | * the entries so the sg list can be happily traversed. |
| 1383 | * The only thing we need are DMA addresses. |
| 1384 | */ |
| 1385 | sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
| 1386 | sg_dma_address(sg) =
| 1387 | i915_gem_object_get_dma_address(obj, src_idx);
| 1388 | sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
| 1389 | sg = sg_next(sg); |
| 1390 | src_idx -= stride; |
| 1391 | } |
| 1392 | } |
| 1393 | |
| 1394 | return sg; |
| 1395 | } |
| 1396 | |
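/*
 * Worked example of rotate_pages() for width = 2, height = 2,
 * stride = 4, offset = 0: column 0 starts at src_idx = 4*(2-1)+0 = 4
 * and emits source pages 4 then 0; column 1 starts at 5 and emits
 * 5 then 1. The output list thus reads the source column-major and
 * bottom-up, which is the page order a 90-degree rotated view needs.
 */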
| 1397 | static noinline struct sg_table *
| 1398 | intel_rotate_pages(struct intel_rotation_info *rot_info, |
| 1399 | struct drm_i915_gem_object *obj) |
| 1400 | { |
| 1401 | unsigned int size = intel_rotation_info_size(rot_info); |
| 1402 | struct drm_i915_private *i915 = to_i915(obj->base.dev);
| 1403 | struct sg_table *st;
| 1404 | struct scatterlist *sg;
| 1405 | int ret = -ENOMEM;
| 1406 | int i;
| 1407 | 
| 1408 | /* Allocate target SG list. */
| 1409 | st = kmalloc(sizeof(*st), GFP_KERNEL);
| 1410 | if (!st)
| 1411 | goto err_st_alloc;
| 1412 | 
| 1413 | ret = sg_alloc_table(st, size, GFP_KERNEL);
| 1414 | if (ret) |
| 1415 | goto err_sg_alloc; |
| 1416 | |
| 1417 | st->nents = 0; |
| 1418 | sg = st->sgl; |
| 1419 | |
| 1420 | for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
| 1421 | sg = rotate_pages(obj, rot_info->plane[i].offset, |
| 1422 | rot_info->plane[i].width, rot_info->plane[i].height, |
| 1423 | rot_info->plane[i].stride, st, sg); |
| 1424 | } |
| 1425 | |
| 1426 | return st; |
| 1427 | |
| 1428 | err_sg_alloc: |
| 1429 | kfree(st); |
| 1430 | err_st_alloc: |
| 1431 | |
| 1432 | drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
| 1433 | obj->base.size, rot_info->plane[0].width,
| 1434 | rot_info->plane[0].height, size);
| 1435 | |
| 1436 | return ERR_PTR(ret); |
| 1437 | } |
| 1438 | |
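/*
 * The rotate, remap and partial builders in this file share one shape:
 * kmalloc the sg_table, sg_alloc_table() a worst-case entry count,
 * fill the entries with DMA addresses only (no struct pages), and
 * return ERR_PTR(ret) on failure. A sketch of the expected caller
 * pattern, as used by i915_get_ggtt_vma_pages() below (view and obj
 * stand in for a ggtt view and its object):
 *
 *	struct sg_table *st = intel_rotate_pages(&view->rotated, obj);
 *	if (IS_ERR(st))
 *		return PTR_ERR(st);
 */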
| 1439 | static struct scatterlist * |
| 1440 | remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, |
| 1441 | unsigned int width, unsigned int height, |
| 1442 | unsigned int stride, |
| 1443 | struct sg_table *st, struct scatterlist *sg) |
| 1444 | { |
| 1445 | unsigned int row; |
| 1446 | |
| 1447 | for (row = 0; row < height; row++) { |
| 1448 | unsigned int left = width * I915_GTT_PAGE_SIZE;
| 1449 | |
| 1450 | while (left) { |
| 1451 | dma_addr_t addr; |
| 1452 | unsigned int length; |
| 1453 | |
| 1454 | /* |
| 1455 | * We don't need the pages, but need to initialize |
| 1456 | * the entries so the sg list can be happily traversed. |
| 1457 | * The only thing we need are DMA addresses. |
| 1458 | */ |
| 1459 | |
| 1460 | addr = i915_gem_object_get_dma_address_len(obj, offset, &length); |
| 1461 | |
| 1462 | length = min(left, length);
| 1463 | |
| 1464 | st->nents++; |
| 1465 | |
| 1466 | sg_set_page(sg, NULL, length, 0);
| 1467 | sg_dma_address(sg) = addr;
| 1468 | sg_dma_len(sg) = length;
| 1469 | sg = sg_next(sg); |
| 1470 | |
| 1471 | offset += length / I915_GTT_PAGE_SIZE;
| 1472 | left -= length; |
| 1473 | } |
| 1474 | |
| 1475 | offset += stride - width; |
| 1476 | } |
| 1477 | |
| 1478 | return sg; |
| 1479 | } |
| 1480 | |
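/*
 * Unlike rotate_pages(), remap_pages() keeps rows in source order and
 * merely skips (stride - width) pages between rows. It also coalesces:
 * i915_gem_object_get_dma_address_len() reports how many contiguous
 * bytes follow the looked-up page, so a physically contiguous row can
 * be covered by one sg entry instead of one entry per page.
 */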
| 1481 | static noinline struct sg_table *
| 1482 | intel_remap_pages(struct intel_remapped_info *rem_info, |
| 1483 | struct drm_i915_gem_object *obj) |
| 1484 | { |
| 1485 | unsigned int size = intel_remapped_info_size(rem_info); |
| 1486 | struct drm_i915_private *i915 = to_i915(obj->base.dev);
| 1487 | struct sg_table *st;
| 1488 | struct scatterlist *sg;
| 1489 | int ret = -ENOMEM;
| 1490 | int i;
| 1491 | 
| 1492 | /* Allocate target SG list. */
| 1493 | st = kmalloc(sizeof(*st), GFP_KERNEL);
| 1494 | if (!st)
| 1495 | goto err_st_alloc;
| 1496 | 
| 1497 | ret = sg_alloc_table(st, size, GFP_KERNEL);
| 1498 | if (ret) |
| 1499 | goto err_sg_alloc; |
| 1500 | |
| 1501 | st->nents = 0; |
| 1502 | sg = st->sgl; |
| 1503 | |
| 1504 | for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
| 1505 | sg = remap_pages(obj, rem_info->plane[i].offset, |
| 1506 | rem_info->plane[i].width, rem_info->plane[i].height, |
| 1507 | rem_info->plane[i].stride, st, sg); |
| 1508 | } |
| 1509 | |
| 1510 | i915_sg_trim(st); |
| 1511 | |
| 1512 | return st; |
| 1513 | |
| 1514 | err_sg_alloc: |
| 1515 | kfree(st); |
| 1516 | err_st_alloc: |
| 1517 | |
| 1518 | drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
| 1519 | obj->base.size, rem_info->plane[0].width,
| 1520 | rem_info->plane[0].height, size);
| 1521 | |
| 1522 | return ERR_PTR(ret); |
| 1523 | } |
| 1524 | |
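/*
 * Both this remapped path and the partial path below call
 * i915_sg_trim(), since coalescing can leave allocated-but-unused tail
 * entries to drop; rotate_pages() always emits exactly one entry per
 * page, so the rotated path has nothing to trim.
 */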
| 1525 | static noinline struct sg_table *
| 1526 | intel_partial_pages(const struct i915_ggtt_view *view, |
| 1527 | struct drm_i915_gem_object *obj) |
| 1528 | { |
| 1529 | struct sg_table *st; |
| 1530 | struct scatterlist *sg, *iter; |
| 1531 | unsigned int count = view->partial.size; |
| 1532 | unsigned int offset; |
| 1533 | int ret = -ENOMEM;
| 1534 | 
| 1535 | st = kmalloc(sizeof(*st), GFP_KERNEL);
| 1536 | if (!st)
| 1537 | goto err_st_alloc;
| 1538 | 
| 1539 | ret = sg_alloc_table(st, count, GFP_KERNEL);
| 1540 | if (ret) |
| 1541 | goto err_sg_alloc; |
| 1542 | |
| 1543 | iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); |
| 1544 | GEM_BUG_ON(!iter);
| 1545 | |
| 1546 | sg = st->sgl; |
| 1547 | st->nents = 0; |
| 1548 | do { |
| 1549 | unsigned int len; |
| 1550 | |
| 1551 | len = min(iter->length - (offset << PAGE_SHIFT),
| 1552 | count << PAGE_SHIFT);
| 1553 | sg_set_page(sg, NULL, len, 0);
| 1554 | sg_dma_address(sg) =
| 1555 | sg_dma_address(iter) + (offset << PAGE_SHIFT);
| 1556 | sg_dma_len(sg) = len;
| 1557 | |
| 1558 | st->nents++; |
| 1559 | count -= len >> PAGE_SHIFT;
| 1560 | if (count == 0) { |
| 1561 | sg_mark_end(sg); |
| 1562 | i915_sg_trim(st); /* Drop any unused tail entries. */ |
| 1563 | |
| 1564 | return st; |
| 1565 | } |
| 1566 | |
| 1567 | sg = __sg_next(sg); |
| 1568 | iter = __sg_next(iter); |
| 1569 | offset = 0; |
| 1570 | } while (1); |
| 1571 | |
| 1572 | err_sg_alloc: |
| 1573 | kfree(st); |
| 1574 | err_st_alloc: |
| 1575 | return ERR_PTR(ret); |
| 1576 | } |
| 1577 | |
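/*
 * Worked example of the length clamp in intel_partial_pages(): for a
 * partial view of count = 3 pages starting at object page 5, where the
 * source entry found by i915_gem_object_get_sg() covers pages 4..7,
 * the lookup yields offset = 1, so len = min(4 - 1, 3) pages = 3 pages
 * and the whole view fits in a single sg entry starting one page into
 * iter.
 */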
| 1578 | static int |
| 1579 | i915_get_ggtt_vma_pages(struct i915_vma *vma) |
| 1580 | { |
| 1581 | int ret; |
| 1582 | |
| 1583 | /* |
| 1584 | * The vma->pages are only valid within the lifespan of the borrowed |
| 1585 | * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so |
| 1586 | * must be the vma->pages. A simple rule is that vma->pages must only |
| 1587 | * be accessed when the obj->mm.pages are pinned. |
| 1588 | */ |
| 1589 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
| 1590 | |
| 1591 | switch (vma->ggtt_view.type) { |
| 1592 | default: |
| 1593 | GEM_BUG_ON(vma->ggtt_view.type);
| 1594 | fallthrough;
| 1595 | case I915_GGTT_VIEW_NORMAL: |
| 1596 | vma->pages = vma->obj->mm.pages; |
| 1597 | return 0; |
| 1598 | |
| 1599 | case I915_GGTT_VIEW_ROTATED: |
| 1600 | vma->pages = |
| 1601 | intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); |
| 1602 | break; |
| 1603 | |
| 1604 | case I915_GGTT_VIEW_REMAPPED: |
| 1605 | vma->pages = |
| 1606 | intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); |
| 1607 | break; |
| 1608 | |
| 1609 | case I915_GGTT_VIEW_PARTIAL: |
| 1610 | vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); |
| 1611 | break; |
| 1612 | } |
| 1613 | |
| 1614 | ret = 0; |
| 1615 | if (IS_ERR(vma->pages)) { |
| 1616 | ret = PTR_ERR(vma->pages); |
| 1617 | vma->pages = NULL;
| 1618 | drm_err(&vma->vm->i915->drm,
| 1619 | "Failed to get pages for VMA view type %u (%d)!\n",
| 1620 | vma->ggtt_view.type, ret);
| 1621 | } |
| 1622 | return ret; |
| 1623 | } |
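/*
 * Ownership summary for i915_get_ggtt_vma_pages(): a NORMAL view
 * borrows obj->mm.pages directly and returns early, so there is
 * nothing extra to free; the ROTATED, REMAPPED and PARTIAL views
 * allocate a fresh sg_table describing the same backing pages in a
 * different order, which is why only they funnel through the common
 * IS_ERR() unwind at the end.
 */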