File: dev/pci/drm/i915/i915_reg_defs.h
Warning: line 111, column 2: Undefined or garbage value returned to caller
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to their default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of these to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are power-context saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
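
/*
 * Illustrative sketch (editor's addition, not part of the driver): each
 * i915_wa entry pairs a register with a clear mask, a set mask and a
 * read(-back verification) mask. Applying one entry conceptually amounts to
 *
 *	new_val = (old_val & ~wa->clr) | wa->set;
 *
 * and verification later checks that (readback ^ wa->set) & wa->read == 0.
 * The names old_val/new_val are only for this example; the real apply and
 * verify paths live further down in this file.
 */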

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}
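
/*
 * Example (editor's illustration): workaround lists grow in WA_LIST_CHUNK
 * (16-entry) steps, so a list that ends up with, say, 19 entries was
 * allocated with room for 32; wa_init_finish() then kmemdup()s it down to
 * exactly 19 * sizeof(struct i915_wa) and frees the oversized array. A
 * count that is already a multiple of 16 is left untouched.
 */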

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(roundup2(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}

/*
 * WA operations on "masked registers". A masked register has the upper 16 bits
 * documented as "masked" in the b-spec. Its purpose is to allow writing to just
 * a portion of the register without a read-modify-write: you simply write, in
 * the upper 16 bits, the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
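
/*
 * Worked example (editor's illustration): for a masked register, enabling
 * bit 7 means writing (BIT(7) << 16) | BIT(7) = 0x00800080 -- the upper
 * half tells the hardware which bits to touch, so no read-modify-write is
 * needed. wa_masked_en(wal, reg, BIT(7)) records exactly that via
 * _MASKED_BIT_ENABLE(), with BIT(7) also used as the read-back mask.
 */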

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_masked_en(wal, GEN7_ROW_CHICKEN2,
		     DOP_CLOCK_GATING_DISABLE);

	wa_masked_en(wal, HALF_SLICE_CHICKEN3,
		     GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			     GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     FLOW_CONTROL_ENABLE |
		     PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
		     GEN9_ENABLE_YV12_BUGFIX |
		     GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
		      GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy, so we want common behaviour even
	 * though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs, so we play it safe
	 * and mimic skl regardless of what the bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_masked_en(wal, HALF_SLICE_CHICKEN3,
			     GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * -> 0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}
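
/*
 * Worked example (editor's illustration): if slice i has
 * subslice_7eu[i] == 0x4 (only subslice 2 has all 7 EUs enabled), then
 * ss = ffs(0x4) - 1 = 2 and vals[i] = 3 - 2 = 1, which is the value
 * programmed into the GEN9_IZ_HASHING field for that slice above. A mask
 * like 0x5 (two subslices) fails the is_power_of_2() check and leaves
 * vals[i] at 0.
 */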

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_masked_en(wal, GEN8_ROW_CHICKEN,
		     STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
		     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_add(wal, GEN10_CACHE_MODE_SS, 0,
	       _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
	       0 /* write-only, so skip validation */,
	       true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_masked_en(wal, GEN10_SAMPLER_MODE,
		     GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0, /* write-only register; skip validation */
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on the dg2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_TDS_TIMER_128,
	       0, false);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on several platforms.
 */
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/*
	 * Although some platforms refer to it as Wa_1604555607, we need to
	 * program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of this register is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read. The default value for this register is zero for all
	 * fields and there are no bit masks. So instead of doing an RMW we
	 * should just write the TDS timer value. For the same reason, read
	 * verification is ignored.
	 */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_TDS_TIMER_128,
	       0, false);
}
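
/*
 * Note (editor's sketch): because Wa_1608008084 makes FF_MODE2 reads
 * unreliable, the entry above is added with wa_add(..., read_mask = 0,
 * masked_reg = false), i.e. the full FF_MODE2_TDS_TIMER_MASK field is
 * simply overwritten with FF_MODE2_TDS_TIMER_128 and never read back for
 * verification, instead of the usual read-modify-write.
 */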

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	gen12_ctx_gt_tuning_init(engine, wal);

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337
	 *
	 * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
	 * to Wa_1608008084.
	 */
	wa_add(wal,
	       FF_MODE2,
	       FF_MODE2_GS_TIMER_MASK,
	       FF_MODE2_GS_TIMER_224,
	       0, false);
}

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16011186671:dg2_g11 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
		wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
		wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
	}

	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
		/* Wa_14010469329:dg2_g10 */
		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
			     XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);

		/*
		 * Wa_22010465075:dg2_g10
		 * Wa_22010613112:dg2_g10
		 * Wa_14010698770:dg2_g10
		 */
		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
			     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
	}

	/* Wa_16013271637:dg2 */
	wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
		     MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
		wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_15010599737:dg2 */
	wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
}

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers. When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior. However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility. At the moment there is no Linux
	 * userspace that utilizes third-level batchbuffers, so using the
	 * legacy meaning is the correct thing to do and avoids userspace
	 * having to make any changes. If/when we have userspace consumers
	 * that want to utilize third-level batch nesting, we can provide a
	 * context parameter to allow them to opt in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * Some blitter commands do not have a field for MOCS; those
	 * commands use the MOCS index pointed to by BLIT_CCTL, so the
	 * BLIT_CCTL register needs to be programmed to an un-cached entry.
	 */
	if (engine->class == COPY_ENGINE_CLASS) {
		mocs = engine->gt->mocs.uc_index;
		wa_write_clr_set(wal,
				 BLIT_CCTL(engine->mmio_base),
				 BLIT_CCTL_MASK,
				 BLIT_CCTL_MOCS(mocs, mocs));
	}
}
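
/*
 * Example (editor's illustration): with an uncached MOCS index of, say, 3,
 * BLIT_CCTL_MOCS(3, 3) packs the same index into both the destination and
 * source MOCS fields of BLIT_CCTL, so blitter commands that lack a MOCS
 * field of their own always resolve to the uncached entry.
 */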

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but it does program general context
 * registers. Adding that context register programming to the context
 * workaround list allows us to use the wa framework for proper application
 * and validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not the actual workaround but
	 * programming of context registers using workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_PONTEVECCHIO(i915))
		; /* noop; none at this time */
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_XEHPSDV(i915))
		; /* noop; none at this time */
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->set;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
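
/*
 * Resulting command stream (editor's sketch), for a list of N context
 * workarounds:
 *
 *	MI_LOAD_REGISTER_IMM(N)
 *	  reg[0] offset, reg[0] value (wa->set)
 *	  ...
 *	  reg[N-1] offset, reg[N-1] value
 *	MI_NOOP
 *
 * i.e. 2 * N + 2 dwords, which is exactly what intel_ring_begin() was asked
 * for above, bracketed by the two EMIT_BARRIER flushes.
 */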

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, the MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. On the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on the s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
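
/*
 * Worked example (editor's illustration): with slice_mask == 0x1 and a
 * subslice mask of 0x6 for slice 0, slice = ffs(0x1) - 1 = 0 and
 * subslice = ffs(0x6) - 1 = 1, so GEN8_MCR_SELECTOR is steered to
 * slice 0 / subslice 1 and every subsequent slice-specific MMIO read comes
 * from an enabled s/ss pair, as the comment above requires.
 */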

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
| 1066 | ||||
| 1067 | static void __set_mcr_steering(struct i915_wa_list *wal, | |||
| 1068 | i915_reg_t steering_reg, | |||
| 1069 | unsigned int slice, unsigned int subslice) | |||
| 1070 | { | |||
| 1071 | u32 mcr, mcr_mask; | |||
| 1072 | ||||
| 1073 | mcr = GEN11_MCR_SLICE(slice)(((slice) & 0xf) << 27) | GEN11_MCR_SUBSLICE(subslice)(((subslice) & 0x7) << 24); | |||
| 1074 | mcr_mask = GEN11_MCR_SLICE_MASK(((0xf) & 0xf) << 27) | GEN11_MCR_SUBSLICE_MASK(((0x7) & 0x7) << 24); | |||
| 1075 | ||||
| 1076 | wa_write_clr_set(wal, steering_reg, mcr_mask, mcr); | |||
| 1077 | } | |||
| 1078 | ||||
| 1079 | static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal, | |||
| 1080 | unsigned int slice, unsigned int subslice) | |||
| 1081 | { | |||
| 1082 | struct drm_printer p = drm_debug_printer("MCR Steering:"); | |||
| 1083 | ||||
| 1084 | __set_mcr_steering(wal, GEN8_MCR_SELECTOR((const i915_reg_t){ .reg = (0xfdc) }), slice, subslice); | |||
| 1085 | ||||
| 1086 | gt->default_steering.groupid = slice; | |||
| 1087 | gt->default_steering.instanceid = subslice; | |||
| 1088 | ||||
| 1089 | if (drm_debug_enabled(DRM_UT_DRIVER)drm_debug_enabled_raw(DRM_UT_DRIVER)) | |||
| 1090 | intel_gt_mcr_report_steering(&p, gt, false0); | |||
| 1091 | } | |||
| 1092 | ||||
| 1093 | static void | |||
| 1094 | icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1095 | { | |||
| 1096 | const struct sseu_dev_info *sseu = >->info.sseu; | |||
| 1097 | unsigned int subslice; | |||
| 1098 | ||||
| 1099 | GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11)((void)0); | |||
| 1100 | GEM_BUG_ON(hweight8(sseu->slice_mask) > 1)((void)0); | |||
| 1101 | ||||
| 1102 | /* | |||
| 1103 | * Although a platform may have subslices, we need to always steer | |||
| 1104 | * reads to the lowest instance that isn't fused off. When Render | |||
| 1105 | * Power Gating is enabled, grabbing forcewake will only power up a | |||
| 1106 | * single subslice (the "minconfig") if there isn't a real workload | |||
| 1107 | * that needs to be run; this means that if we steer register reads to | |||
| 1108 | * one of the higher subslices, we run the risk of reading back 0's or | |||
| 1109 | * random garbage. | |||
| 1110 | */ | |||
| 1111 | subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0))__builtin_ctzl(intel_sseu_get_hsw_subslices(sseu, 0)); | |||
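| | /* | |||
| | * Sketch, assuming a hypothetical subslice mask of 0b1100: __ffs() | |||
| | * returns 2, so implicitly steered reads go to subslice instance 2, | |||
| | * the lowest one that is not fused off. | |||
| | */ | |||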
| 1112 | ||||
| 1113 | /* | |||
| 1114 | * If the subslice we picked above also steers us to a valid L3 bank, | |||
| 1115 | * then we can just rely on the default steering and won't need to | |||
| 1116 | * worry about explicitly re-steering L3BANK reads later. | |||
| 1117 | */ | |||
| 1118 | if (gt->info.l3bank_mask & BIT(subslice)(1UL << (subslice))) | |||
| 1119 | gt->steering_table[L3BANK] = NULL((void *)0); | |||
| 1120 | ||||
| 1121 | __add_mcr_wa(gt, wal, 0, subslice); | |||
| 1122 | } | |||
| 1123 | ||||
| 1124 | static void | |||
| 1125 | xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1126 | { | |||
| 1127 | const struct sseu_dev_info *sseu = >->info.sseu; | |||
| 1128 | unsigned long slice, subslice = 0, slice_mask = 0; | |||
| 1129 | u32 lncf_mask = 0; | |||
| 1130 | int i; | |||
| 1131 | ||||
| 1132 | /* | |||
| 1133 | * On Xe_HP the steering increases in complexity. There are now several | |||
| 1134 | * more units that require steering and we're not guaranteed to be able | |||
| 1135 | * to find a common setting for all of them. These are: | |||
| 1136 | * - GSLICE (fusable) | |||
| 1137 | * - DSS (sub-unit within gslice; fusable) | |||
| 1138 | * - L3 Bank (fusable) | |||
| 1139 | * - MSLICE (fusable) | |||
| 1140 | * - LNCF (sub-unit within mslice; always present if mslice is present) | |||
| 1141 | * | |||
| 1142 | * We'll do our default/implicit steering based on GSLICE (in the | |||
| 1143 | * sliceid field) and DSS (in the subsliceid field). If we can | |||
| 1144 | * find overlap between the valid MSLICE and/or LNCF values with | |||
| 1145 | * a suitable GSLICE, then we can just re-use the default value and | |||
| 1146 | * skip any explicit steering at runtime. | |||
| 1147 | * | |||
| 1148 | * We only need to look for overlap between GSLICE/MSLICE/LNCF to find | |||
| 1149 | * a valid sliceid value. DSS steering is the only type of steering | |||
| 1150 | * that utilizes the 'subsliceid' bits. | |||
| 1151 | * | |||
| 1152 | * Also note that, even though the steering domain is called "GSlice" | |||
| 1153 | * and it is encoded in the register using the gslice format, the spec | |||
| 1154 | * says that the combined (geometry | compute) fuse should be used to | |||
| 1155 | * select the steering. | |||
| 1156 | */ | |||
| 1157 | ||||
| 1158 | /* Find the potential gslice candidates */ | |||
| 1159 | slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask, | |||
| 1160 | GEN_DSS_PER_GSLICE4); | |||
| 1161 | ||||
| 1162 | /* | |||
| 1163 | * Find the potential LNCF candidates. Either LNCF within a valid | |||
| 1164 | * mslice is fine. | |||
| 1165 | */ | |||
| 1166 | for_each_set_bit(i, >->info.mslice_mask, GEN12_MAX_MSLICES)for ((i) = find_first_bit((>->info.mslice_mask), (4) ); (i) < (4); (i) = find_next_bit((>->info.mslice_mask ), (4), (i) + 1)) | |||
| 1167 | lncf_mask |= (0x3 << (i * 2)); | |||
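| | /* | |||
| | * Sketch, assuming a hypothetical mslice_mask of 0b0101: the loop above | |||
| | * produces lncf_mask = 0x3 | (0x3 << 4) = 0x33, i.e. both LNCF instances | |||
| | * of each present mslice become valid sliceid candidates. | |||
| | */ | |||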
| 1168 | ||||
| 1169 | /* | |||
| 1170 | * Are there any sliceid values that work for both GSLICE and LNCF | |||
| 1171 | * steering? | |||
| 1172 | */ | |||
| 1173 | if (slice_mask & lncf_mask) { | |||
| 1174 | slice_mask &= lncf_mask; | |||
| 1175 | gt->steering_table[LNCF] = NULL((void *)0); | |||
| 1176 | } | |||
| 1177 | ||||
| 1178 | /* How about sliceid values that also work for MSLICE steering? */ | |||
| 1179 | if (slice_mask & gt->info.mslice_mask) { | |||
| 1180 | slice_mask &= gt->info.mslice_mask; | |||
| 1181 | gt->steering_table[MSLICE] = NULL((void *)0); | |||
| 1182 | } | |||
| 1183 | ||||
| 1184 | slice = __ffs(slice_mask)__builtin_ctzl(slice_mask); | |||
| 1185 | subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE4, slice) % | |||
| 1186 | GEN_DSS_PER_GSLICE4; | |||
| 1187 | ||||
| 1188 | __add_mcr_wa(gt, wal, slice, subslice); | |||
| 1189 | ||||
| 1190 | /* | |||
| 1191 | * SQIDI ranges are special because they use different steering | |||
| 1192 | * registers than everything else we work with. On XeHP SDV and | |||
| 1193 | * DG2-G10, any value in the steering registers will work fine since | |||
| 1194 | * all instances are present, but DG2-G11 only has SQIDI instances at | |||
| 1195 | * IDs 2 and 3, so we need to steer to one of those. For simplicity | |||
| 1196 | * we'll just steer to a hardcoded "2" since that value will work | |||
| 1197 | * everywhere. | |||
| 1198 | */ | |||
| 1199 | __set_mcr_steering(wal, MCFG_MCR_SELECTOR((const i915_reg_t){ .reg = (0xfd0) }), 0, 2); | |||
| 1200 | __set_mcr_steering(wal, SF_MCR_SELECTOR((const i915_reg_t){ .reg = (0xfd8) }), 0, 2); | |||
| 1201 | } | |||
| 1202 | ||||
| 1203 | static void | |||
| 1204 | pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1205 | { | |||
| 1206 | unsigned int dss; | |||
| 1207 | ||||
| 1208 | /* | |||
| 1209 | * Setup implicit steering for COMPUTE and DSS ranges to the first | |||
| 1210 | * non-fused-off DSS. All other types of MCR registers will be | |||
| 1211 | * explicitly steered. | |||
| 1212 | */ | |||
| 1213 | dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0); | |||
| 1214 | __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE8, dss % GEN_DSS_PER_CSLICE8); | |||
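| | /* | |||
| | * E.g., assuming the first non-fused-off DSS were number 9: with 8 DSS | |||
| | * per cslice this steers to groupid 9 / 8 = 1, instanceid 9 % 8 = 1. | |||
| | */ | |||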
| 1215 | } | |||
| 1216 | ||||
| 1217 | static void | |||
| 1218 | icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1219 | { | |||
| 1220 | struct drm_i915_privateinteldrm_softc *i915 = gt->i915; | |||
| 1221 | ||||
| 1222 | icl_wa_init_mcr(gt, wal); | |||
| 1223 | ||||
| 1224 | /* WaModifyGamTlbPartitioning:icl */ | |||
| 1225 | wa_write_clr_set(wal, | |||
| 1226 | GEN11_GACB_PERF_CTRL((const i915_reg_t){ .reg = (0x4b80) }), | |||
| 1227 | GEN11_HASH_CTRL_MASK(0x3 << 12 | 0xf << 0), | |||
| 1228 | GEN11_HASH_CTRL_BIT0(1 << 0) | GEN11_HASH_CTRL_BIT4(1 << 12)); | |||
| 1229 | ||||
| 1230 | /* Wa_1405766107:icl | |||
| 1231 | * Formerly known as WaCL2SFHalfMaxAlloc | |||
| 1232 | */ | |||
| 1233 | wa_write_or(wal, | |||
| 1234 | GEN11_LSN_UNSLCVC((const i915_reg_t){ .reg = (0xb43c) }), | |||
| 1235 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC(1 << 7) | | |||
| 1236 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC(1 << 9)); | |||
| 1237 | ||||
| 1238 | /* Wa_220166154:icl | |||
| 1239 | * Formerly known as WaDisCtxReload | |||
| 1240 | */ | |||
| 1241 | wa_write_or(wal, | |||
| 1242 | GEN8_GAMW_ECO_DEV_RW_IA((const i915_reg_t){ .reg = (0x4080) }), | |||
| 1243 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE(1 << 7)); | |||
| 1244 | ||||
| 1245 | /* Wa_1406463099:icl | |||
| 1246 | * Formerly known as WaGamTlbPendError | |||
| 1247 | */ | |||
| 1248 | wa_write_or(wal, | |||
| 1249 | GAMT_CHKN_BIT_REG((const i915_reg_t){ .reg = (0x4ab8) }), | |||
| 1250 | GAMT_CHKN_DISABLE_L3_COH_PIPE(1 << 31)); | |||
| 1251 | ||||
| 1252 | /* | |||
| 1253 | * Wa_1408615072:icl,ehl (vsunit) | |||
| 1254 | * Wa_1407596294:icl,ehl (hsunit) | |||
| 1255 | */ | |||
| 1256 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9434) }), | |||
| 1257 | VSUNIT_CLKGATE_DIS((u32)((1UL << (3)) + 0)) | HSUNIT_CLKGATE_DIS((u32)((1UL << (8)) + 0))); | |||
| 1258 | ||||
| 1259 | /* Wa_1407352427:icl,ehl */ | |||
| 1260 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2((const i915_reg_t){ .reg = (0x94e4) }), | |||
| 1261 | PSDUNIT_CLKGATE_DIS((u32)((1UL << (5)) + 0))); | |||
| 1262 | ||||
| 1263 | /* Wa_1406680159:icl,ehl */ | |||
| 1264 | wa_write_or(wal, | |||
| 1265 | SUBSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9524) }), | |||
| 1266 | GWUNIT_CLKGATE_DIS((u32)((1UL << (16)) + 0))); | |||
| 1267 | ||||
| 1268 | /* Wa_1607087056:icl,ehl,jsl */ | |||
| 1269 | if (IS_ICELAKE(i915)IS_PLATFORM(i915, INTEL_ICELAKE) || | |||
| 1270 | IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)((IS_PLATFORM(i915, INTEL_JASPERLAKE) || IS_PLATFORM(i915, INTEL_ELKHARTLAKE )) && (({ int __ret = !!((((&(i915)->__runtime )->step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(i915)->drm))->dev), "" , "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1271 | wa_write_or(wal, | |||
| 1272 | SLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x94d4) }), | |||
| 1273 | L3_CLKGATE_DIS((u32)((1UL << (16)) + 0)) | L3_CR2X_CLKGATE_DIS((u32)((1UL << (17)) + 0))); | |||
| 1274 | ||||
| 1275 | /* | |||
| 1276 | * This is not a documented workaround, but rather an optimization | |||
| 1277 | * to reduce sampler power. | |||
| 1278 | */ | |||
| 1279 | wa_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN((const i915_reg_t){ .reg = (0x9550) }), DFR_DISABLE(1 << 9)); | |||
| 1280 | } | |||
| 1281 | ||||
| 1282 | /* | |||
| 1283 | * Though there are per-engine instances of these registers, | |||
| 1284 | * they retain their value through engine resets and should | |||
| 1285 | * only be provided on the GT workaround list rather than | |||
| 1286 | * the engine-specific workaround list. | |||
| 1287 | */ | |||
| 1288 | static void | |||
| 1289 | wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1290 | { | |||
| 1291 | struct intel_engine_cs *engine; | |||
| 1292 | int id; | |||
| 1293 | ||||
| 1294 | for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine ) = (gt)->engine[(id)])) {} else { | |||
| 1295 | if (engine->class != VIDEO_DECODE_CLASS1 || | |||
| 1296 | (engine->instance % 2)) | |||
| 1297 | continue; | |||
| 1298 | ||||
| 1299 | wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x3f10) }), | |||
| 1300 | IECPUNIT_CLKGATE_DIS((u32)((1UL << (22)) + 0))); | |||
| 1301 | } | |||
| 1302 | } | |||
| 1303 | ||||
| 1304 | static void | |||
| 1305 | gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1306 | { | |||
| 1307 | icl_wa_init_mcr(gt, wal); | |||
| 1308 | ||||
| 1309 | /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */ | |||
| 1310 | wa_14011060649(gt, wal); | |||
| 1311 | ||||
| 1312 | /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */ | |||
| 1313 | wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN((const i915_reg_t){ .reg = (0x9550) }), DFR_DISABLE(1 << 9)); | |||
| 1314 | } | |||
| 1315 | ||||
| 1316 | static void | |||
| 1317 | tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1318 | { | |||
| 1319 | struct drm_i915_privateinteldrm_softc *i915 = gt->i915; | |||
| 1320 | ||||
| 1321 | gen12_gt_workarounds_init(gt, wal); | |||
| 1322 | ||||
| 1323 | /* Wa_1409420604:tgl */ | |||
| 1324 | if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, (0)) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1325 | wa_write_or(wal, | |||
| 1326 | SUBSLICE_UNIT_LEVEL_CLKGATE2((const i915_reg_t){ .reg = (0x9528) }), | |||
| 1327 | CPSSUNIT_CLKGATE_DIS((u32)((1UL << (9)) + 0))); | |||
| 1328 | ||||
| 1329 | /* Wa_1607087056:tgl also known as BUG:1409180338 */ | |||
| 1330 | if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, (0)) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1331 | wa_write_or(wal, | |||
| 1332 | SLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x94d4) }), | |||
| 1333 | L3_CLKGATE_DIS((u32)((1UL << (16)) + 0)) | L3_CR2X_CLKGATE_DIS((u32)((1UL << (17)) + 0))); | |||
| 1334 | ||||
| 1335 | /* Wa_1408615072:tgl[a0] */ | |||
| 1336 | if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, (0)) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1337 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2((const i915_reg_t){ .reg = (0x94e4) }), | |||
| 1338 | VSUNIT_CLKGATE_DIS_TGL((u32)((1UL << (19)) + 0))); | |||
| 1339 | } | |||
| 1340 | ||||
| 1341 | static void | |||
| 1342 | dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1343 | { | |||
| 1344 | struct drm_i915_privateinteldrm_softc *i915 = gt->i915; | |||
| 1345 | ||||
| 1346 | gen12_gt_workarounds_init(gt, wal); | |||
| 1347 | ||||
| 1348 | /* Wa_1607087056:dg1 */ | |||
| 1349 | if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_PLATFORM(i915, INTEL_DG1) && (({ int __ret = !!(( ((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1350 | wa_write_or(wal, | |||
| 1351 | SLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x94d4) }), | |||
| 1352 | L3_CLKGATE_DIS((u32)((1UL << (16)) + 0)) | L3_CR2X_CLKGATE_DIS((u32)((1UL << (17)) + 0))); | |||
| 1353 | ||||
| 1354 | /* Wa_1409420604:dg1 */ | |||
| 1355 | if (IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1)) | |||
| 1356 | wa_write_or(wal, | |||
| 1357 | SUBSLICE_UNIT_LEVEL_CLKGATE2((const i915_reg_t){ .reg = (0x9528) }), | |||
| 1358 | CPSSUNIT_CLKGATE_DIS((u32)((1UL << (9)) + 0))); | |||
| 1359 | ||||
| 1360 | /* Wa_1408615072:dg1 */ | |||
| 1361 | /* Empirical testing shows this register is unaffected by engine reset. */ | |||
| 1362 | if (IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1)) | |||
| 1363 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2((const i915_reg_t){ .reg = (0x94e4) }), | |||
| 1364 | VSUNIT_CLKGATE_DIS_TGL((u32)((1UL << (19)) + 0))); | |||
| 1365 | } | |||
| 1366 | ||||
| 1367 | static void | |||
| 1368 | xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1369 | { | |||
| 1370 | struct drm_i915_privateinteldrm_softc *i915 = gt->i915; | |||
| 1371 | ||||
| 1372 | xehp_init_mcr(gt, wal); | |||
| 1373 | ||||
| 1374 | /* Wa_1409757795:xehpsdv */ | |||
| 1375 | wa_write_or(wal, SCCGCTL94DC((const i915_reg_t){ .reg = (0x94dc) }), CG3DDISURB((u32)((1UL << (14)) + 0))); | |||
| 1376 | ||||
| 1377 | /* Wa_16011155590:xehpsdv */ | |||
| 1378 | if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_PLATFORM(i915, INTEL_XEHPSDV) && (({ int __ret = ! !((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 1379 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9434) }), | |||
| 1380 | TSGUNIT_CLKGATE_DIS((u32)((1UL << (17)) + 0))); | |||
| 1381 | ||||
| 1382 | /* Wa_14011780169:xehpsdv */ | |||
| 1383 | if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)(IS_PLATFORM(i915, INTEL_XEHPSDV) && (({ int __ret = ! !((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_FOREVER )))) { | |||
| 1384 | wa_write_or(wal, UNSLCGCTL9440((const i915_reg_t){ .reg = (0x9440) }), GAMTLBOACS_CLKGATE_DIS((u32)((1UL << (28)) + 0)) | | |||
| 1385 | GAMTLBVDBOX7_CLKGATE_DIS((u32)((1UL << (22)) + 0)) | | |||
| 1386 | GAMTLBVDBOX6_CLKGATE_DIS((u32)((1UL << (26)) + 0)) | | |||
| 1387 | GAMTLBVDBOX5_CLKGATE_DIS((u32)((1UL << (27)) + 0)) | | |||
| 1388 | GAMTLBVDBOX4_CLKGATE_DIS((u32)((1UL << (23)) + 0)) | | |||
| 1389 | GAMTLBVDBOX3_CLKGATE_DIS((u32)((1UL << (24)) + 0)) | | |||
| 1390 | GAMTLBVDBOX2_CLKGATE_DIS((u32)((1UL << (21)) + 0)) | | |||
| 1391 | GAMTLBVDBOX1_CLKGATE_DIS((u32)((1UL << (6)) + 0)) | | |||
| 1392 | GAMTLBVDBOX0_CLKGATE_DIS((u32)((1UL << (17)) + 0)) | | |||
| 1393 | GAMTLBKCR_CLKGATE_DIS((u32)((1UL << (16)) + 0)) | | |||
| 1394 | GAMTLBGUC_CLKGATE_DIS((u32)((1UL << (15)) + 0)) | | |||
| 1395 | GAMTLBBLT_CLKGATE_DIS((u32)((1UL << (14)) + 0))); | |||
| 1396 | wa_write_or(wal, UNSLCGCTL9444((const i915_reg_t){ .reg = (0x9444) }), GAMTLBGFXA0_CLKGATE_DIS((u32)((1UL << (30)) + 0)) | | |||
| 1397 | GAMTLBGFXA1_CLKGATE_DIS((u32)((1UL << (29)) + 0)) | | |||
| 1398 | GAMTLBCOMPA0_CLKGATE_DIS((u32)((1UL << (28)) + 0)) | | |||
| 1399 | GAMTLBCOMPA1_CLKGATE_DIS((u32)((1UL << (27)) + 0)) | | |||
| 1400 | GAMTLBCOMPB0_CLKGATE_DIS((u32)((1UL << (26)) + 0)) | | |||
| 1401 | GAMTLBCOMPB1_CLKGATE_DIS((u32)((1UL << (25)) + 0)) | | |||
| 1402 | GAMTLBCOMPC0_CLKGATE_DIS((u32)((1UL << (24)) + 0)) | | |||
| 1403 | GAMTLBCOMPC1_CLKGATE_DIS((u32)((1UL << (23)) + 0)) | | |||
| 1404 | GAMTLBCOMPD0_CLKGATE_DIS((u32)((1UL << (22)) + 0)) | | |||
| 1405 | GAMTLBCOMPD1_CLKGATE_DIS((u32)((1UL << (21)) + 0)) | | |||
| 1406 | GAMTLBMERT_CLKGATE_DIS((u32)((1UL << (20)) + 0)) | | |||
| 1407 | GAMTLBVEBOX3_CLKGATE_DIS((u32)((1UL << (19)) + 0)) | | |||
| 1408 | GAMTLBVEBOX2_CLKGATE_DIS((u32)((1UL << (18)) + 0)) | | |||
| 1409 | GAMTLBVEBOX1_CLKGATE_DIS((u32)((1UL << (17)) + 0)) | | |||
| 1410 | GAMTLBVEBOX0_CLKGATE_DIS((u32)((1UL << (16)) + 0))); | |||
| 1411 | } | |||
| 1412 | ||||
| 1413 | /* Wa_16012725990:xehpsdv */ | |||
| 1414 | if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)(IS_PLATFORM(i915, INTEL_XEHPSDV) && (({ int __ret = ! !((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A1) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_FOREVER )))) | |||
| 1415 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9434) }), VFUNIT_CLKGATE_DIS((u32)((1UL << (20)) + 0))); | |||
| 1416 | ||||
| 1417 | /* Wa_14011060649:xehpsdv */ | |||
| 1418 | wa_14011060649(gt, wal); | |||
| 1419 | } | |||
| 1420 | ||||
| 1421 | static void | |||
| 1422 | dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1423 | { | |||
| 1424 | struct intel_engine_cs *engine; | |||
| 1425 | int id; | |||
| 1426 | ||||
| 1427 | xehp_init_mcr(gt, wal); | |||
| 1428 | ||||
| 1429 | /* Wa_14011060649:dg2 */ | |||
| 1430 | wa_14011060649(gt, wal); | |||
| 1431 | ||||
| 1432 | /* | |||
| 1433 | * Although there are per-engine instances of these registers, | |||
| 1434 | * they technically exist outside the engine itself and are not | |||
| 1435 | * impacted by engine resets. Furthermore, they're part of the | |||
| 1436 | * GuC blacklist so trying to treat them as engine workarounds | |||
| 1437 | * will result in GuC initialization failure and a wedged GPU. | |||
| 1438 | */ | |||
| 1439 | for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine ) = (gt)->engine[(id)])) {} else { | |||
| 1440 | if (engine->class != VIDEO_DECODE_CLASS1) | |||
| 1441 | continue; | |||
| 1442 | ||||
| 1443 | /* Wa_16010515920:dg2_g10 */ | |||
| 1444 | if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(gt->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(gt->i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(gt->i915)->drm))->dev), "", "drm_WARN_ON(" "((&(gt->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(gt->i915 )->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(gt->i915)->__runtime)->step.graphics_step) < (STEP_B0)))) | |||
| 1445 | wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x3f18) }), | |||
| 1446 | ALNUNIT_CLKGATE_DIS((u32)((1UL << (13)) + 0))); | |||
| 1447 | } | |||
| 1448 | ||||
| 1449 | if (IS_DG2_G10(gt->i915)IS_SUBPLATFORM(gt->i915, INTEL_DG2, 0)) { | |||
| 1450 | /* Wa_22010523718:dg2 */ | |||
| 1451 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9434) }), | |||
| 1452 | CG3DDISCFEG_CLKGATE_DIS((u32)((1UL << (17)) + 0))); | |||
| 1453 | ||||
| 1454 | /* Wa_14011006942:dg2 */ | |||
| 1455 | wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9524) }), | |||
| 1456 | DSS_ROUTER_CLKGATE_DIS((u32)((1UL << (28)) + 0))); | |||
| 1457 | } | |||
| 1458 | ||||
| 1459 | if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(gt->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(gt->i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(gt->i915)->drm))->dev), "", "drm_WARN_ON(" "((&(gt->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(gt->i915 )->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(gt->i915)->__runtime)->step.graphics_step) < (STEP_B0)))) { | |||
| 1460 | /* Wa_14010948348:dg2_g10 */ | |||
| 1461 | wa_write_or(wal, UNSLCGCTL9430((const i915_reg_t){ .reg = (0x9430) }), MSQDUNIT_CLKGATE_DIS((u32)((1UL << (3)) + 0))); | |||
| 1462 | ||||
| 1463 | /* Wa_14011037102:dg2_g10 */ | |||
| 1464 | wa_write_or(wal, UNSLCGCTL9444((const i915_reg_t){ .reg = (0x9444) }), LTCDD_CLKGATE_DIS((u32)((1UL << (10)) + 0))); | |||
| 1465 | ||||
| 1466 | /* Wa_14011371254:dg2_g10 */ | |||
| 1467 | wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x94d4) }), NODEDSS_CLKGATE_DIS((u32)((1UL << (12)) + 0))); | |||
| 1468 | ||||
| 1469 | /* Wa_14011431319:dg2_g10 */ | |||
| 1470 | wa_write_or(wal, UNSLCGCTL9440((const i915_reg_t){ .reg = (0x9440) }), GAMTLBOACS_CLKGATE_DIS((u32)((1UL << (28)) + 0)) | | |||
| 1471 | GAMTLBVDBOX7_CLKGATE_DIS((u32)((1UL << (22)) + 0)) | | |||
| 1472 | GAMTLBVDBOX6_CLKGATE_DIS((u32)((1UL << (26)) + 0)) | | |||
| 1473 | GAMTLBVDBOX5_CLKGATE_DIS((u32)((1UL << (27)) + 0)) | | |||
| 1474 | GAMTLBVDBOX4_CLKGATE_DIS((u32)((1UL << (23)) + 0)) | | |||
| 1475 | GAMTLBVDBOX3_CLKGATE_DIS((u32)((1UL << (24)) + 0)) | | |||
| 1476 | GAMTLBVDBOX2_CLKGATE_DIS((u32)((1UL << (21)) + 0)) | | |||
| 1477 | GAMTLBVDBOX1_CLKGATE_DIS((u32)((1UL << (6)) + 0)) | | |||
| 1478 | GAMTLBVDBOX0_CLKGATE_DIS((u32)((1UL << (17)) + 0)) | | |||
| 1479 | GAMTLBKCR_CLKGATE_DIS((u32)((1UL << (16)) + 0)) | | |||
| 1480 | GAMTLBGUC_CLKGATE_DIS((u32)((1UL << (15)) + 0)) | | |||
| 1481 | GAMTLBBLT_CLKGATE_DIS((u32)((1UL << (14)) + 0))); | |||
| 1482 | wa_write_or(wal, UNSLCGCTL9444((const i915_reg_t){ .reg = (0x9444) }), GAMTLBGFXA0_CLKGATE_DIS((u32)((1UL << (30)) + 0)) | | |||
| 1483 | GAMTLBGFXA1_CLKGATE_DIS((u32)((1UL << (29)) + 0)) | | |||
| 1484 | GAMTLBCOMPA0_CLKGATE_DIS((u32)((1UL << (28)) + 0)) | | |||
| 1485 | GAMTLBCOMPA1_CLKGATE_DIS((u32)((1UL << (27)) + 0)) | | |||
| 1486 | GAMTLBCOMPB0_CLKGATE_DIS((u32)((1UL << (26)) + 0)) | | |||
| 1487 | GAMTLBCOMPB1_CLKGATE_DIS((u32)((1UL << (25)) + 0)) | | |||
| 1488 | GAMTLBCOMPC0_CLKGATE_DIS((u32)((1UL << (24)) + 0)) | | |||
| 1489 | GAMTLBCOMPC1_CLKGATE_DIS((u32)((1UL << (23)) + 0)) | | |||
| 1490 | GAMTLBCOMPD0_CLKGATE_DIS((u32)((1UL << (22)) + 0)) | | |||
| 1491 | GAMTLBCOMPD1_CLKGATE_DIS((u32)((1UL << (21)) + 0)) | | |||
| 1492 | GAMTLBMERT_CLKGATE_DIS((u32)((1UL << (20)) + 0)) | | |||
| 1493 | GAMTLBVEBOX3_CLKGATE_DIS((u32)((1UL << (19)) + 0)) | | |||
| 1494 | GAMTLBVEBOX2_CLKGATE_DIS((u32)((1UL << (18)) + 0)) | | |||
| 1495 | GAMTLBVEBOX1_CLKGATE_DIS((u32)((1UL << (17)) + 0)) | | |||
| 1496 | GAMTLBVEBOX0_CLKGATE_DIS((u32)((1UL << (16)) + 0))); | |||
| 1497 | ||||
| 1498 | /* Wa_14010569222:dg2_g10 */ | |||
| 1499 | wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE((const i915_reg_t){ .reg = (0x9434) }), | |||
| 1500 | GAMEDIA_CLKGATE_DIS((u32)((1UL << (11)) + 0))); | |||
| 1501 | ||||
| 1502 | /* Wa_14011028019:dg2_g10 */ | |||
| 1503 | wa_write_or(wal, SSMCGCTL9530((const i915_reg_t){ .reg = (0x9530) }), RTFUNIT_CLKGATE_DIS((u32)((1UL << (18)) + 0))); | |||
| 1504 | } | |||
| 1505 | ||||
| 1506 | /* Wa_14014830051:dg2 */ | |||
| 1507 | wa_write_clr(wal, SARB_CHICKEN1((const i915_reg_t){ .reg = (0xe90c) }), COMP_CKN_IN((u32)((((~0UL) >> (64 - (30) - 1)) & ((~0UL) << (29))) + 0))); | |||
| 1508 | ||||
| 1509 | /* | |||
| 1510 | * The following are not actually "workarounds" but rather | |||
| 1511 | * recommended tuning settings documented in the bspec's | |||
| 1512 | * performance guide section. | |||
| 1513 | */ | |||
| 1514 | wa_write_or(wal, GEN12_SQCM((const i915_reg_t){ .reg = (0x8724) }), EN_32B_ACCESS((u32)((1UL << (30)) + 0))); | |||
| 1515 | ||||
| 1516 | /* Wa_14015795083 */ | |||
| 1517 | wa_write_clr(wal, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), GEN12_DOP_CLOCK_GATE_RENDER_ENABLE((u32)((1UL << (1)) + 0))); | |||
| 1518 | } | |||
| 1519 | ||||
| 1520 | static void | |||
| 1521 | pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1522 | { | |||
| 1523 | pvc_init_mcr(gt, wal); | |||
| 1524 | ||||
| 1525 | /* Wa_14015795083 */ | |||
| 1526 | wa_write_clr(wal, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), GEN12_DOP_CLOCK_GATE_RENDER_ENABLE((u32)((1UL << (1)) + 0))); | |||
| 1527 | } | |||
| 1528 | ||||
| 1529 | static void | |||
| 1530 | gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) | |||
| 1531 | { | |||
| 1532 | struct drm_i915_privateinteldrm_softc *i915 = gt->i915; | |||
| 1533 | ||||
| 1534 | if (IS_PONTEVECCHIO(i915)IS_PLATFORM(i915, INTEL_PONTEVECCHIO)) | |||
| 1535 | pvc_gt_workarounds_init(gt, wal); | |||
| 1536 | else if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2)) | |||
| 1537 | dg2_gt_workarounds_init(gt, wal); | |||
| 1538 | else if (IS_XEHPSDV(i915)IS_PLATFORM(i915, INTEL_XEHPSDV)) | |||
| 1539 | xehpsdv_gt_workarounds_init(gt, wal); | |||
| 1540 | else if (IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1)) | |||
| 1541 | dg1_gt_workarounds_init(gt, wal); | |||
| 1542 | else if (IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE)) | |||
| 1543 | tgl_gt_workarounds_init(gt, wal); | |||
| 1544 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 12) | |||
| 1545 | gen12_gt_workarounds_init(gt, wal); | |||
| 1546 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 11) | |||
| 1547 | icl_gt_workarounds_init(gt, wal); | |||
| 1548 | else if (IS_COFFEELAKE(i915)IS_PLATFORM(i915, INTEL_COFFEELAKE) || IS_COMETLAKE(i915)IS_PLATFORM(i915, INTEL_COMETLAKE)) | |||
| 1549 | cfl_gt_workarounds_init(gt, wal); | |||
| 1550 | else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE)) | |||
| 1551 | glk_gt_workarounds_init(gt, wal); | |||
| 1552 | else if (IS_KABYLAKE(i915)IS_PLATFORM(i915, INTEL_KABYLAKE)) | |||
| 1553 | kbl_gt_workarounds_init(gt, wal); | |||
| 1554 | else if (IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) | |||
| 1555 | gen9_gt_workarounds_init(gt, wal); | |||
| 1556 | else if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE)) | |||
| 1557 | skl_gt_workarounds_init(gt, wal); | |||
| 1558 | else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL)) | |||
| 1559 | hsw_gt_workarounds_init(gt, wal); | |||
| 1560 | else if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW)) | |||
| 1561 | vlv_gt_workarounds_init(gt, wal); | |||
| 1562 | else if (IS_IVYBRIDGE(i915)IS_PLATFORM(i915, INTEL_IVYBRIDGE)) | |||
| 1563 | ivb_gt_workarounds_init(gt, wal); | |||
| 1564 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 6) | |||
| 1565 | snb_gt_workarounds_init(gt, wal); | |||
| 1566 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 5) | |||
| 1567 | ilk_gt_workarounds_init(gt, wal); | |||
| 1568 | else if (IS_G4X(i915)(IS_PLATFORM(i915, INTEL_G45) || IS_PLATFORM(i915, INTEL_GM45 ))) | |||
| 1569 | g4x_gt_workarounds_init(gt, wal); | |||
| 1570 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 4) | |||
| 1571 | gen4_gt_workarounds_init(gt, wal); | |||
| 1572 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) <= 8) | |||
| 1573 | ; | |||
| 1574 | else | |||
| 1575 | MISSING_CASE(GRAPHICS_VER(i915))({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "((&(i915)->__runtime)->graphics.ip.ver)", (long) (((&(i915)->__runtime)->graphics.ip.ver))); __builtin_expect (!!(__ret), 0); }); | |||
| 1576 | } | |||
| 1577 | ||||
| 1578 | void intel_gt_init_workarounds(struct intel_gt *gt) | |||
| 1579 | { | |||
| 1580 | struct i915_wa_list *wal = >->wa_list; | |||
| 1581 | ||||
| 1582 | wa_init_start(wal, "GT", "global"); | |||
| 1583 | gt_init_workarounds(gt, wal); | |||
| 1584 | wa_init_finish(wal); | |||
| 1585 | } | |||
| 1586 | ||||
| 1587 | static enum forcewake_domains | |||
| 1588 | wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) | |||
| 1589 | { | |||
| 1590 | enum forcewake_domains fw = 0; | |||
| 1591 | struct i915_wa *wa; | |||
| 1592 | unsigned int i; | |||
| 1593 | ||||
| 1594 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) | |||
| 1595 | fw |= intel_uncore_forcewake_for_reg(uncore, | |||
| 1596 | wa->reg, | |||
| 1597 | FW_REG_READ(1) | | |||
| 1598 | FW_REG_WRITE(2)); | |||
| 1599 | ||||
| 1600 | return fw; | |||
| 1601 | } | |||
| 1602 | ||||
| 1603 | static bool_Bool | |||
| 1604 | wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) | |||
| 1605 | { | |||
| 1606 | if ((cur ^ wa->set) & wa->read) { | |||
| 1607 | DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",__drm_err("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n" , name, from, i915_mmio_reg_offset(wa->reg), cur, cur & wa->read, wa->set & wa->read) | |||
| 1608 | name, from, i915_mmio_reg_offset(wa->reg),__drm_err("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n" , name, from, i915_mmio_reg_offset(wa->reg), cur, cur & wa->read, wa->set & wa->read) | |||
| 1609 | cur, cur & wa->read, wa->set & wa->read)__drm_err("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n" , name, from, i915_mmio_reg_offset(wa->reg), cur, cur & wa->read, wa->set & wa->read); | |||
| 1610 | ||||
| 1611 | return false0; | |||
| 1612 | } | |||
| 1613 | ||||
| 1614 | return true1; | |||
| 1615 | } | |||
| 1616 | ||||
| 1617 | static void | |||
| 1618 | wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal) | |||
| 1619 | { | |||
| 1620 | struct intel_uncore *uncore = gt->uncore; | |||
| 1621 | enum forcewake_domains fw; | |||
| 1622 | unsigned long flags; | |||
| 1623 | struct i915_wa *wa; | |||
| 1624 | unsigned int i; | |||
| 1625 | ||||
| 1626 | if (!wal->count) | |||
| 1627 | return; | |||
| 1628 | ||||
| 1629 | fw = wal_get_fw_for_rmw(uncore, wal); | |||
| 1630 | ||||
| 1631 | spin_lock_irqsave(&uncore->lock, flags)do { flags = 0; mtx_enter(&uncore->lock); } while (0); | |||
| 1632 | intel_uncore_forcewake_get__locked(uncore, fw); | |||
| 1633 | ||||
| 1634 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { | |||
| 1635 | u32 val, old = 0; | |||
| 1636 | ||||
| 1637 | /* open-coded rmw due to steering */ | |||
| 1638 | old = wa->clr ? intel_gt_mcr_read_any_fw(gt, wa->reg) : 0; | |||
| 1639 | val = (old & ~wa->clr) | wa->set; | |||
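| | /* | |||
| | * Sketch with assumed values: clr = 0x00ff, set = 0x000f, old = 0x12a5 | |||
| | * gives val = (0x12a5 & ~0x00ff) | 0x000f = 0x120f; entries without a | |||
| | * clr mask skip the read and are written unconditionally below. | |||
| | */ | |||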
| 1640 | if (val != old || !wa->clr) | |||
| 1641 | intel_uncore_write_fw(uncore, wa->reg, val)__raw_uncore_write32(uncore, wa->reg, val); | |||
| 1642 | ||||
| 1643 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)0) | |||
| 1644 | wa_verify(wa, intel_gt_mcr_read_any_fw(gt, wa->reg), | |||
| 1645 | wal->name, "application"); | |||
| 1646 | } | |||
| 1647 | ||||
| 1648 | intel_uncore_forcewake_put__locked(uncore, fw); | |||
| 1649 | spin_unlock_irqrestore(&uncore->lock, flags)do { (void)(flags); mtx_leave(&uncore->lock); } while ( 0); | |||
| 1650 | } | |||
| 1651 | ||||
| 1652 | void intel_gt_apply_workarounds(struct intel_gt *gt) | |||
| 1653 | { | |||
| 1654 | wa_list_apply(gt, >->wa_list); | |||
| 1655 | } | |||
| 1656 | ||||
| 1657 | static bool_Bool wa_list_verify(struct intel_gt *gt, | |||
| 1658 | const struct i915_wa_list *wal, | |||
| 1659 | const char *from) | |||
| 1660 | { | |||
| 1661 | struct intel_uncore *uncore = gt->uncore; | |||
| 1662 | struct i915_wa *wa; | |||
| 1663 | enum forcewake_domains fw; | |||
| 1664 | unsigned long flags; | |||
| 1665 | unsigned int i; | |||
| 1666 | bool_Bool ok = true1; | |||
| 1667 | ||||
| 1668 | fw = wal_get_fw_for_rmw(uncore, wal); | |||
| 1669 | ||||
| 1670 | spin_lock_irqsave(&uncore->lock, flags)do { flags = 0; mtx_enter(&uncore->lock); } while (0); | |||
| 1671 | intel_uncore_forcewake_get__locked(uncore, fw); | |||
| 1672 | ||||
| 1673 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) | |||
| 1674 | ok &= wa_verify(wa, | |||
| 1675 | intel_gt_mcr_read_any_fw(gt, wa->reg), | |||
| 1676 | wal->name, from); | |||
| 1677 | ||||
| 1678 | intel_uncore_forcewake_put__locked(uncore, fw); | |||
| 1679 | spin_unlock_irqrestore(&uncore->lock, flags)do { (void)(flags); mtx_leave(&uncore->lock); } while ( 0); | |||
| 1680 | ||||
| 1681 | return ok; | |||
| 1682 | } | |||
| 1683 | ||||
| 1684 | bool_Bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) | |||
| 1685 | { | |||
| 1686 | return wa_list_verify(gt, >->wa_list, from); | |||
| 1687 | } | |||
| 1688 | ||||
| 1689 | __maybe_unused__attribute__((__unused__)) | |||
| 1690 | static bool_Bool is_nonpriv_flags_valid(u32 flags) | |||
| 1691 | { | |||
| 1692 | /* Check only valid flag bits are set */ | |||
| 1693 | if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID((3 << 0) | (3 << 28) | ((u32)((1UL << (30) ) + 0)))) | |||
| 1694 | return false0; | |||
| 1695 | ||||
| 1696 | /* NB: Only 3 out of 4 enum values are valid for access field */ | |||
| 1697 | if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK(3 << 28)) == | |||
| 1698 | RING_FORCE_TO_NONPRIV_ACCESS_INVALID(3 << 28)) | |||
| 1699 | return false0; | |||
| 1700 | ||||
| 1701 | return true1; | |||
| 1702 | } | |||
| 1703 | ||||
| 1704 | static void | |||
| 1705 | whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) | |||
| 1706 | { | |||
| 1707 | struct i915_wa wa = { | |||
| 1708 | .reg = reg | |||
| 1709 | }; | |||
| 1710 | ||||
| 1711 | if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)({ ((void)0); 0; })) | |||
| 1712 | return; | |||
| 1713 | ||||
| 1714 | if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags))({ ((void)0); 0; })) | |||
| 1715 | return; | |||
| 1716 | ||||
| 1717 | wa.reg.reg |= flags; | |||
| 1718 | _wa_add(wal, &wa); | |||
| 1719 | } | |||
| 1720 | ||||
| 1721 | static void | |||
| 1722 | whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) | |||
| 1723 | { | |||
| 1724 | whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW(0 << 28)); | |||
| 1725 | } | |||
| 1726 | ||||
| 1727 | static void gen9_whitelist_build(struct i915_wa_list *w) | |||
| 1728 | { | |||
| 1729 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ | |||
| 1730 | whitelist_reg(w, GEN9_CTX_PREEMPT_REG((const i915_reg_t){ .reg = (0x2248) })); | |||
| 1731 | ||||
| 1732 | /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ | |||
| 1733 | whitelist_reg(w, GEN8_CS_CHICKEN1((const i915_reg_t){ .reg = (0x2580) })); | |||
| 1734 | ||||
| 1735 | /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */ | |||
| 1736 | whitelist_reg(w, GEN8_HDC_CHICKEN1((const i915_reg_t){ .reg = (0x7304) })); | |||
| 1737 | ||||
| 1738 | /* WaSendPushConstantsFromMMIO:skl,bxt */ | |||
| 1739 | whitelist_reg(w, COMMON_SLICE_CHICKEN2((const i915_reg_t){ .reg = (0x7014) })); | |||
| 1740 | } | |||
| 1741 | ||||
| 1742 | static void skl_whitelist_build(struct intel_engine_cs *engine) | |||
| 1743 | { | |||
| 1744 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1745 | ||||
| 1746 | if (engine->class != RENDER_CLASS0) | |||
| 1747 | return; | |||
| 1748 | ||||
| 1749 | gen9_whitelist_build(w); | |||
| 1750 | ||||
| 1751 | /* WaDisableLSQCROPERFforOCL:skl */ | |||
| 1752 | whitelist_reg(w, GEN8_L3SQCREG4((const i915_reg_t){ .reg = (0xb118) })); | |||
| 1753 | } | |||
| 1754 | ||||
| 1755 | static void bxt_whitelist_build(struct intel_engine_cs *engine) | |||
| 1756 | { | |||
| 1757 | if (engine->class != RENDER_CLASS0) | |||
| 1758 | return; | |||
| 1759 | ||||
| 1760 | gen9_whitelist_build(&engine->whitelist); | |||
| 1761 | } | |||
| 1762 | ||||
| 1763 | static void kbl_whitelist_build(struct intel_engine_cs *engine) | |||
| 1764 | { | |||
| 1765 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1766 | ||||
| 1767 | if (engine->class != RENDER_CLASS0) | |||
| 1768 | return; | |||
| 1769 | ||||
| 1770 | gen9_whitelist_build(w); | |||
| 1771 | ||||
| 1772 | /* WaDisableLSQCROPERFforOCL:kbl */ | |||
| 1773 | whitelist_reg(w, GEN8_L3SQCREG4((const i915_reg_t){ .reg = (0xb118) })); | |||
| 1774 | } | |||
| 1775 | ||||
| 1776 | static void glk_whitelist_build(struct intel_engine_cs *engine) | |||
| 1777 | { | |||
| 1778 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1779 | ||||
| 1780 | if (engine->class != RENDER_CLASS0) | |||
| 1781 | return; | |||
| 1782 | ||||
| 1783 | gen9_whitelist_build(w); | |||
| 1784 | ||||
| 1785 | /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */ | |||
| 1786 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1((const i915_reg_t){ .reg = (0x731c) })); | |||
| 1787 | } | |||
| 1788 | ||||
| 1789 | static void cfl_whitelist_build(struct intel_engine_cs *engine) | |||
| 1790 | { | |||
| 1791 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1792 | ||||
| 1793 | if (engine->class != RENDER_CLASS0) | |||
| 1794 | return; | |||
| 1795 | ||||
| 1796 | gen9_whitelist_build(w); | |||
| 1797 | ||||
| 1798 | /* | |||
| 1799 | * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml | |||
| 1800 | * | |||
| 1801 | * This covers 4 registers which are next to one another : | |||
| 1802 | * - PS_INVOCATION_COUNT | |||
| 1803 | * - PS_INVOCATION_COUNT_UDW | |||
| 1804 | * - PS_DEPTH_COUNT | |||
| 1805 | * - PS_DEPTH_COUNT_UDW | |||
| 1806 | */ | |||
| 1807 | whitelist_reg_ext(w, PS_INVOCATION_COUNT((const i915_reg_t){ .reg = (0x2348) }), | |||
| 1808 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28) | | |||
| 1809 | RING_FORCE_TO_NONPRIV_RANGE_4(1 << 0)); | |||
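| | /* | |||
| | * The RANGE_4 flag is understood to widen the single whitelist slot to | |||
| | * four consecutive dword registers, here 0x2348, 0x234c, 0x2350 and | |||
| | * 0x2354 (assuming the usual 4-byte register stride). | |||
| | */ | |||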
| 1810 | } | |||
| 1811 | ||||
| 1812 | static void allow_read_ctx_timestamp(struct intel_engine_cs *engine) | |||
| 1813 | { | |||
| 1814 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1815 | ||||
| 1816 | if (engine->class != RENDER_CLASS0) | |||
| 1817 | whitelist_reg_ext(w, | |||
| 1818 | RING_CTX_TIMESTAMP(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x3a8) } ), | |||
| 1819 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28)); | |||
| 1820 | } | |||
| 1821 | ||||
| 1822 | static void cml_whitelist_build(struct intel_engine_cs *engine) | |||
| 1823 | { | |||
| 1824 | allow_read_ctx_timestamp(engine); | |||
| 1825 | ||||
| 1826 | cfl_whitelist_build(engine); | |||
| 1827 | } | |||
| 1828 | ||||
| 1829 | static void icl_whitelist_build(struct intel_engine_cs *engine) | |||
| 1830 | { | |||
| 1831 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1832 | ||||
| 1833 | allow_read_ctx_timestamp(engine); | |||
| 1834 | ||||
| 1835 | switch (engine->class) { | |||
| 1836 | case RENDER_CLASS0: | |||
| 1837 | /* WaAllowUMDToModifyHalfSliceChicken7:icl */ | |||
| 1838 | whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7((const i915_reg_t){ .reg = (0xe194) })); | |||
| 1839 | ||||
| 1840 | /* WaAllowUMDToModifySamplerMode:icl */ | |||
| 1841 | whitelist_reg(w, GEN10_SAMPLER_MODE((const i915_reg_t){ .reg = (0xe18c) })); | |||
| 1842 | ||||
| 1843 | /* WaEnableStateCacheRedirectToCS:icl */ | |||
| 1844 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1((const i915_reg_t){ .reg = (0x731c) })); | |||
| 1845 | ||||
| 1846 | /* | |||
| 1847 | * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl | |||
| 1848 | * | |||
| 1849 | * This covers 4 registers which are next to one another : | |||
| 1850 | * - PS_INVOCATION_COUNT | |||
| 1851 | * - PS_INVOCATION_COUNT_UDW | |||
| 1852 | * - PS_DEPTH_COUNT | |||
| 1853 | * - PS_DEPTH_COUNT_UDW | |||
| 1854 | */ | |||
| 1855 | whitelist_reg_ext(w, PS_INVOCATION_COUNT((const i915_reg_t){ .reg = (0x2348) }), | |||
| 1856 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28) | | |||
| 1857 | RING_FORCE_TO_NONPRIV_RANGE_4(1 << 0)); | |||
| 1858 | break; | |||
| 1859 | ||||
| 1860 | case VIDEO_DECODE_CLASS1: | |||
| 1861 | /* hucStatusRegOffset */ | |||
| 1862 | whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base)((const i915_reg_t){ .reg = (0x2000 + engine->mmio_base) } ), | |||
| 1863 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28)); | |||
| 1864 | /* hucUKernelHdrInfoRegOffset */ | |||
| 1865 | whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base)((const i915_reg_t){ .reg = (0x2014 + engine->mmio_base) } ), | |||
| 1866 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28)); | |||
| 1867 | /* hucStatus2RegOffset */ | |||
| 1868 | whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base)((const i915_reg_t){ .reg = (0x23B0 + engine->mmio_base) } ), | |||
| 1869 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28)); | |||
| 1870 | break; | |||
| 1871 | ||||
| 1872 | default: | |||
| 1873 | break; | |||
| 1874 | } | |||
| 1875 | } | |||
| 1876 | ||||
| 1877 | static void tgl_whitelist_build(struct intel_engine_cs *engine) | |||
| 1878 | { | |||
| 1879 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1880 | ||||
| 1881 | allow_read_ctx_timestamp(engine); | |||
| 1882 | ||||
| 1883 | switch (engine->class) { | |||
| 1884 | case RENDER_CLASS0: | |||
| 1885 | /* | |||
| 1886 | * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl | |||
| 1887 | * Wa_1408556865:tgl | |||
| 1888 | * | |||
| 1889 | * This covers 4 registers which are next to one another : | |||
| 1890 | * - PS_INVOCATION_COUNT | |||
| 1891 | * - PS_INVOCATION_COUNT_UDW | |||
| 1892 | * - PS_DEPTH_COUNT | |||
| 1893 | * - PS_DEPTH_COUNT_UDW | |||
| 1894 | */ | |||
| 1895 | whitelist_reg_ext(w, PS_INVOCATION_COUNT((const i915_reg_t){ .reg = (0x2348) }), | |||
| 1896 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28) | | |||
| 1897 | RING_FORCE_TO_NONPRIV_RANGE_4(1 << 0)); | |||
| 1898 | ||||
| 1899 | /* | |||
| 1900 | * Wa_1808121037:tgl | |||
| 1901 | * Wa_14012131227:dg1 | |||
| 1902 | * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p | |||
| 1903 | */ | |||
| 1904 | whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1((const i915_reg_t){ .reg = (0x7010) })); | |||
| 1905 | ||||
| 1906 | /* Wa_1806527549:tgl */ | |||
| 1907 | whitelist_reg(w, HIZ_CHICKEN((const i915_reg_t){ .reg = (0x7018) })); | |||
| 1908 | break; | |||
| 1909 | default: | |||
| 1910 | break; | |||
| 1911 | } | |||
| 1912 | } | |||
| 1913 | ||||
| 1914 | static void dg1_whitelist_build(struct intel_engine_cs *engine) | |||
| 1915 | { | |||
| 1916 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1917 | ||||
| 1918 | tgl_whitelist_build(engine); | |||
| 1919 | ||||
| 1920 | /* GEN:BUG:1409280441:dg1 */ | |||
| 1921 | if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0)(IS_PLATFORM(engine->i915, INTEL_DG1) && (({ int __ret = !!((((&(engine->i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(engine->i915)->drm))->dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0))) && | |||
| 1922 | (engine->class == RENDER_CLASS0 || | |||
| 1923 | engine->class == COPY_ENGINE_CLASS3)) | |||
| 1924 | whitelist_reg_ext(w, RING_ID(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x8c) } ), | |||
| 1925 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28)); | |||
| 1926 | } | |||
| 1927 | ||||
| 1928 | static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) | |||
| 1929 | { | |||
| 1930 | allow_read_ctx_timestamp(engine); | |||
| 1931 | } | |||
| 1932 | ||||
| 1933 | static void dg2_whitelist_build(struct intel_engine_cs *engine) | |||
| 1934 | { | |||
| 1935 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1936 | ||||
| 1937 | allow_read_ctx_timestamp(engine); | |||
| 1938 | ||||
| 1939 | switch (engine->class) { | |||
| 1940 | case RENDER_CLASS0: | |||
| 1941 | /* | |||
| 1942 | * Wa_1507100340:dg2_g10 | |||
| 1943 | * | |||
| 1944 | * This covers 4 registers which are next to one another : | |||
| 1945 | * - PS_INVOCATION_COUNT | |||
| 1946 | * - PS_INVOCATION_COUNT_UDW | |||
| 1947 | * - PS_DEPTH_COUNT | |||
| 1948 | * - PS_DEPTH_COUNT_UDW | |||
| 1949 | */ | |||
| 1950 | if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0)))) | |||
| 1951 | whitelist_reg_ext(w, PS_INVOCATION_COUNT((const i915_reg_t){ .reg = (0x2348) }), | |||
| 1952 | RING_FORCE_TO_NONPRIV_ACCESS_RD(1 << 28) | | |||
| 1953 | RING_FORCE_TO_NONPRIV_RANGE_4(1 << 0)); | |||
| 1954 | ||||
| 1955 | break; | |||
| 1956 | case COMPUTE_CLASS5: | |||
| 1957 | /* Wa_16011157294:dg2_g10 */ | |||
| 1958 | if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0)))) | |||
| 1959 | whitelist_reg(w, GEN9_CTX_PREEMPT_REG((const i915_reg_t){ .reg = (0x2248) })); | |||
| 1960 | break; | |||
| 1961 | default: | |||
| 1962 | break; | |||
| 1963 | } | |||
| 1964 | } | |||
| 1965 | ||||
| 1966 | static void blacklist_trtt(struct intel_engine_cs *engine) | |||
| 1967 | { | |||
| 1968 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1969 | ||||
| 1970 | /* | |||
| 1971 | * Prevent read/write access to [0x4400, 0x4600) which covers | |||
| 1972 | * the TRTT range across all engines. Note that normally userspace | |||
| 1973 | * cannot access the other engines' trtt control, but for simplicity | |||
| 1974 | * we cover the entire range on each engine. | |||
| 1975 | */ | |||
| 1976 | whitelist_reg_ext(w, _MMIO(0x4400)((const i915_reg_t){ .reg = (0x4400) }), | |||
| 1977 | RING_FORCE_TO_NONPRIV_DENY((u32)((1UL << (30)) + 0)) | | |||
| 1978 | RING_FORCE_TO_NONPRIV_RANGE_64(3 << 0)); | |||
| 1979 | whitelist_reg_ext(w, _MMIO(0x4500)((const i915_reg_t){ .reg = (0x4500) }), | |||
| 1980 | RING_FORCE_TO_NONPRIV_DENY((u32)((1UL << (30)) + 0)) | | |||
| 1981 | RING_FORCE_TO_NONPRIV_RANGE_64(3 << 0)); | |||
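| | /* | |||
| | * Sketch of the arithmetic: each RANGE_64 entry spans 64 dword registers, | |||
| | * i.e. 0x100 bytes, so the two DENY entries at 0x4400 and 0x4500 together | |||
| | * cover exactly [0x4400, 0x4600). | |||
| | */ | |||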
| 1982 | } | |||
| 1983 | ||||
| 1984 | static void pvc_whitelist_build(struct intel_engine_cs *engine) | |||
| 1985 | { | |||
| 1986 | allow_read_ctx_timestamp(engine); | |||
| 1987 | ||||
| 1988 | /* Wa_16014440446:pvc */ | |||
| 1989 | blacklist_trtt(engine); | |||
| 1990 | } | |||
| 1991 | ||||
| 1992 | void intel_engine_init_whitelist(struct intel_engine_cs *engine) | |||
| 1993 | { | |||
| 1994 | struct drm_i915_privateinteldrm_softc *i915 = engine->i915; | |||
| 1995 | struct i915_wa_list *w = &engine->whitelist; | |||
| 1996 | ||||
| 1997 | wa_init_start(w, "whitelist", engine->name); | |||
| 1998 | ||||
| 1999 | if (IS_PONTEVECCHIO(i915)IS_PLATFORM(i915, INTEL_PONTEVECCHIO)) | |||
| 2000 | pvc_whitelist_build(engine); | |||
| 2001 | else if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2)) | |||
| 2002 | dg2_whitelist_build(engine); | |||
| 2003 | else if (IS_XEHPSDV(i915)IS_PLATFORM(i915, INTEL_XEHPSDV)) | |||
| 2004 | xehpsdv_whitelist_build(engine); | |||
| 2005 | else if (IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1)) | |||
| 2006 | dg1_whitelist_build(engine); | |||
| 2007 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 12) | |||
| 2008 | tgl_whitelist_build(engine); | |||
| 2009 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 11) | |||
| 2010 | icl_whitelist_build(engine); | |||
| 2011 | else if (IS_COMETLAKE(i915)IS_PLATFORM(i915, INTEL_COMETLAKE)) | |||
| 2012 | cml_whitelist_build(engine); | |||
| 2013 | else if (IS_COFFEELAKE(i915)IS_PLATFORM(i915, INTEL_COFFEELAKE)) | |||
| 2014 | cfl_whitelist_build(engine); | |||
| 2015 | else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE)) | |||
| 2016 | glk_whitelist_build(engine); | |||
| 2017 | else if (IS_KABYLAKE(i915)IS_PLATFORM(i915, INTEL_KABYLAKE)) | |||
| 2018 | kbl_whitelist_build(engine); | |||
| 2019 | else if (IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) | |||
| 2020 | bxt_whitelist_build(engine); | |||
| 2021 | else if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE)) | |||
| 2022 | skl_whitelist_build(engine); | |||
| 2023 | else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) <= 8) | |||
| 2024 | ; | |||
| 2025 | else | |||
| 2026 | MISSING_CASE(GRAPHICS_VER(i915))({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "((&(i915)->__runtime)->graphics.ip.ver)", (long) (((&(i915)->__runtime)->graphics.ip.ver))); __builtin_expect (!!(__ret), 0); }); | |||
| 2027 | ||||
| 2028 | wa_init_finish(w); | |||
| 2029 | } | |||
| 2030 | ||||
| 2031 | void intel_engine_apply_whitelist(struct intel_engine_cs *engine) | |||
| 2032 | { | |||
| 2033 | const struct i915_wa_list *wal = &engine->whitelist; | |||
| 2034 | struct intel_uncore *uncore = engine->uncore; | |||
| 2035 | const u32 base = engine->mmio_base; | |||
| 2036 | struct i915_wa *wa; | |||
| 2037 | unsigned int i; | |||
| 2038 | ||||
| 2039 | if (!wal->count) | |||
| 2040 | return; | |||
| 2041 | ||||
| 2042 | for (i = 0, wa = wal->list; i < wal->count; i++, wa++) | |||
| 2043 | intel_uncore_write(uncore, | |||
| 2044 | RING_FORCE_TO_NONPRIV(base, i)((const i915_reg_t){ .reg = (((base) + 0x4D0) + (i) * 4) }), | |||
| 2045 | i915_mmio_reg_offset(wa->reg)); | |||
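| | /* | |||
| | * E.g. for an engine with mmio_base 0x2000 (an assumed value for this | |||
| | * sketch), whitelist slot i lives at 0x24d0 + 4 * i per the | |||
| | * RING_FORCE_TO_NONPRIV() encoding above. | |||
| | */ | |||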
| 2046 | ||||
| 2047 | /* And clear the rest just in case of garbage */ | |||
| 2048 | for (; i < RING_MAX_NONPRIV_SLOTS12; i++) | |||
| 2049 | intel_uncore_write(uncore, | |||
| 2050 | RING_FORCE_TO_NONPRIV(base, i)((const i915_reg_t){ .reg = (((base) + 0x4D0) + (i) * 4) }), | |||
| 2051 | i915_mmio_reg_offset(RING_NOPID(base)((const i915_reg_t){ .reg = ((base) + 0x94) }))); | |||
| 2052 | } | |||
| 2053 | ||||
| 2054 | /* | |||
| 2055 | * engine_fake_wa_init(), a placeholder to program the registers | |||
| 2056 | * which are not part of an official workaround defined by the | |||
| 2057 | * hardware team. | |||
| 2058 | * Adding the programming of those registers inside a workaround | |||
| 2059 | * allows utilizing the wa framework for proper application and verification. | |||
| 2060 | */ | |||
| 2061 | static void | |||
| 2062 | engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2063 | { | |||
| 2064 | u8 mocs_w, mocs_r; | |||
| 2065 | ||||
| 2066 | /* | |||
| 2067 | * RING_CMD_CCTL specifies the default MOCS entry that will be used | |||
| 2068 | * by the command streamer when executing commands that don't have | |||
| 2069 | * a way to explicitly specify a MOCS setting. The default should | |||
| 2070 | * usually reference whichever MOCS entry corresponds to uncached | |||
| 2071 | * behavior, although use of a WB cached entry is recommended by the | |||
| 2072 | * spec in certain circumstances on specific platforms. | |||
| 2073 | */ | |||
| 2074 | if (GRAPHICS_VER(engine->i915)((&(engine->i915)->__runtime)->graphics.ip.ver) >= 12) { | |||
| 2075 | mocs_r = engine->gt->mocs.uc_index; | |||
| 2076 | mocs_w = engine->gt->mocs.uc_index; | |||
| 2077 | ||||
| 2078 | if (HAS_L3_CCS_READ(engine->i915)((&(engine->i915)->__info)->has_l3_ccs_read) && | |||
| 2079 | engine->class == COMPUTE_CLASS5) { | |||
| 2080 | mocs_r = engine->gt->mocs.wb_index; | |||
| 2081 | ||||
| 2082 | /* | |||
| 2083 | * Even on the few platforms where MOCS 0 is a | |||
| 2084 | * legitimate table entry, it's never the correct | |||
| 2085 | * setting to use here; we can assume the MOCS init | |||
| 2086 | * just forgot to initialize wb_index. | |||
| 2087 | */ | |||
| 2088 | drm_WARN_ON(&engine->i915->drm, mocs_r == 0)({ int __ret = !!((mocs_r == 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&engine->i915->drm))-> dev), "", "drm_WARN_ON(" "mocs_r == 0" ")"); __builtin_expect (!!(__ret), 0); }); | |||
| 2089 | } | |||
| 2090 | ||||
| 2091 | wa_masked_field_set(wal, | |||
| 2092 | RING_CMD_CCTL(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0xc4) } ), | |||
| 2093 | CMD_CCTL_MOCS_MASK(((u32)((((~0UL) >> (64 - (13) - 1)) & ((~0UL) << (7))) + 0)) | ((u32)((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (0))) + 0))), | |||
| 2094 | CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r)(((u32)((((typeof(((u32)((((~0UL) >> (64 - (13) - 1)) & ((~0UL) << (7))) + 0))))((mocs_w) << 1) << (__builtin_ffsll(((u32)((((~0UL) >> (64 - (13) - 1)) & ((~0UL) << (7))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (13) - 1)) & ((~0UL) << (7))) + 0)))) + 0 + 0 + 0 + 0)) | ((u32)((((typeof(((u32)((((~0UL) >> (64 - ( 6) - 1)) & ((~0UL) << (0))) + 0))))((mocs_r) << 1) << (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 6) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32 )((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (0 ))) + 0)))) + 0 + 0 + 0 + 0)))); | |||
| 2095 | } | |||
| 2096 | } | |||
| 2097 | ||||
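| | /* Wa_1308578152 only applies when the first gslice is fused off, i.e. the first available DSS index lies beyond gslice 0. */ | |||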
| 2098 | static bool_Bool needs_wa_1308578152(struct intel_engine_cs *engine) | |||
| 2099 | { | |||
| 2100 | return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >= | |||
| 2101 | GEN_DSS_PER_GSLICE4; | |||
| 2102 | } | |||
| 2103 | ||||
| 2104 | static void | |||
| 2105 | rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2106 | { | |||
| 2107 | struct drm_i915_privateinteldrm_softc *i915 = engine->i915; | |||
| 2108 | ||||
| 2109 | if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2)) { | |||
| 2110 | /* Wa_1509235366:dg2 */ | |||
| 2111 | wa_write_or(wal, GEN12_GAMCNTRL_CTRL((const i915_reg_t){ .reg = (0xcf54) }), INVALIDATION_BROADCAST_MODE_DIS((u32)((1UL << (12)) + 0)) | | |||
| 2112 | GLOBAL_INVALIDATION_MODE((u32)((1UL << (2)) + 0))); | |||
| 2113 | } | |||
| 2114 | ||||
| 2115 | if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2116 | /* Wa_14013392000:dg2_g11 */ | |||
| 2117 | wa_masked_en(wal, GEN7_ROW_CHICKEN2((const i915_reg_t){ .reg = (0xe4f4) }), GEN12_ENABLE_LARGE_GRF_MODE((u32)((1UL << (12)) + 0))); | |||
| 2118 | ||||
| 2119 | /* Wa_16011620976:dg2_g11 */ | |||
| 2120 | wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW((const i915_reg_t){ .reg = (0xe7c8 + 4) }), DIS_CHAIN_2XSIMD8((u32)((1UL << (55 - 32)) + 0))); | |||
| 2121 | } | |||
| 2122 | ||||
| 2123 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_FOREVER ))) || | |||
| 2124 | IS_DG2_G11(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 1) || IS_DG2_G12(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 2)) { | |||
| 2125 | /* Wa_1509727124:dg2 */ | |||
| 2126 | wa_masked_en(wal, GEN10_SAMPLER_MODE((const i915_reg_t){ .reg = (0xe18c) }), | |||
| 2127 | SC_DISABLE_POWER_OPTIMIZATION_EBB((u32)((1UL << (9)) + 0))); | |||
| 2128 | } | |||
| 2129 | ||||
| 2130 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 ))) || | |||
| 2131 | IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2132 | /* Wa_14012419201:dg2 */ | |||
| 2133 | wa_masked_en(wal, GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), | |||
| 2134 | GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX((u32)((1UL << (4)) + 0))); | |||
| 2135 | } | |||
| 2136 | ||||
| 2137 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_C0 ))) || | |||
| 2138 | IS_DG2_G11(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 1)) { | |||
| 2139 | /* | |||
| 2140 | * Wa_22012826095:dg2 | |||
| 2141 | * Wa_22013059131:dg2 | |||
| 2142 | */ | |||
| 2143 | wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW((const i915_reg_t){ .reg = (0xe7c8 + 4) }), | |||
| 2144 | MAXREQS_PER_BANK((u32)((((~0UL) >> (64 - (39 - 32) - 1)) & ((~0UL) << (37 - 32))) + 0)), | |||
| 2145 | REG_FIELD_PREP(MAXREQS_PER_BANK, 2)((u32)((((typeof(((u32)((((~0UL) >> (64 - (39 - 32) - 1 )) & ((~0UL) << (37 - 32))) + 0))))(2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (39 - 32) - 1)) & ((~0UL) << (37 - 32))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (39 - 32) - 1)) & ((~0UL) << (37 - 32))) + 0 )))) + 0 + 0 + 0 + 0))); | |||
| 2146 | ||||
| 2147 | /* Wa_22013059131:dg2 */ | |||
| 2148 | wa_write_or(wal, LSC_CHICKEN_BIT_0((const i915_reg_t){ .reg = (0xe7c8) }), | |||
| 2149 | FORCE_1_SUB_MESSAGE_PER_FRAGMENT((u32)((1UL << (15)) + 0))); | |||
| 2150 | } | |||
| 2151 | ||||
| 2152 | /* Wa_1308578152:dg2_g10 when first gslice is fused off */ | |||
| 2153 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_C0 ))) && | |||
| 2154 | needs_wa_1308578152(engine)) { | |||
| 2155 | wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON((const i915_reg_t){ .reg = (0x20ec) }), | |||
| 2156 | GEN12_REPLAY_MODE_GRANULARITY((u32)((1UL << (0)) + 0))); | |||
| 2157 | } | |||
| 2158 | ||||
| 2159 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_FOREVER ))) || | |||
| 2160 | IS_DG2_G11(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 1) || IS_DG2_G12(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 2)) { | |||
| 2161 | /* Wa_22013037850:dg2 */ | |||
| 2162 | wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW((const i915_reg_t){ .reg = (0xe7c8 + 4) }), | |||
| 2163 | DISABLE_128B_EVICTION_COMMAND_UDW((u32)((1UL << (36 - 32)) + 0))); | |||
| 2164 | ||||
| 2165 | /* Wa_22012856258:dg2 */ | |||
| 2166 | wa_masked_en(wal, GEN7_ROW_CHICKEN2((const i915_reg_t){ .reg = (0xe4f4) }), | |||
| 2167 | GEN12_DISABLE_READ_SUPPRESSION((u32)((1UL << (15)) + 0))); | |||
| 2168 | ||||
| 2169 | /* | |||
| 2170 | * Wa_22010960976:dg2 | |||
| 2171 | * Wa_14013347512:dg2 | |||
| 2172 | */ | |||
| 2173 | wa_masked_dis(wal, GEN12_HDC_CHICKEN0((const i915_reg_t){ .reg = (0xe5f0) }), | |||
| 2174 | LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK((u32)((((~0UL) >> (64 - (13) - 1)) & ((~0UL) << (11))) + 0))); | |||
| 2175 | } | |||
| 2176 | ||||
| 2177 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2178 | /* | |||
| 2179 | * Wa_1608949956:dg2_g10 | |||
| 2180 | * Wa_14010198302:dg2_g10 | |||
| 2181 | */ | |||
| 2182 | wa_masked_en(wal, GEN8_ROW_CHICKEN((const i915_reg_t){ .reg = (0xe4f0) }), | |||
| 2183 | MDQ_ARBITRATION_MODE((u32)((1UL << (12)) + 0)) | UGM_BACKUP_MODE((u32)((1UL << (13)) + 0))); | |||
| 2184 | ||||
| 2185 | /* | |||
| 2186 | * Wa_14010918519:dg2_g10 | |||
| 2187 | * | |||
| 2188 | * LSC_CHICKEN_BIT_0 always reads back as 0 on this stepping, | |||
| 2189 | * so skip verification. | |||
| 2190 | */ | |||
| 2191 | wa_add(wal, LSC_CHICKEN_BIT_0_UDW((const i915_reg_t){ .reg = (0xe7c8 + 4) }), 0, | |||
| 2192 | FORCE_SLM_FENCE_SCOPE_TO_TILE((u32)((1UL << (42 - 32)) + 0)) | FORCE_UGM_FENCE_SCOPE_TO_TILE((u32)((1UL << (41 - 32)) + 0)), | |||
| 2193 | 0, false0); | |||
| 2194 | } | |||
| 2195 | ||||
| 2196 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2197 | /* Wa_22010430635:dg2 */ | |||
| 2198 | wa_masked_en(wal, | |||
| 2199 | GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), | |||
| 2200 | GEN12_DISABLE_GRF_CLEAR((u32)((1UL << (13)) + 0))); | |||
| 2201 | ||||
| 2202 | /* Wa_14010648519:dg2 */ | |||
| 2203 | wa_write_or(wal, XEHP_L3NODEARBCFG((const i915_reg_t){ .reg = (0xb0b4) }), XEHP_LNESPARE((u32)((1UL << (19)) + 0))); | |||
| 2204 | } | |||
| 2205 | ||||
| 2206 | /* Wa_14013202645:dg2 */ | |||
| 2207 | if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0)(IS_SUBPLATFORM(i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_C0 ))) || | |||
| 2208 | IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) | |||
| 2209 | wa_write_or(wal, RT_CTRL((const i915_reg_t){ .reg = (0xe530) }), DIS_NULL_QUERY((u32)((1UL << (10)) + 0))); | |||
| 2210 | ||||
| 2211 | /* Wa_22012532006:dg2 */ | |||
| 2212 | if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_C0))) || | |||
| 2213 | IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0)))) | |||
| 2214 | wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7((const i915_reg_t){ .reg = (0xe194) }), | |||
| 2215 | DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA((u32)((1UL << (15)) + 0))); | |||
| 2216 | ||||
| 2217 | if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0)))) { | |||
| 2218 | /* Wa_14010680813:dg2_g10 */ | |||
| 2219 | wa_write_or(wal, GEN12_GAMSTLB_CTRL((const i915_reg_t){ .reg = (0xcf4c) }), CONTROL_BLOCK_CLKGATE_DIS((u32)((1UL << (12)) + 0)) | | |||
| 2220 | EGRESS_BLOCK_CLKGATE_DIS((u32)((1UL << (11)) + 0)) | TAG_BLOCK_CLKGATE_DIS((u32)((1UL << (7)) + 0))); | |||
| 2221 | } | |||
| 2222 | ||||
| 2223 | if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 0) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0))) || | |||
| 2224 | IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)(IS_SUBPLATFORM(engine->i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_B0)))) { | |||
| 2225 | /* Wa_14012362059:dg2 */ | |||
| 2226 | wa_write_or(wal, GEN12_MERT_MOD_CTRL((const i915_reg_t){ .reg = (0xcf28) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2227 | } | |||
| 2228 | ||||
| 2229 | if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER)(IS_SUBPLATFORM(i915, INTEL_DG2, 1) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_B0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_FOREVER ))) || | |||
| 2230 | IS_DG2_G10(i915)IS_SUBPLATFORM(i915, INTEL_DG2, 0)) { | |||
| 2231 | /* Wa_22014600077:dg2 */ | |||
| 2232 | wa_add(wal, GEN10_CACHE_MODE_SS((const i915_reg_t){ .reg = (0xe420) }), 0, | |||
| 2233 | _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH)({ typeof(((u32)((1UL << (10)) + 0))) _a = (((u32)((1UL << (10)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p(_a) && __builtin_constant_p (_a)) do { } while (0); ((_a) << 16 | (_a)); }); }), | |||
| 2234 | 0 /* Wa_14012342262: write-only reg, so skip | |||
| 2235 | verification */, | |||
| 2236 | true1); | |||
| 2237 | } | |||
| 2238 | ||||
| 2239 | if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_PLATFORM(i915, INTEL_DG1) && (({ int __ret = !!(( ((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 ))) || | |||
| 2240 | IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, (0)) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2241 | /* | |||
| 2242 | * Wa_1607138336:tgl[a0],dg1[a0] | |||
| 2243 | * Wa_1607063988:tgl[a0],dg1[a0] | |||
| 2244 | */ | |||
| 2245 | wa_write_or(wal, | |||
| 2246 | GEN9_CTX_PREEMPT_REG((const i915_reg_t){ .reg = (0x2248) }), | |||
| 2247 | GEN12_DISABLE_POSH_BUSY_FF_DOP_CG((u32)((1UL << (11)) + 0))); | |||
| 2248 | } | |||
| 2249 | ||||
| 2250 | if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, (0)) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2251 | /* | |||
| 2252 | * Wa_1606679103:tgl | |||
| 2253 | * (see also Wa_1606682166:icl) | |||
| 2254 | */ | |||
| 2255 | wa_write_or(wal, | |||
| 2256 | GEN7_SARCHKMD((const i915_reg_t){ .reg = (0xb000) }), | |||
| 2257 | GEN7_DISABLE_SAMPLER_PREFETCH(1 << 30)); | |||
| 2258 | } | |||
| 2259 | ||||
| 2260 | if (IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || IS_ALDERLAKE_S(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_S) || IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1) || | |||
| 2261 | IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) || IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE)) { | |||
| 2262 | /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */ | |||
| 2263 | wa_masked_en(wal, GEN7_ROW_CHICKEN2((const i915_reg_t){ .reg = (0xe4f4) }), GEN12_DISABLE_EARLY_READ((u32)((1UL << (14)) + 0))); | |||
| 2264 | ||||
| 2265 | /* | |||
| 2266 | * Wa_1407928979:tgl A* | |||
| 2267 | * Wa_18011464164:tgl[B0+],dg1[B0+] | |||
| 2268 | * Wa_22010931296:tgl[B0+],dg1[B0+] | |||
| 2269 | * Wa_14010919138:rkl,dg1,adl-s,adl-p | |||
| 2270 | */ | |||
| 2271 | wa_write_or(wal, GEN7_FF_THREAD_MODE((const i915_reg_t){ .reg = (0x20a0) }), | |||
| 2272 | GEN12_FF_TESSELATION_DOP_GATE_DISABLE(1UL << (19))); | |||
| 2273 | } | |||
| 2274 | ||||
| 2275 | if (IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2) || IS_ALDERLAKE_S(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_S) || | |||
| 2276 | IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1) || IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) || IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE)) { | |||
| 2277 | /* | |||
| 2278 | * Wa_1606700617:tgl,dg1,adl-p | |||
| 2279 | * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p | |||
| 2280 | * Wa_14010826681:tgl,dg1,rkl,adl-p | |||
| 2281 | * Wa_18019627453:dg2 | |||
| 2282 | */ | |||
| 2283 | wa_masked_en(wal, | |||
| 2284 | GEN9_CS_DEBUG_MODE1((const i915_reg_t){ .reg = (0x20ec) }), | |||
| 2285 | FF_DOP_CLOCK_GATE_DISABLE((u32)((1UL << (1)) + 0))); | |||
| 2286 | } | |||
| 2287 | ||||
| 2288 | if (IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || IS_ALDERLAKE_S(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_S) || | |||
| 2289 | IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_PLATFORM(i915, INTEL_DG1) && (({ int __ret = !!(( ((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 ))) || | |||
| 2290 | IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) || IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE)) { | |||
| 2291 | /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ | |||
| 2292 | wa_masked_en(wal, GEN7_ROW_CHICKEN2((const i915_reg_t){ .reg = (0xe4f4) }), | |||
| 2293 | GEN12_PUSH_CONST_DEREF_HOLD_DIS((u32)((1UL << (8)) + 0))); | |||
| 2294 | ||||
| 2295 | /* | |||
| 2296 | * Wa_1409085225:tgl | |||
| 2297 | * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p | |||
| 2298 | */ | |||
| 2299 | wa_masked_en(wal, GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), GEN12_DISABLE_TDL_PUSH((u32)((1UL << (9)) + 0))); | |||
| 2300 | } | |||
| 2301 | ||||
| 2302 | if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)(IS_PLATFORM(i915, INTEL_DG1) && (({ int __ret = !!(( ((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 ))) || | |||
| 2303 | IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) || IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE) || IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P)) { | |||
| 2304 | /* | |||
| 2305 | * Wa_1607030317:tgl | |||
| 2306 | * Wa_1607186500:tgl | |||
| 2307 | * Wa_1607297627:tgl,rkl,dg1[a0],adlp | |||
| 2308 | * | |||
| 2309 | * On TGL and RKL there are multiple entries for this WA in the | |||
| 2310 | * BSpec; some indicate this is an A0-only WA, others indicate | |||
| 2311 | * it applies to all steppings so we trust the "all steppings." | |||
| 2312 | * For DG1 this only applies to A0. | |||
| 2313 | */ | |||
| 2314 | wa_masked_en(wal, | |||
| 2315 | RING_PSMI_CTL(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x50) }), | |||
| 2316 | GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE((u32)((1UL << (7)) + 0)) | | |||
| 2317 | GEN8_RC_SEMA_IDLE_MSG_DISABLE((u32)((1UL << (12)) + 0))); | |||
| 2318 | } | |||
| 2319 | ||||
| 2320 | if (IS_DG1(i915)IS_PLATFORM(i915, INTEL_DG1) || IS_ROCKETLAKE(i915)IS_PLATFORM(i915, INTEL_ROCKETLAKE) || IS_TIGERLAKE(i915)IS_PLATFORM(i915, INTEL_TIGERLAKE) || | |||
| 2321 | IS_ALDERLAKE_S(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_S) || IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P)) { | |||
| 2322 | /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */ | |||
| 2323 | wa_masked_en(wal, | |||
| 2324 | GEN10_SAMPLER_MODE((const i915_reg_t){ .reg = (0xe18c) }), | |||
| 2325 | ENABLE_SMALLPL((u32)((1UL << (15)) + 0))); | |||
| 2326 | } | |||
| 2327 | ||||
| 2328 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 11) { | |||
| 2329 | /* This is not a Wa. Enable for better image quality */ | |||
| 2330 | wa_masked_en(wal, | |||
| 2331 | _3D_CHICKEN3((const i915_reg_t){ .reg = (0x2090) }), | |||
| 2332 | _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE(1 << 5)); | |||
| 2333 | ||||
| 2334 | /* | |||
| 2335 | * Wa_1405543622:icl | |||
| 2336 | * Formerly known as WaGAPZPriorityScheme | |||
| 2337 | */ | |||
| 2338 | wa_write_or(wal, | |||
| 2339 | GEN8_GARBCNTL((const i915_reg_t){ .reg = (0xb004) }), | |||
| 2340 | GEN11_ARBITRATION_PRIO_ORDER_MASK(0x3f << 22)); | |||
| 2341 | ||||
| 2342 | /* | |||
| 2343 | * Wa_1604223664:icl | |||
| 2344 | * Formerly known as WaL3BankAddressHashing | |||
| 2345 | */ | |||
| 2346 | wa_write_clr_set(wal, | |||
| 2347 | GEN8_GARBCNTL((const i915_reg_t){ .reg = (0xb004) }), | |||
| 2348 | GEN11_HASH_CTRL_EXCL_MASK(0x7f << 0), | |||
| 2349 | GEN11_HASH_CTRL_EXCL_BIT0(1 << 0)); | |||
| 2350 | wa_write_clr_set(wal, | |||
| 2351 | GEN11_GLBLINVL((const i915_reg_t){ .reg = (0xb404) }), | |||
| 2352 | GEN11_BANK_HASH_ADDR_EXCL_MASK(0x7f << 5), | |||
| 2353 | GEN11_BANK_HASH_ADDR_EXCL_BIT0(1 << 5)); | |||
| 2354 | ||||
| 2355 | /* | |||
| 2356 | * Wa_1405733216:icl | |||
| 2357 | * Formerly known as WaDisableCleanEvicts | |||
| 2358 | */ | |||
| 2359 | wa_write_or(wal, | |||
| 2360 | GEN8_L3SQCREG4((const i915_reg_t){ .reg = (0xb118) }), | |||
| 2361 | GEN11_LQSC_CLEAN_EVICT_DISABLE(1 << 6)); | |||
| 2362 | ||||
| 2363 | /* Wa_1606682166:icl */ | |||
| 2364 | wa_write_or(wal, | |||
| 2365 | GEN7_SARCHKMD((const i915_reg_t){ .reg = (0xb000) }), | |||
| 2366 | GEN7_DISABLE_SAMPLER_PREFETCH(1 << 30)); | |||
| 2367 | ||||
| 2368 | /* Wa_1409178092:icl */ | |||
| 2369 | wa_write_clr_set(wal, | |||
| 2370 | GEN11_SCRATCH2((const i915_reg_t){ .reg = (0xb140) }), | |||
| 2371 | GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE(1 << 19), | |||
| 2372 | 0); | |||
| 2373 | ||||
| 2374 | /* WaEnable32PlaneMode:icl */ | |||
| 2375 | wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS((const i915_reg_t){ .reg = (0x20d4) }), | |||
| 2376 | GEN11_ENABLE_32_PLANE_MODE(1 << 7)); | |||
| 2377 | ||||
| 2378 | /* | |||
| 2379 | * Wa_1408767742:icl[a2..forever],ehl[all] | |||
| 2380 | * Wa_1605460711:icl[a0..c0] | |||
| 2381 | */ | |||
| 2382 | wa_write_or(wal, | |||
| 2383 | GEN7_FF_THREAD_MODE((const i915_reg_t){ .reg = (0x20a0) }), | |||
| 2384 | GEN12_FF_TESSELATION_DOP_GATE_DISABLE(1UL << (19))); | |||
| 2385 | ||||
| 2386 | /* Wa_22010271021 */ | |||
| 2387 | wa_masked_en(wal, | |||
| 2388 | GEN9_CS_DEBUG_MODE1((const i915_reg_t){ .reg = (0x20ec) }), | |||
| 2389 | FF_DOP_CLOCK_GATE_DISABLE((u32)((1UL << (1)) + 0))); | |||
| 2390 | } | |||
| 2391 | ||||
| 2392 | if (IS_GRAPHICS_VER(i915, 9, 12)(((&(i915)->__runtime)->graphics.ip.ver) >= (9) && ((&(i915)->__runtime)->graphics.ip.ver) <= (12) )) { | |||
| 2393 | /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */ | |||
| 2394 | wa_masked_en(wal, | |||
| 2395 | GEN7_FF_SLICE_CS_CHICKEN1((const i915_reg_t){ .reg = (0x20e0) }), | |||
| 2396 | GEN9_FFSC_PERCTX_PREEMPT_CTRL(1 << 14)); | |||
| 2397 | } | |||
| 2398 | ||||
| 2399 | if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE) || | |||
| 2400 | IS_KABYLAKE(i915)IS_PLATFORM(i915, INTEL_KABYLAKE) || | |||
| 2401 | IS_COFFEELAKE(i915)IS_PLATFORM(i915, INTEL_COFFEELAKE) || | |||
| 2402 | IS_COMETLAKE(i915)IS_PLATFORM(i915, INTEL_COMETLAKE)) { | |||
| 2403 | /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ | |||
| 2404 | wa_write_or(wal, | |||
| 2405 | GEN8_GARBCNTL((const i915_reg_t){ .reg = (0xb004) }), | |||
| 2406 | GEN9_GAPS_TSV_CREDIT_DISABLE(1 << 7)); | |||
| 2407 | } | |||
| 2408 | ||||
| 2409 | if (IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 2410 | /* WaDisablePooledEuLoadBalancingFix:bxt */ | |||
| 2411 | wa_masked_en(wal, | |||
| 2412 | FF_SLICE_CS_CHICKEN2((const i915_reg_t){ .reg = (0x20e4) }), | |||
| 2413 | GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE(1 << 10)); | |||
| 2414 | } | |||
| 2415 | ||||
| 2416 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 9) { | |||
| 2417 | /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ | |||
| 2418 | wa_masked_en(wal, | |||
| 2419 | GEN9_CSFE_CHICKEN1_RCS((const i915_reg_t){ .reg = (0x20d4) }), | |||
| 2420 | GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE(1 << 2)); | |||
| 2421 | ||||
| 2422 | /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ | |||
| 2423 | wa_write_or(wal, | |||
| 2424 | BDW_SCRATCH1((const i915_reg_t){ .reg = (0xb11c) }), | |||
| 2425 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE(1 << 2)); | |||
| 2426 | ||||
| 2427 | /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ | |||
| 2428 | if (IS_GEN9_LP(i915)(((&(i915)->__runtime)->graphics.ip.ver) == 9 && ((&(i915)->__info)->is_lp))) | |||
| 2429 | wa_write_clr_set(wal, | |||
| 2430 | GEN8_L3SQCREG1((const i915_reg_t){ .reg = (0xb100) }), | |||
| 2431 | L3_PRIO_CREDITS_MASK((0x1f << 19) | (0x1f << 14)), | |||
| 2432 | L3_GENERAL_PRIO_CREDITS(62)(((62) >> 1) << 19) | | |||
| 2433 | L3_HIGH_PRIO_CREDITS(2)(((2) >> 1) << 14)); | |||
| 2434 | ||||
| 2435 | /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ | |||
| 2436 | wa_write_or(wal, | |||
| 2437 | GEN8_L3SQCREG4((const i915_reg_t){ .reg = (0xb118) }), | |||
| 2438 | GEN8_LQSC_FLUSH_COHERENT_LINES(1 << 21)); | |||
| 2439 | ||||
| 2440 | /* Disable atomics in L3 to prevent unrecoverable hangs */ | |||
| 2441 | wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1((const i915_reg_t){ .reg = (0xb008) }), | |||
| 2442 | GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE((u32)((1UL << (0)) + 0)), 0); | |||
| 2443 | wa_write_clr_set(wal, GEN8_L3SQCREG4((const i915_reg_t){ .reg = (0xb118) }), | |||
| 2444 | GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE((u32)((1UL << (22)) + 0)), 0); | |||
| 2445 | wa_write_clr_set(wal, GEN9_SCRATCH1((const i915_reg_t){ .reg = (0xb11c) }), | |||
| 2446 | EVICTION_PERF_FIX_ENABLE((u32)((1UL << (8)) + 0)), 0); | |||
| 2447 | } | |||
| 2448 | ||||
| 2449 | if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL)) { | |||
| 2450 | /* WaSampleCChickenBitEnable:hsw */ | |||
| 2451 | wa_masked_en(wal, | |||
| 2452 | HALF_SLICE_CHICKEN3((const i915_reg_t){ .reg = (0xe184) }), HSW_SAMPLE_C_PERFORMANCE(1 << 9)); | |||
| 2453 | ||||
| 2454 | wa_masked_dis(wal, | |||
| 2455 | CACHE_MODE_0_GEN7((const i915_reg_t){ .reg = (0x7000) }), | |||
| 2456 | /* enable HiZ Raw Stall Optimization */ | |||
| 2457 | HIZ_RAW_STALL_OPT_DISABLE(1 << 2)); | |||
| 2458 | } | |||
| 2459 | ||||
| 2460 | if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW)) { | |||
| 2461 | /* WaDisableEarlyCull:vlv */ | |||
| 2462 | wa_masked_en(wal, | |||
| 2463 | _3D_CHICKEN3((const i915_reg_t){ .reg = (0x2090) }), | |||
| 2464 | _3D_CHICKEN_SF_DISABLE_OBJEND_CULL(1 << 10)); | |||
| 2465 | ||||
| 2466 | /* | |||
| 2467 | * WaVSThreadDispatchOverride:ivb,vlv | |||
| 2468 | * | |||
| 2469 | * This actually overrides the dispatch | |||
| 2470 | * mode for all thread types. | |||
| 2471 | */ | |||
| 2472 | wa_write_clr_set(wal, | |||
| 2473 | GEN7_FF_THREAD_MODE((const i915_reg_t){ .reg = (0x20a0) }), | |||
| 2474 | GEN7_FF_SCHED_MASK0x0077070, | |||
| 2475 | GEN7_FF_TS_SCHED_HW(0x0 << 16) | | |||
| 2476 | GEN7_FF_VS_SCHED_HW(0x0 << 12) | | |||
| 2477 | GEN7_FF_DS_SCHED_HW(0x0 << 4)); | |||
| 2478 | ||||
| 2479 | /* WaPsdDispatchEnable:vlv */ | |||
| 2480 | /* WaDisablePSDDualDispatchEnable:vlv */ | |||
| 2481 | wa_masked_en(wal, | |||
| 2482 | GEN7_HALF_SLICE_CHICKEN1((const i915_reg_t){ .reg = (0xe100) }), | |||
| 2483 | GEN7_MAX_PS_THREAD_DEP(8 << 12) | | |||
| 2484 | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE(1 << 3)); | |||
| 2485 | } | |||
| 2486 | ||||
| 2487 | if (IS_IVYBRIDGE(i915)IS_PLATFORM(i915, INTEL_IVYBRIDGE)) { | |||
| 2488 | /* WaDisableEarlyCull:ivb */ | |||
| 2489 | wa_masked_en(wal, | |||
| 2490 | _3D_CHICKEN3((const i915_reg_t){ .reg = (0x2090) }), | |||
| 2491 | _3D_CHICKEN_SF_DISABLE_OBJEND_CULL(1 << 10)); | |||
| 2492 | ||||
| 2493 | if (0) { /* causes HiZ corruption on ivb:gt1 */ | |||
| 2494 | /* enable HiZ Raw Stall Optimization */ | |||
| 2495 | wa_masked_dis(wal, | |||
| 2496 | CACHE_MODE_0_GEN7((const i915_reg_t){ .reg = (0x7000) }), | |||
| 2497 | HIZ_RAW_STALL_OPT_DISABLE(1 << 2)); | |||
| 2498 | } | |||
| 2499 | ||||
| 2500 | /* | |||
| 2501 | * WaVSThreadDispatchOverride:ivb,vlv | |||
| 2502 | * | |||
| 2503 | * This actually overrides the dispatch | |||
| 2504 | * mode for all thread types. | |||
| 2505 | */ | |||
| 2506 | wa_write_clr_set(wal, | |||
| 2507 | GEN7_FF_THREAD_MODE((const i915_reg_t){ .reg = (0x20a0) }), | |||
| 2508 | GEN7_FF_SCHED_MASK0x0077070, | |||
| 2509 | GEN7_FF_TS_SCHED_HW(0x0 << 16) | | |||
| 2510 | GEN7_FF_VS_SCHED_HW(0x0 << 12) | | |||
| 2511 | GEN7_FF_DS_SCHED_HW(0x0 << 4)); | |||
| 2512 | ||||
| 2513 | /* WaDisablePSDDualDispatchEnable:ivb */ | |||
| 2514 | if (IS_IVB_GT1(i915)(IS_PLATFORM(i915, INTEL_IVYBRIDGE) && (&(i915)-> __info)->gt == 1)) | |||
| 2515 | wa_masked_en(wal, | |||
| 2516 | GEN7_HALF_SLICE_CHICKEN1((const i915_reg_t){ .reg = (0xe100) }), | |||
| 2517 | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE(1 << 3)); | |||
| 2518 | } | |||
| 2519 | ||||
| 2520 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7) { | |||
| 2521 | /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ | |||
| 2522 | wa_masked_en(wal, | |||
| 2523 | RING_MODE_GEN7(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x29c) }), | |||
| 2524 | GFX_TLB_INVALIDATE_EXPLICIT(1 << 13) | GFX_REPLAY_MODE(1 << 11)); | |||
| 2525 | ||||
| 2526 | /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */ | |||
| 2527 | wa_masked_dis(wal, CACHE_MODE_0_GEN7((const i915_reg_t){ .reg = (0x7000) }), RC_OP_FLUSH_ENABLE(1 << 0)); | |||
| 2528 | ||||
| 2529 | /* | |||
| 2530 | * BSpec says this must be set, even though | |||
| 2531 | * WaDisable4x2SubspanOptimization:ivb,hsw | |||
| 2532 | * WaDisable4x2SubspanOptimization isn't listed for VLV. | |||
| 2533 | */ | |||
| 2534 | wa_masked_en(wal, | |||
| 2535 | CACHE_MODE_1((const i915_reg_t){ .reg = (0x7004) }), | |||
| 2536 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE(1 << 6)); | |||
| 2537 | ||||
| 2538 | /* | |||
| 2539 | * BSpec recommends 8x4 when MSAA is used, | |||
| 2540 | * however in practice 16x4 seems fastest. | |||
| 2541 | * | |||
| 2542 | * Note that PS/WM thread counts depend on the WIZ hashing | |||
| 2543 | * disable bit, which we don't touch here, but it's good | |||
| 2544 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |||
| 2545 | */ | |||
| 2546 | wa_masked_field_set(wal, | |||
| 2547 | GEN7_GT_MODE((const i915_reg_t){ .reg = (0x7008) }), | |||
| 2548 | GEN6_WIZ_HASHING_MASK(((1) << 9) | ((1) << 7)), | |||
| 2549 | GEN6_WIZ_HASHING_16x4(((1) << 9) | ((0) << 7))); | |||
| 2550 | } | |||
| 2551 | ||||
| 2552 | if (IS_GRAPHICS_VER(i915, 6, 7)(((&(i915)->__runtime)->graphics.ip.ver) >= (6) && ((&(i915)->__runtime)->graphics.ip.ver) <= (7))) | |||
| 2553 | /* | |||
| 2554 | * We need to disable the AsyncFlip performance optimisations in | |||
| 2555 | * order to use MI_WAIT_FOR_EVENT within the CS. It should | |||
| 2556 | * already be programmed to '1' on all products. | |||
| 2557 | * | |||
| 2558 | * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv | |||
| 2559 | */ | |||
| 2560 | wa_masked_en(wal, | |||
| 2561 | RING_MI_MODE(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x9c) }), | |||
| 2562 | ASYNC_FLIP_PERF_DISABLE((u32)((1UL << (14)) + 0))); | |||
| 2563 | ||||
| 2564 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 6) { | |||
| 2565 | /* | |||
| 2566 | * Required for the hardware to program scanline values for | |||
| 2567 | * waiting. | |||
| 2568 | * WaEnableFlushTlbInvalidationMode:snb | |||
| 2569 | */ | |||
| 2570 | wa_masked_en(wal, | |||
| 2571 | GFX_MODE((const i915_reg_t){ .reg = (0x2520) }), | |||
| 2572 | GFX_TLB_INVALIDATE_EXPLICIT(1 << 13)); | |||
| 2573 | ||||
| 2574 | /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ | |||
| 2575 | wa_masked_en(wal, | |||
| 2576 | _3D_CHICKEN((const i915_reg_t){ .reg = (0x2084) }), | |||
| 2577 | _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB(1 << 10)); | |||
| 2578 | ||||
| 2579 | wa_masked_en(wal, | |||
| 2580 | _3D_CHICKEN3((const i915_reg_t){ .reg = (0x2090) }), | |||
| 2581 | /* WaStripsFansDisableFastClipPerformanceFix:snb */ | |||
| 2582 | _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL(1 << 5) | | |||
| 2583 | /* | |||
| 2584 | * Bspec says: | |||
| 2585 | * "This bit must be set if 3DSTATE_CLIP clip mode is set | |||
| 2586 | * to normal and 3DSTATE_SF number of SF output attributes | |||
| 2587 | * is more than 16." | |||
| 2588 | */ | |||
| 2589 | _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH(1 << 1)); | |||
| 2590 | ||||
| 2591 | /* | |||
| 2592 | * BSpec recommends 8x4 when MSAA is used, | |||
| 2593 | * however in practice 16x4 seems fastest. | |||
| 2594 | * | |||
| 2595 | * Note that PS/WM thread counts depend on the WIZ hashing | |||
| 2596 | * disable bit, which we don't touch here, but it's good | |||
| 2597 | * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). | |||
| 2598 | */ | |||
| 2599 | wa_masked_field_set(wal, | |||
| 2600 | GEN6_GT_MODE((const i915_reg_t){ .reg = (0x20d0) }), | |||
| 2601 | GEN6_WIZ_HASHING_MASK(((1) << 9) | ((1) << 7)), | |||
| 2602 | GEN6_WIZ_HASHING_16x4(((1) << 9) | ((0) << 7))); | |||
| 2603 | ||||
| 2604 | /* WaDisable_RenderCache_OperationalFlush:snb */ | |||
| 2605 | wa_masked_dis(wal, CACHE_MODE_0((const i915_reg_t){ .reg = (0x2120) }), RC_OP_FLUSH_ENABLE(1 << 0)); | |||
| 2606 | ||||
| 2607 | /* | |||
| 2608 | * From the Sandybridge PRM, volume 1 part 3, page 24: | |||
| 2609 | * "If this bit is set, STCunit will have LRA as replacement | |||
| 2610 | * policy. [...] This bit must be reset. LRA replacement | |||
| 2611 | * policy is not supported." | |||
| 2612 | */ | |||
| 2613 | wa_masked_dis(wal, | |||
| 2614 | CACHE_MODE_0((const i915_reg_t){ .reg = (0x2120) }), | |||
| 2615 | CM0_STC_EVICT_DISABLE_LRA_SNB(1 << 5)); | |||
| 2616 | } | |||
| 2617 | ||||
| 2618 | if (IS_GRAPHICS_VER(i915, 4, 6)(((&(i915)->__runtime)->graphics.ip.ver) >= (4) && ((&(i915)->__runtime)->graphics.ip.ver) <= (6))) | |||
| 2619 | /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ | |||
| 2620 | wa_add(wal, RING_MI_MODE(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x9c) }), | |||
| 2621 | 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)({ typeof(((u32)((1UL << (6)) + 0))) _a = (((u32)((1UL << (6)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while ( 0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }), | |||
| 2622 | /* XXX bit doesn't stick on Broadwater */ | |||
| 2623 | IS_I965G(i915)IS_PLATFORM(i915, INTEL_I965G) ? 0 : VS_TIMER_DISPATCH((u32)((1UL << (6)) + 0)), true1); | |||
| 2624 | ||||
| 2625 | if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 4) | |||
| 2626 | /* | |||
| 2627 | * Disable CONSTANT_BUFFER before it is loaded from the context | |||
| 2628 | * image. Once it is loaded, it is executed and the stored | |||
| 2629 | * address may no longer be valid, leading to a GPU hang. | |||
| 2630 | * | |||
| 2631 | * This imposes the requirement that userspace reload their | |||
| 2632 | * CONSTANT_BUFFER on every batch, fortunately a requirement | |||
| 2633 | * they are already accustomed to from before contexts were | |||
| 2634 | * enabled. | |||
| 2635 | */ | |||
| 2636 | wa_add(wal, ECOSKPD(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x1d0) }), | |||
| 2637 | 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE)({ typeof(((u32)((1UL << (4)) + 0))) _a = (((u32)((1UL << (4)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while ( 0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }), | |||
| 2638 | 0 /* XXX bit doesn't stick on Broadwater */, | |||
| 2639 | true1); | |||
| 2640 | } | |||
| 2641 | ||||
| 2642 | static void | |||
| 2643 | xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2644 | { | |||
| 2645 | struct drm_i915_privateinteldrm_softc *i915 = engine->i915; | |||
| 2646 | ||||
| 2647 | /* WaKBLVECSSemaphoreWaitPoll:kbl */ | |||
| 2648 | if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)(IS_PLATFORM(i915, INTEL_KABYLAKE) && (({ int __ret = !!((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A0) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_F0 )))) { | |||
| 2649 | wa_write(wal, | |||
| 2650 | RING_SEMA_WAIT_POLL(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x24c) } ), | |||
| 2651 | 1); | |||
| 2652 | } | |||
| 2653 | } | |||
| 2654 | ||||
| 2655 | static void | |||
| 2656 | ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2657 | { | |||
| 2658 | if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)(IS_PLATFORM(engine->i915, INTEL_PONTEVECCHIO) && ( ({ int __ret = !!((((&(engine->i915)->__runtime)-> step.graphics_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&(engine->i915)->drm))-> dev), "", "drm_WARN_ON(" "((&(engine->i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(engine-> i915)->__runtime)->step.graphics_step) >= (STEP_A0) && ((&(engine->i915)->__runtime)->step.graphics_step ) < (STEP_C0)))) { | |||
| 2659 | /* Wa_14014999345:pvc */ | |||
| 2660 | wa_masked_en(wal, GEN10_CACHE_MODE_SS((const i915_reg_t){ .reg = (0xe420) }), DISABLE_ECC((u32)((1UL << (5)) + 0))); | |||
| 2661 | } | |||
| 2662 | } | |||
| 2663 | ||||
| 2664 | /* | |||
| 2665 | * The bspec performance guide has recommended MMIO tuning settings. These | |||
| 2666 | * aren't truly "workarounds" but we want to program them with the same | |||
| 2667 | * workaround infrastructure to ensure that they're automatically added to | |||
| 2668 | * the GuC save/restore lists, re-applied at the right times, and checked for | |||
| 2669 | * any conflicting programming requested by real workarounds. | |||
| 2670 | * | |||
| 2671 | * Programming settings should be added here only if their registers are not | |||
| 2672 | * part of an engine's register state context. If a register is part of a | |||
| 2673 | * context, then any tuning settings should be programmed in an appropriate | |||
| 2674 | * function invoked by __intel_engine_init_ctx_wa(). | |||
| 2675 | */ | |||
| 2676 | static void | |||
| 2677 | add_render_compute_tuning_settings(struct drm_i915_privateinteldrm_softc *i915, | |||
| 2678 | struct i915_wa_list *wal) | |||
| 2679 | { | |||
| 2680 | if (IS_PONTEVECCHIO(i915)IS_PLATFORM(i915, INTEL_PONTEVECCHIO)) { | |||
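| | /* Tuning: program the recommended L3 scrub behavior (downgrade shared cachelines, 4B-per-clock scrub rate). */ | |||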
| 2681 | wa_write(wal, XEHPC_L3SCRUB((const i915_reg_t){ .reg = (0xb18c) }), | |||
| 2682 | SCRUB_CL_DWNGRADE_SHARED((u32)((1UL << (12)) + 0)) | SCRUB_RATE_4B_PER_CLK((u32)((((typeof(((u32)((((~0UL) >> (64 - (2) - 1)) & ((~0UL) << (0))) + 0))))(0x6) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (2) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (2) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0))); | |||
| 2683 | } | |||
| 2684 | ||||
| 2685 | if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2)) { | |||
| 2686 | wa_write_or(wal, XEHP_L3SCQREG7((const i915_reg_t){ .reg = (0xb188) }), BLEND_FILL_CACHING_OPT_DIS((u32)((1UL << (3)) + 0))); | |||
| 2687 | wa_write_clr_set(wal, RT_CTRL((const i915_reg_t){ .reg = (0xe530) }), STACKID_CTRL((u32)((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (5))) + 0)), STACKID_CTRL_512((u32)((((typeof(((u32)((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (5))) + 0))))(0x2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (5))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (6) - 1)) & ((~0UL) << (5))) + 0)))) + 0 + 0 + 0 + 0))); | |||
| 2688 | ||||
| 2689 | /* | |||
| 2690 | * This is also listed as Wa_22012654132 for certain DG2 | |||
| 2691 | * steppings, but the tuning setting programming is a superset | |||
| 2692 | * since it applies to all DG2 variants and steppings. | |||
| 2693 | * | |||
| 2694 | * Note that register 0xE420 is write-only and cannot be read | |||
| 2695 | * back for verification on DG2 (due to Wa_14012342262), so | |||
| 2696 | * we need to explicitly skip the readback. | |||
| 2697 | */ | |||
| 2698 | wa_add(wal, GEN10_CACHE_MODE_SS((const i915_reg_t){ .reg = (0xe420) }), 0, | |||
| 2699 | _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC)({ typeof(((u32)((1UL << (3)) + 0))) _a = (((u32)((1UL << (3)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while ( 0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p (_a) && __builtin_constant_p(_a)) do { } while (0); ( (_a) << 16 | (_a)); }); }), | |||
| 2700 | 0 /* write-only, so skip validation */, | |||
| 2701 | true1); | |||
| 2702 | } | |||
| 2703 | ||||
| 2704 | /* | |||
| 2705 | * This tuning setting proves beneficial only on ATS-M designs; the | |||
| 2706 | * default "age based" setting is optimal on regular DG2 and other | |||
| 2707 | * platforms. | |||
| 2708 | */ | |||
| 2709 | if (INTEL_INFO(i915)(&(i915)->__info)->tuning_thread_rr_after_dep) | |||
| 2710 | wa_masked_field_set(wal, GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), THREAD_EX_ARB_MODE((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (2))) + 0)), | |||
| 2711 | THREAD_EX_ARB_MODE_RR_AFTER_DEP((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (2))) + 0))))(0x2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (2))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (2))) + 0)))) + 0 + 0 + 0 + 0))); | |||
| 2712 | } | |||
| 2713 | ||||
| 2714 | /* | |||
| 2715 | * The workarounds in this function apply to shared registers in | |||
| 2716 | * the general render reset domain that aren't tied to a | |||
| 2717 | * specific engine. Since all render+compute engines get reset | |||
| 2718 | * together, and the contents of these registers are lost during | |||
| 2719 | * the shared render domain reset, we'll define such workarounds | |||
| 2720 | * here and then add them to just a single RCS or CCS engine's | |||
| 2721 | * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag). | |||
| 2722 | */ | |||
| 2723 | static void | |||
| 2724 | general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2725 | { | |||
| 2726 | struct drm_i915_privateinteldrm_softc *i915 = engine->i915; | |||
| 2727 | ||||
| 2728 | add_render_compute_tuning_settings(i915, wal); | |||
| 2729 | ||||
| 2730 | if (IS_PONTEVECCHIO(i915)IS_PLATFORM(i915, INTEL_PONTEVECCHIO)) { | |||
| 2731 | /* Wa_16016694945 */ | |||
| 2732 | wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0((const i915_reg_t){ .reg = (0xb01c) }), XEHPC_OVRLSCCC((u32)((1UL << (0)) + 0))); | |||
| 2733 | } | |||
| 2734 | ||||
| 2735 | if (IS_XEHPSDV(i915)IS_PLATFORM(i915, INTEL_XEHPSDV)) { | |||
| 2736 | /* Wa_1409954639 */ | |||
| 2737 | wa_masked_en(wal, | |||
| 2738 | GEN8_ROW_CHICKEN((const i915_reg_t){ .reg = (0xe4f0) }), | |||
| 2739 | SYSTOLIC_DOP_CLOCK_GATING_DIS((u32)((1UL << (10)) + 0))); | |||
| 2740 | ||||
| 2741 | /* Wa_1607196519 */ | |||
| 2742 | wa_masked_en(wal, | |||
| 2743 | GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), | |||
| 2744 | GEN12_DISABLE_GRF_CLEAR((u32)((1UL << (13)) + 0))); | |||
| 2745 | ||||
| 2746 | /* Wa_14010670810:xehpsdv */ | |||
| 2747 | wa_write_or(wal, XEHP_L3NODEARBCFG((const i915_reg_t){ .reg = (0xb0b4) }), XEHP_LNESPARE((u32)((1UL << (19)) + 0))); | |||
| 2748 | ||||
| 2749 | /* Wa_14010449647:xehpsdv */ | |||
| 2750 | wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1((const i915_reg_t){ .reg = (0xe100) }), | |||
| 2751 | GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE(1 << 3)); | |||
| 2752 | ||||
| 2753 | /* Wa_18011725039:xehpsdv */ | |||
| 2754 | if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)(IS_PLATFORM(i915, INTEL_XEHPSDV) && (({ int __ret = ! !((((&(i915)->__runtime)->step.graphics_step) == STEP_NONE )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& (i915)->drm))->dev), "", "drm_WARN_ON(" "((&(i915)->__runtime)->step.graphics_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(i915)-> __runtime)->step.graphics_step) >= (STEP_A1) && ((&(i915)->__runtime)->step.graphics_step) < (STEP_B0 )))) { | |||
| 2755 | wa_masked_dis(wal, MLTICTXCTL((const i915_reg_t){ .reg = (0xb170) }), TDONRENDER((u32)((1UL << (2)) + 0))); | |||
| 2756 | wa_write_or(wal, L3SQCREG1_CCS0((const i915_reg_t){ .reg = (0xb200) }), FLUSHALLNONCOH((u32)((1UL << (5)) + 0))); | |||
| 2757 | } | |||
| 2758 | ||||
| 2759 | /* Wa_14012362059:xehpsdv */ | |||
| 2760 | wa_write_or(wal, GEN12_MERT_MOD_CTRL((const i915_reg_t){ .reg = (0xcf28) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2761 | ||||
| 2762 | /* Wa_14014368820:xehpsdv */ | |||
| 2763 | wa_write_or(wal, GEN12_GAMCNTRL_CTRL((const i915_reg_t){ .reg = (0xcf54) }), INVALIDATION_BROADCAST_MODE_DIS((u32)((1UL << (12)) + 0)) | | |||
| 2764 | GLOBAL_INVALIDATION_MODE((u32)((1UL << (2)) + 0))); | |||
| 2765 | } | |||
| 2766 | ||||
| 2767 | if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2) || IS_PONTEVECCHIO(i915)IS_PLATFORM(i915, INTEL_PONTEVECCHIO)) { | |||
| 2768 | /* Wa_14015227452:dg2,pvc */ | |||
| 2769 | wa_masked_en(wal, GEN9_ROW_CHICKEN4((const i915_reg_t){ .reg = (0xe48c) }), XEHP_DIS_BBL_SYSPIPE((u32)((1UL << (11)) + 0))); | |||
| 2770 | ||||
| 2771 | /* Wa_22014226127:dg2,pvc */ | |||
| 2772 | wa_write_or(wal, LSC_CHICKEN_BIT_0((const i915_reg_t){ .reg = (0xe7c8) }), DISABLE_D8_D16_COASLESCE((u32)((1UL << (30)) + 0))); | |||
| 2773 | ||||
| 2774 | /* Wa_16015675438:dg2,pvc */ | |||
| 2775 | wa_masked_en(wal, FF_SLICE_CS_CHICKEN2((const i915_reg_t){ .reg = (0x20e4) }), GEN12_PERF_FIX_BALANCING_CFE_DISABLE((u32)((1UL << (15)) + 0))); | |||
| 2776 | ||||
| 2777 | /* Wa_18018781329:dg2,pvc */ | |||
| 2778 | wa_write_or(wal, RENDER_MOD_CTRL((const i915_reg_t){ .reg = (0xcf2c) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2779 | wa_write_or(wal, COMP_MOD_CTRL((const i915_reg_t){ .reg = (0xcf30) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2780 | wa_write_or(wal, VDBX_MOD_CTRL((const i915_reg_t){ .reg = (0xcf34) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2781 | wa_write_or(wal, VEBX_MOD_CTRL((const i915_reg_t){ .reg = (0xcf38) }), FORCE_MISS_FTLB((u32)((1UL << (3)) + 0))); | |||
| 2782 | } | |||
| 2783 | } | |||
| 2784 | ||||
| 2785 | static void | |||
| 2786 | engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
| 2787 | { | |||
| 2788 | if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4)0) | |||
| 2789 | return; | |||
| 2790 | ||||
| 2791 | engine_fake_wa_init(engine, wal); | |||
| 2792 | ||||
| 2793 | /* | |||
| 2794 | * These are common workarounds that just need to be applied | |||
| 2795 | * to a single RCS/CCS engine's workaround list since | |||
| 2796 | * they're reset as part of the general render domain reset. | |||
| 2797 | */ | |||
| 2798 | if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE(1UL << (11))) | |||
| 2799 | general_render_compute_wa_init(engine, wal); | |||
| 2800 | ||||
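| | /* Then the per-class workarounds: compute, render, or the remaining command streamers. */ | |||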
| 2801 | if (engine->class == COMPUTE_CLASS5) | |||
| 2802 | ccs_engine_wa_init(engine, wal); | |||
| 2803 | else if (engine->class == RENDER_CLASS0) | |||
| 2804 | rcs_engine_wa_init(engine, wal); | |||
| 2805 | else | |||
| 2806 | xcs_engine_wa_init(engine, wal); | |||
| 2807 | } | |||
| 2808 | ||||
| 2809 | void intel_engine_init_workarounds(struct intel_engine_cs *engine) | |||
| 2810 | { | |||
| 2811 | struct i915_wa_list *wal = &engine->wa_list; | |||
| 2812 | ||||
| 2813 | if (GRAPHICS_VER(engine->i915)((&(engine->i915)->__runtime)->graphics.ip.ver) < 4) | |||
| 2814 | return; | |||
| 2815 | ||||
| 2816 | wa_init_start(wal, "engine", engine->name); | |||
| 2817 | engine_init_workarounds(engine, wal); | |||
| 2818 | wa_init_finish(wal); | |||
| 2819 | } | |||
| 2820 | ||||
| 2821 | void intel_engine_apply_workarounds(struct intel_engine_cs *engine) | |||
| 2822 | { | |||
| 2823 | wa_list_apply(engine->gt, &engine->wa_list); | |||
| 2824 | } | |||
| 2825 | ||||
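| | /* Offset ranges of multicast/replicated (MCR) registers per generation; see mcr_range() below. */ | |||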
| 2826 | static const struct i915_range mcr_ranges_gen8[] = { | |||
| 2827 | { .start = 0x5500, .end = 0x55ff }, | |||
| 2828 | { .start = 0x7000, .end = 0x7fff }, | |||
| 2829 | { .start = 0x9400, .end = 0x97ff }, | |||
| 2830 | { .start = 0xb000, .end = 0xb3ff }, | |||
| 2831 | { .start = 0xe000, .end = 0xe7ff }, | |||
| 2832 | {}, | |||
| 2833 | }; | |||
| 2834 | ||||
| 2835 | static const struct i915_range mcr_ranges_gen12[] = { | |||
| 2836 | { .start = 0x8150, .end = 0x815f }, | |||
| 2837 | { .start = 0x9520, .end = 0x955f }, | |||
| 2838 | { .start = 0xb100, .end = 0xb3ff }, | |||
| 2839 | { .start = 0xde80, .end = 0xe8ff }, | |||
| 2840 | { .start = 0x24a00, .end = 0x24a7f }, | |||
| 2841 | {}, | |||
| 2842 | }; | |||
| 2843 | ||||
| 2844 | static const struct i915_range mcr_ranges_xehp[] = { | |||
| 2845 | { .start = 0x4000, .end = 0x4aff }, | |||
| 2846 | { .start = 0x5200, .end = 0x52ff }, | |||
| 2847 | { .start = 0x5400, .end = 0x7fff }, | |||
| 2848 | { .start = 0x8140, .end = 0x815f }, | |||
| 2849 | { .start = 0x8c80, .end = 0x8dff }, | |||
| 2850 | { .start = 0x94d0, .end = 0x955f }, | |||
| 2851 | { .start = 0x9680, .end = 0x96ff }, | |||
| 2852 | { .start = 0xb000, .end = 0xb3ff }, | |||
| 2853 | { .start = 0xc800, .end = 0xcfff }, | |||
| 2854 | { .start = 0xd800, .end = 0xd8ff }, | |||
| 2855 | { .start = 0xdc00, .end = 0xffff }, | |||
| 2856 | { .start = 0x17000, .end = 0x17fff }, | |||
| 2857 | { .start = 0x24a00, .end = 0x24a7f }, | |||
| 2858 | {}, | |||
| 2859 | }; | |||

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}
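
/*
 * Illustrative note (not part of the original source): on a GEN8 platform an
 * offset such as 0x9420 falls inside the 0x9400-0x97ff entry of
 * mcr_ranges_gen8, so mcr_range() returns true and that register is skipped
 * by the CS-based verification below.
 */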

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
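
/*
 * Sketch of the command stream emitted above (an assumption based on the
 * usual MI_STORE_REGISTER_MEM layout, not part of the original source):
 * each non-MCR workaround register consumes four dwords,
 *
 *   dw0: srm opcode (length bumped by one on GEN8+ for 64-bit addressing)
 *   dw1: MMIO offset of the workaround register
 *   dw2: GGTT address of slot i in the scratch buffer
 *   dw3: upper address bits (zero here; effectively padding on pre-GEN8)
 */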

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}
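
/*
 * Hypothetical usage sketch (not from the original source): a caller that has
 * just re-applied the engine workarounds could cross-check them with
 *
 *   intel_engine_apply_workarounds(engine);
 *   intel_engine_verify_workarounds(engine, "resume");
 *
 * where the string only serves to tag any mismatch reported by wa_verify().
 */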

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef __I915_REG_DEFS__
#define __I915_REG_DEFS__

#include <linux/bitfield.h>
#include <linux/bits.h>

/**
 * REG_BIT() - Prepare a u32 bit value
 * @__n: 0-based bit number
 *
 * Local wrapper for BIT() to force u32, with compile time checks.
 *
 * @return: Value with bit @__n set.
 */
#define REG_BIT(__n)							\
	((u32)(BIT(__n) +						\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__n) &&			\
				 ((__n) < 0 || (__n) > 31))))
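
/*
 * Illustrative example (not part of the original header): REG_BIT(7)
 * evaluates to (u32)0x00000080, while REG_BIT(32) with a constant argument
 * fails the build via BUILD_BUG_ON_ZERO().
 */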

/**
 * REG_GENMASK() - Prepare a continuous u32 bitmask
 * @__high: 0-based high bit
 * @__low: 0-based low bit
 *
 * Local wrapper for GENMASK() to force u32, with compile time checks.
 *
 * @return: Continuous bitmask from @__high to @__low, inclusive.
 */
#define REG_GENMASK(__high, __low)					\
	((u32)(GENMASK(__high, __low) +					\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&		\
				 __is_constexpr(__low) &&		\
				 ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
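
/*
 * Illustrative example (not part of the original header):
 * REG_GENMASK(15, 8) evaluates to (u32)0x0000ff00, i.e. bits 8..15 set.
 */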

/**
 * REG_GENMASK64() - Prepare a continuous u64 bitmask
 * @__high: 0-based high bit
 * @__low: 0-based low bit
 *
 * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
 *
 * @return: Continuous bitmask from @__high to @__low, inclusive.
 */
#define REG_GENMASK64(__high, __low)					\
	((u64)(GENMASK_ULL(__high, __low) +				\
	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&		\
				 __is_constexpr(__low) &&		\
				 ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
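
/*
 * Illustrative example (not part of the original header):
 * REG_GENMASK64(63, 32) evaluates to (u64)0xffffffff00000000.
 */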

/*
 * Local integer constant expression version of is_power_of_2().
 */
#define IS_POWER_OF_2(__x)	((__x) && (((__x) & ((__x) - 1)) == 0))
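
/*
 * Illustrative example (not part of the original header): IS_POWER_OF_2(0x40)
 * is true, IS_POWER_OF_2(0x30) and IS_POWER_OF_2(0) are false. Unlike
 * is_power_of_2() it stays an integer constant expression, which the
 * REG_FIELD_PREP() build-time checks below rely on.
 */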

/**
 * REG_FIELD_PREP() - Prepare a u32 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to put in the field
 *
 * Local copy of FIELD_PREP() to generate an integer constant expression, force
 * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
 *
 * @return: @__val masked and shifted into the field defined by @__mask.
 */
#define REG_FIELD_PREP(__mask, __val)						\
	((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +			\
	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) +		\
	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
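
/*
 * Illustrative example (not part of the original header): with a field mask of
 * REG_GENMASK(15, 8), REG_FIELD_PREP(REG_GENMASK(15, 8), 0x2a) evaluates to
 * (u32)0x00002a00; a constant value that does not fit the field (e.g. 0x1ff)
 * trips one of the BUILD_BUG_ON_ZERO() checks at compile time instead.
 */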

/**
 * REG_FIELD_GET() - Extract a u32 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to extract the bitfield value from
 *
 * Local wrapper for FIELD_GET() to force u32 and for consistency with
 * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
 *
 * @return: Masked and shifted value of the field defined by @__mask in @__val.
 */
#define REG_FIELD_GET(__mask, __val)	((u32)FIELD_GET(__mask, __val))
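
/*
 * Illustrative example (not part of the original header):
 * REG_FIELD_GET(REG_GENMASK(15, 8), 0x12345678) evaluates to (u32)0x56.
 */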

/**
 * REG_FIELD_GET64() - Extract a u64 bitfield value
 * @__mask: shifted mask defining the field's length and position
 * @__val: value to extract the bitfield value from
 *
 * Local wrapper for FIELD_GET() to force u64 and for consistency with
 * REG_GENMASK64().
 *
 * @return: Masked and shifted value of the field defined by @__mask in @__val.
 */
#define REG_FIELD_GET64(__mask, __val)	((u64)FIELD_GET(__mask, __val))
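
/*
 * Illustrative example (not part of the original header):
 * REG_FIELD_GET64(REG_GENMASK64(63, 32), 0x0123456789abcdefULL) evaluates
 * to (u64)0x01234567.
 */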

typedef struct {
	u32 reg;
} i915_reg_t;

#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

#define INVALID_MMIO_REG _MMIO(0)
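
/*
 * Hypothetical usage sketch (the register name below is made up, not from the
 * original header): a register definition and the helpers defined below
 * combine as
 *
 *   #define EXAMPLE_REG _MMIO(0x9420)
 *
 *   i915_mmio_reg_offset(EXAMPLE_REG)     == 0x9420
 *   i915_mmio_reg_valid(EXAMPLE_REG)      == true   (offset is non-zero)
 *   i915_mmio_reg_valid(INVALID_MMIO_REG) == false
 */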

static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
{
	return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
}

static inline bool i915_mmio_reg_valid(i915_reg_t reg)
{
	return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}

#define VLV_DISPLAY_BASE	0x180000

#endif /* __I915_REG_DEFS__ */