File:    dev/pci/drm/i915/intel_uncore.h
Warning: line 358, column 1: Passed-by-value struct argument contains uninitialized data (e.g., field: 'reg')
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"

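/*
 * Iterate the device's power well array, forward or in reverse, visiting
 * only the wells whose domain bit mask includes @__domain.
 */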
#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
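
/*
 * A minimal readout-side usage sketch (hypothetical caller, the domain is
 * only an example):
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read PIPE_A hardware state, with modeset locks held ...
 *	}
 */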
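/*
 * Demote @target_dc_state step by step from the deepest state towards
 * DC_STATE_DISABLE until it lands on a state present in the platform's
 * allowed_dc_mask.
 */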
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->display.dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and then
	 * disable it to make the new target DC state take effect.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	dev_priv->display.dmc.target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

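/*
 * Collect the union of both pending async-put masks into @mask: slot [0]
 * holds the domains the currently queued work will release, slot [1] holds
 * the domains queued up while that work was already pending.
 */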
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

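/*
 * If a release of @domain is still pending in the async-put machinery, take
 * over that reference instead of acquiring a new one: clear the domain from
 * both pending masks and, once nothing is left pending, cancel the delayed
 * work and drop the raw wakeref held for it. Returns true on takeover.
 */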
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
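
/*
 * Typical get/put pairing, as a sketch only (the domain and the work done
 * under the reference are placeholders):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access the hardware block ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */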

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
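
/*
 * A common readout pattern with the conditional variant (sketch only):
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;	(the domain is off, nothing to read out)
 *	... read hardware state ...
 *	intel_display_power_put(dev_priv, domain, wakeref);
 */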

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->display.power.domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

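/*
 * Stash the raw wakeref that keeps the device awake until the delayed put
 * work has run, and schedule that work after a 100 ms grace period.
 */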
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     display.power.domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must already hold a raw wakeref; upgrade that to a
	 * proper wakeref to make the state checker happy about the HW access
	 * during power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.power.domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled;
 * it should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

#ifdef notyet
	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
#endif

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
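
/*
 * Sketch of the domain-set helpers in use (hypothetical caller, assuming a
 * zero-initialized set is valid):
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	... use the hardware ...
 *	intel_display_power_put_mask_in_set(i915, &set, &set.mask);
 */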
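/*
 * Normalize the disable_power_well module parameter: a negative value means
 * "auto", which currently defaults to enabled (1).
 */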
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

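/*
 * Derive the mask of DC states the driver may enter from the deepest DC
 * state the platform supports and the enable_dc module parameter. DC9 is
 * handled separately since it does not depend on the DMC firmware.
 */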
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG2(dev_priv))
		max_dc = 1;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
		DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
| 969 | ||||
| 970 | return mask; | |||
| 971 | } | |||
| 972 | ||||
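/*
 * Worked example for get_allowed_dc_mask(): on a DISPLAY_VER 12 part
 * (not DG1/DG2) with enable_dc=-1 and disable_power_well left enabled,
 * max_dc is 4 and requested_dc follows it, so the returned mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */
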
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->display.dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->display.dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	rw_init(&power_domains->lock, "ipdl");

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

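/*
 * gen9_dbuf_slice_set() above follows the usual request/ack handshake:
 * set or clear DBUF_POWER_REQUEST, post the write, give the slice 10 us
 * to settle, then check that DBUF_POWER_STATE reflects the request.
 */
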
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes an assertion to trigger due to a race condition:
	 * gen9_assert_dbuf_enabled might preempt this once the registers
	 * have already been updated but dev_priv has not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->display.dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

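/*
 * Example: on a platform whose slice_mask covers DBUF_S1 and DBUF_S2,
 * gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2)) powers
 * both slices up, and a later call with just BIT(DBUF_S1) powers S2
 * back down, since every supported slice is driven to the requested
 * state on each call.
 */
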
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->display.dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->display.dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
	       MBUS_ABOX_BT_CREDIT_POOL2_MASK |
	       MBUS_ABOX_B_CREDIT_MASK |
	       MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
	      MBUS_ABOX_B_CREDIT(1) |
	      MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

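/*
 * icl_mbus_init() above programs one identical set of BT/B/BW credit
 * values into every MBUS_ABOX_CTL instance named in abox_regs; on
 * display version 12 the abox0 instance is forced into the mask even
 * when only abox1/abox2 carry pixel data, per the comment in the
 * function.
 */
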
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

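/*
 * Note the asymmetry in the D_COMP accessors above: Haswell writes
 * D_COMP through the pcode mailbox (GEN6_PCODE_WRITE_D_COMP) while
 * Broadwell exposes it as a plain MMIO register, which is why the read
 * helper likewise picks D_COMP_HSW vs D_COMP_BDW.
 */
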
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually take
 * care of the remaining conditions: disable the interrupts, clocks and switch
 * the LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
 * interrupt, we can hard hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" in the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

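/*
 * hsw_enable_pc8()/hsw_disable_pc8() bracket the runtime-PM transition
 * described above: entering C8+ clears PCH_LP_PARTITION_LEVEL_DISABLE
 * on LPT-LP, stops the PCH DP clockout and disables LCPLL (switching
 * the CD clock source to Fclk and allowing LCPLL power down); leaving
 * C8+ roughly undoes those steps in reverse order.
 */
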
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

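/*
 * intel_pch_reset_handshake() is shared by all the *_display_core_init()
 * flows below: most platforms enable the handshake unless the PCH is a
 * NOP, while BXT must keep it disabled since there is no PCH to
 * acknowledge the reset.
 */
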
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

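/*
 * Note the ordering in skl_display_core_init() above: DC states off
 * first, then the PCH reset handshake, power well 1 plus Misc I/O,
 * CDCLK, DBUF, and finally the DMC firmware reload on resume. The
 * matching *_uninit() below tears this down in roughly reverse order.
 */
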
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

| 1533 | struct buddy_page_mask { | |||
| 1534 | u32 page_mask; | |||
| 1535 | u8 type; | |||
| 1536 | u8 num_channels; | |||
| 1537 | }; | |||
| 1538 | ||||
| 1539 | static const struct buddy_page_mask tgl_buddy_page_masks[] = { | |||
| 1540 | { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, | |||
| 1541 | { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, | |||
| 1542 | { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, | |||
| 1543 | { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, | |||
| 1544 | { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, | |||
| 1545 | { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, | |||
| 1546 | { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, | |||
| 1547 | { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, | |||
| 1548 | {} | |||
| 1549 | }; | |||
| 1550 | ||||
| 1551 | static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { | |||
| 1552 | { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, | |||
| 1553 | { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, | |||
| 1554 | { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, | |||
| 1555 | { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, | |||
| 1556 | { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, | |||
| 1557 | { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, | |||
| 1558 | { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, | |||
| 1559 | { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, | |||
| 1560 | {} | |||
| 1561 | }; | |||
| 1562 | ||||
| 1563 | static void tgl_bw_buddy_init(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1564 | { | |||
| 1565 | enum intel_dram_type type = dev_priv->dram_info.type; | |||
| 1566 | u8 num_channels = dev_priv->dram_info.num_channels; | |||
| 1567 | const struct buddy_page_mask *table; | |||
| 1568 | unsigned long abox_mask = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->display.abox_mask; | |||
| 1569 | int config, i; | |||
| 1570 | ||||
| 1571 | /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ | |||
| 1572 | if (IS_DGFX(dev_priv)((&(dev_priv)->__info)->is_dgfx) && !IS_DG1(dev_priv)IS_PLATFORM(dev_priv, INTEL_DG1)) | |||
| 1573 | return; | |||
| 1574 | ||||
| 1575 | if (IS_ALDERLAKE_S(dev_priv)IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S) || | |||
| 1576 | IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)(IS_PLATFORM(dev_priv, INTEL_DG1) && (({ int __ret = ! !((((&(dev_priv)->__runtime)->step.display_step) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(dev_priv)->drm))->dev), "", "drm_WARN_ON(" "((&(dev_priv)->__runtime)->step.display_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(dev_priv)-> __runtime)->step.display_step) >= (STEP_A0) && ( (&(dev_priv)->__runtime)->step.display_step) < ( STEP_B0))) || | |||
| 1577 | IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)(IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) && (({ int __ret = !!((((&(dev_priv)->__runtime)->step.display_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(dev_priv)->drm))->dev), "", "drm_WARN_ON(" "((&(dev_priv)->__runtime)->step.display_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(dev_priv)-> __runtime)->step.display_step) >= (STEP_A0) && ( (&(dev_priv)->__runtime)->step.display_step) < ( STEP_B0))) || | |||
| 1578 | IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)(IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) && (({ int __ret = !!((((&(dev_priv)->__runtime)->step.display_step ) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&(dev_priv)->drm))->dev), "", "drm_WARN_ON(" "((&(dev_priv)->__runtime)->step.display_step) == STEP_NONE" ")"); __builtin_expect(!!(__ret), 0); }), ((&(dev_priv)-> __runtime)->step.display_step) >= (STEP_A0) && ( (&(dev_priv)->__runtime)->step.display_step) < ( STEP_C0)))) | |||
| 1579 | /* Wa_1409767108:tgl,dg1,adl-s */ | |||
| 1580 | table = wa_1409767108_buddy_page_masks; | |||
| 1581 | else | |||
| 1582 | table = tgl_buddy_page_masks; | |||
| 1583 | ||||
| 1584 | for (config = 0; table[config].page_mask != 0; config++) | |||
| 1585 | if (table[config].num_channels == num_channels && | |||
| 1586 | table[config].type == type) | |||
| 1587 | break; | |||
| 1588 | ||||
| 1589 | if (table[config].page_mask == 0) { | |||
| 1590 | drm_dbg(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv ->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown memory configuration; disabling address buddy logic.\n" ) | |||
| 1591 | "Unknown memory configuration; disabling address buddy logic.\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv ->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Unknown memory configuration; disabling address buddy logic.\n" ); | |||
| 1592 | for_each_set_bit(i, &abox_mask, sizeof(abox_mask))for ((i) = find_first_bit((&abox_mask), (sizeof(abox_mask ))); (i) < (sizeof(abox_mask)); (i) = find_next_bit((& abox_mask), (sizeof(abox_mask)), (i) + 1)) | |||
| 1593 | intel_de_write(dev_priv, BW_BUDDY_CTL(i)((const i915_reg_t){ .reg = (((0x45130) + (i) * ((0x45140) - ( 0x45130)))) }), | |||
| 1594 | BW_BUDDY_DISABLE((u32)((1UL << (31)) + 0))); | |||
| 1595 | } else { | |||
| 1596 | for_each_set_bit(i, &abox_mask, sizeof(abox_mask))for ((i) = find_first_bit((&abox_mask), (sizeof(abox_mask ))); (i) < (sizeof(abox_mask)); (i) = find_next_bit((& abox_mask), (sizeof(abox_mask)), (i) + 1)) { | |||
| 1597 | intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i)((const i915_reg_t){ .reg = (((0x45134) + (i) * ((0x45144) - ( 0x45134)))) }), | |||
| 1598 | table[config].page_mask); | |||
| 1599 | ||||
| 1600 | /* Wa_22010178259:tgl,dg1,rkl,adl-s */ | |||
| 1601 | if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) == 12) | |||
| 1602 | intel_de_rmw(dev_priv, BW_BUDDY_CTL(i)((const i915_reg_t){ .reg = (((0x45130) + (i) * ((0x45140) - ( 0x45130)))) }), | |||
| 1603 | BW_BUDDY_TLB_REQ_TIMER_MASK((u32)((((~0UL) >> (64 - (21) - 1)) & ((~0UL) << (16))) + 0)), | |||
| 1604 | BW_BUDDY_TLB_REQ_TIMER(0x8)((u32)((((typeof(((u32)((((~0UL) >> (64 - (21) - 1)) & ((~0UL) << (16))) + 0))))(0x8) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (21) - 1)) & ((~0UL) << (16))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (21 ) - 1)) & ((~0UL) << (16))) + 0)))) + 0 + 0 + 0 + 0 ))); | |||
| 1605 | } | |||
| 1606 | } | |||
| 1607 | } | |||
| 1608 | ||||
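The lookup above relies on a sentinel-terminated table: the loop walks entries until page_mask == 0, so a failed match leaves config pointing at the terminator and the driver falls back to disabling the buddy logic. A minimal self-contained sketch of the same idiom follows; the struct and values are illustrative stand-ins, not the driver's actual tables.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's buddy page-mask entries. */
struct page_mask_entry {
	uint8_t num_channels;
	int type;               /* stands in for enum intel_dram_type */
	uint32_t page_mask;     /* 0 terminates the table */
};

static const struct page_mask_entry table[] = {
	{ .num_channels = 1, .type = 0, .page_mask = 0x16 },
	{ .num_channels = 2, .type = 0, .page_mask = 0x1c },
	{ 0 }                   /* sentinel: page_mask == 0 */
};

int main(void)
{
	uint8_t num_channels = 2;
	int type = 0;
	int config;

	/* Walk until a match or the zero sentinel, as tgl_bw_buddy_init() does. */
	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0)
		printf("unknown configuration, disable buddy logic\n");
	else
		printf("page_mask = 0x%x\n", table[config].page_mask);
	return 0;
}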
| 1609 | static void icl_display_core_init(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 1610 | bool_Bool resume) | |||
| 1611 | { | |||
| 1612 | struct i915_power_domains *power_domains = &dev_priv->display.power.domains; | |||
| 1613 | struct i915_power_well *well; | |||
| 1614 | u32 val; | |||
| 1615 | ||||
| 1616 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0); | |||
| 1617 | ||||
| 1618 | /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ | |||
| 1619 | if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) >= PCH_TGP && | |||
| 1620 | INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) < PCH_DG1) | |||
| 1621 | intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }), 0, | |||
| 1622 | PCH_DPMGUNIT_CLOCK_GATE_DISABLE(1 << 15)); | |||
| 1623 | ||||
| 1624 | /* 1. Enable PCH reset handshake. */ | |||
| 1625 | intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP)); | |||
| 1626 | ||||
| 1627 | if (!HAS_DISPLAY(dev_priv)((&(dev_priv)->__runtime)->pipe_mask != 0)) | |||
| 1628 | return; | |||
| 1629 | ||||
| 1630 | /* 2. Initialize all combo phys */ | |||
| 1631 | intel_combo_phy_init(dev_priv); | |||
| 1632 | ||||
| 1633 | /* | |||
| 1634 | * 3. Enable Power Well 1 (PG1). | |||
| 1635 | * The AUX IO power wells will be enabled on demand. | |||
| 1636 | */ | |||
| 1637 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 1638 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | |||
| 1639 | intel_power_well_enable(dev_priv, well); | |||
| 1640 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 1641 | ||||
| 1642 | /* 4. Enable CDCLK. */ | |||
| 1643 | intel_cdclk_init_hw(dev_priv); | |||
| 1644 | ||||
| 1645 | if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 12) | |||
| 1646 | gen12_dbuf_slices_config(dev_priv); | |||
| 1647 | ||||
| 1648 | /* 5. Enable DBUF. */ | |||
| 1649 | gen9_dbuf_enable(dev_priv); | |||
| 1650 | ||||
| 1651 | /* 6. Setup MBUS. */ | |||
| 1652 | icl_mbus_init(dev_priv); | |||
| 1653 | ||||
| 1654 | /* 7. Program arbiter BW_BUDDY registers */ | |||
| 1655 | if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 12) | |||
| 1656 | tgl_bw_buddy_init(dev_priv); | |||
| 1657 | ||||
| 1658 | /* 8. Ensure PHYs have completed calibration and adaptation */ | |||
| 1659 | if (IS_DG2(dev_priv)IS_PLATFORM(dev_priv, INTEL_DG2)) | |||
| 1660 | intel_snps_phy_wait_for_calibration(dev_priv); | |||
| 1661 | ||||
| 1662 | if (resume) | |||
| 1663 | intel_dmc_load_program(dev_priv); | |||
| 1664 | ||||
| 1665 | /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ | |||
| 1666 | if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 12) { | |||
| 1667 | val = DCPR_CLEAR_MEMSTAT_DIS((u32)((1UL << (24)) + 0)) | DCPR_SEND_RESP_IMM((u32)((1UL << (25)) + 0)) | | |||
| 1668 | DCPR_MASK_LPMODE((u32)((1UL << (26)) + 0)) | DCPR_MASK_MAXLATENCY_MEMUP_CLR((u32)((1UL << (27)) + 0)); | |||
| 1669 | intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2((const i915_reg_t){ .reg = (0x46434) }), 0, val); | |||
| 1670 | } | |||
| 1671 | ||||
| 1672 | /* Wa_14011503030:xelpd */ | |||
| 1673 | if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 13) | |||
| 1674 | intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK((const i915_reg_t){ .reg = (0x4421c) }), ~0); | |||
| 1675 | } | |||
| 1676 | ||||
| 1677 | static void icl_display_core_uninit(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1678 | { | |||
| 1679 | struct i915_power_domains *power_domains = &dev_priv->display.power.domains; | |||
| 1680 | struct i915_power_well *well; | |||
| 1681 | ||||
| 1682 | if (!HAS_DISPLAY(dev_priv)((&(dev_priv)->__runtime)->pipe_mask != 0)) | |||
| 1683 | return; | |||
| 1684 | ||||
| 1685 | gen9_disable_dc_states(dev_priv); | |||
| 1686 | intel_dmc_disable_program(dev_priv); | |||
| 1687 | ||||
| 1688 | /* 1. Disable all display engine functions -> already done */ | |||
| 1689 | ||||
| 1690 | /* 2. Disable DBUF */ | |||
| 1691 | gen9_dbuf_disable(dev_priv); | |||
| 1692 | ||||
| 1693 | /* 3. Disable CD clock */ | |||
| 1694 | intel_cdclk_uninit_hw(dev_priv); | |||
| 1695 | ||||
| 1696 | /* | |||
| 1697 | * 4. Disable Power Well 1 (PG1). | |||
| 1698 | * The AUX IO power wells are toggled on demand, so they are already | |||
| 1699 | * disabled at this point. | |||
| 1700 | */ | |||
| 1701 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 1702 | well = lookup_power_well(dev_priv, SKL_DISP_PW_1); | |||
| 1703 | intel_power_well_disable(dev_priv, well); | |||
| 1704 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 1705 | ||||
| 1706 | /* 5. Uninitialize the combo PHYs */ | |||
| 1707 | intel_combo_phy_uninit(dev_priv); | |||
| 1708 | } | |||
| 1709 | ||||
| 1710 | static void chv_phy_control_init(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1711 | { | |||
| 1712 | struct i915_power_well *cmn_bc = | |||
| 1713 | lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); | |||
| 1714 | struct i915_power_well *cmn_d = | |||
| 1715 | lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); | |||
| 1716 | ||||
| 1717 | /* | |||
| 1718 | * DISPLAY_PHY_CONTROL can get corrupted if read. As a | |||
| 1719 | * workaround never ever read DISPLAY_PHY_CONTROL, and | |||
| 1720 | * instead maintain a shadow copy ourselves. Use the actual | |||
| 1721 | * power well state and lane status to reconstruct the | |||
| 1722 | * expected initial value. | |||
| 1723 | */ | |||
| 1724 | dev_priv->display.power.chv_phy_control = | |||
| 1725 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0)((0x2) << (2 * (DPIO_PHY0) + 23)) | | |||
| 1726 | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1)((0x2) << (2 * (DPIO_PHY1) + 23)) | | |||
| 1727 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0)((0x7) << (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + 2)) | | |||
| 1728 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1)((0x7) << (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + 2)) | | |||
| 1729 | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0)((0x7) << (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + 2)); | |||
| 1730 | ||||
| 1731 | /* | |||
| 1732 | * If all lanes are disabled we leave the override disabled | |||
| 1733 | * with all power down bits cleared to match the state we | |||
| 1734 | * would use after disabling the port. Otherwise enable the | |||
| 1735 | * override and set the lane powerdown bits according to the | |||
| 1736 | * current lane status. | |||
| 1737 | */ | |||
| 1738 | if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { | |||
| 1739 | u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv )->__info)->display.mmio_offset) + 0x6014), (((&(dev_priv )->__info)->display.mmio_offset) + 0x6018), (((&(dev_priv )->__info)->display.mmio_offset) + 0x6030) })[(PIPE_A)] )) })); | |||
| 1740 | unsigned int mask; | |||
| 1741 | ||||
| 1742 | mask = status & DPLL_PORTB_READY_MASK(0xf); | |||
| 1743 | if (mask == 0xf) | |||
| 1744 | mask = 0x0; | |||
| 1745 | else | |||
| 1746 | dev_priv->display.power.chv_phy_control |= | |||
| 1747 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)(1 << (2 * (DPIO_PHY0) + (DPIO_CH0) + 27)); | |||
| 1748 | ||||
| 1749 | dev_priv->display.power.chv_phy_control |= | |||
| 1750 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0)((mask) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11)); | |||
| 1751 | ||||
| 1752 | mask = (status & DPLL_PORTC_READY_MASK(0xf << 4)) >> 4; | |||
| 1753 | if (mask == 0xf) | |||
| 1754 | mask = 0x0; | |||
| 1755 | else | |||
| 1756 | dev_priv->display.power.chv_phy_control |= | |||
| 1757 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)(1 << (2 * (DPIO_PHY0) + (DPIO_CH1) + 27)); | |||
| 1758 | ||||
| 1759 | dev_priv->display.power.chv_phy_control |= | |||
| 1760 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1)((mask) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11)); | |||
| 1761 | ||||
| 1762 | dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0)(1 << (DPIO_PHY0)); | |||
| 1763 | ||||
| 1764 | dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false0; | |||
| 1765 | } else { | |||
| 1766 | dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true1; | |||
| 1767 | } | |||
| 1768 | ||||
| 1769 | if (intel_power_well_is_enabled(dev_priv, cmn_d)) { | |||
| 1770 | u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS((const i915_reg_t){ .reg = (0x180000 + 0x6240) })); | |||
| 1771 | unsigned int mask; | |||
| 1772 | ||||
| 1773 | mask = status & DPLL_PORTD_READY_MASK(0xf); | |||
| 1774 | ||||
| 1775 | if (mask == 0xf) | |||
| 1776 | mask = 0x0; | |||
| 1777 | else | |||
| 1778 | dev_priv->display.power.chv_phy_control |= | |||
| 1779 | PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)(1 << (2 * (DPIO_PHY1) + (DPIO_CH0) + 27)); | |||
| 1780 | ||||
| 1781 | dev_priv->display.power.chv_phy_control |= | |||
| 1782 | PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0)((mask) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11)); | |||
| 1783 | ||||
| 1784 | dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1)(1 << (DPIO_PHY1)); | |||
| 1785 | ||||
| 1786 | dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false0; | |||
| 1787 | } else { | |||
| 1788 | dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true1; | |||
| 1789 | } | |||
| 1790 | ||||
| 1791 | drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv ->drm)->dev : ((void *)0), DRM_UT_KMS, "Initial PHY_CONTROL=0x%08x\n" , dev_priv->display.power.chv_phy_control) | |||
| 1792 | dev_priv->display.power.chv_phy_control)__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv ->drm)->dev : ((void *)0), DRM_UT_KMS, "Initial PHY_CONTROL=0x%08x\n" , dev_priv->display.power.chv_phy_control); | |||
| 1793 | ||||
| 1794 | /* Defer application of initial phy_control to enabling the powerwell */ | |||
| 1795 | } | |||
| 1796 | ||||
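DISPLAY_PHY_CONTROL above is treated as write-only: the driver reconstructs the expected value from power well and lane state, keeps it in chv_phy_control, and only ever writes the full shadow out. A toy model of that write-only shadow pattern follows; the names and the mmio stand-in are hypothetical, not the driver's actual access path.

#include <stdint.h>
#include <stdio.h>

static uint32_t phy_control_shadow; /* software copy: the only source of truth */
static uint32_t fake_hw_register;   /* hypothetical stand-in for the mmio target */

/* The register is write-only from the driver's point of view: always
 * update the shadow first, then flush the whole value to hardware. */
static void phy_control_flush(void)
{
	fake_hw_register = phy_control_shadow;
}

static void phy_set_override(uint32_t mask, int ch)
{
	phy_control_shadow |= mask << (4 * ch); /* never read-modify the register */
	phy_control_flush();
}

int main(void)
{
	phy_set_override(0xf, 1);
	printf("shadow=0x%08x hw=0x%08x\n",
	       phy_control_shadow, fake_hw_register);
	return 0;
}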
| 1797 | static void vlv_cmnlane_wa(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1798 | { | |||
| 1799 | struct i915_power_well *cmn = | |||
| 1800 | lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); | |||
| 1801 | struct i915_power_well *disp2d = | |||
| 1802 | lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); | |||
| 1803 | ||||
| 1804 | /* If the display might already be active, skip this */ | |||
| 1805 | if (intel_power_well_is_enabled(dev_priv, cmn) && | |||
| 1806 | intel_power_well_is_enabled(dev_priv, disp2d) && | |||
| 1807 | intel_de_read(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) })) & DPIO_CMNRST(1 << 0)) | |||
| 1808 | return; | |||
| 1809 | ||||
| 1810 | drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv ->drm)->dev : ((void *)0), DRM_UT_KMS, "toggling display PHY side reset\n" ); | |||
| 1811 | ||||
| 1812 | /* cmnlane needs DPLL registers */ | |||
| 1813 | intel_power_well_enable(dev_priv, disp2d); | |||
| 1814 | ||||
| 1815 | /* | |||
| 1816 | * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: | |||
| 1817 | * Need to assert and de-assert PHY SB reset by gating the | |||
| 1818 | * common lane power, then un-gating it. | |||
| 1819 | * Simply ungating isn't enough to reset the PHY enough to get | |||
| 1820 | * ports and lanes running. | |||
| 1821 | */ | |||
| 1822 | intel_power_well_disable(dev_priv, cmn); | |||
| 1823 | } | |||
| 1824 | ||||
| 1825 | static bool_Bool vlv_punit_is_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv, u32 reg0) | |||
| 1826 | { | |||
| 1827 | bool_Bool ret; | |||
| 1828 | ||||
| 1829 | vlv_punit_get(dev_priv); | |||
| 1830 | ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK((0x3) << 0)) == SSPM0_SSC_PWR_GATE((0x3) << 0); | |||
| 1831 | vlv_punit_put(dev_priv); | |||
| 1832 | ||||
| 1833 | return ret; | |||
| 1834 | } | |||
| 1835 | ||||
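vlv_punit_is_power_gated() brackets the sideband read with vlv_punit_get()/vlv_punit_put() and then does a plain mask-and-compare on a 2-bit subsystem state field. A trivial standalone model of the comparison, reusing the 0x3 encoding visible in the expansion above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SSC_MASK     (0x3 << 0)   /* 2-bit subsystem state field */
#define SSC_PWR_GATE (0x3 << 0)   /* field value meaning "power gated" */

static bool is_power_gated(uint32_t reg0)
{
	return (reg0 & SSC_MASK) == SSC_PWR_GATE;
}

int main(void)
{
	printf("0x3 -> %d (gated)\n", is_power_gated(0x3));
	printf("0x1 -> %d (not gated)\n", is_power_gated(0x1));
	return 0;
}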
| 1836 | static void assert_ved_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1837 | { | |||
| 1838 | drm_WARN(&dev_priv->drm,({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32)); if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string ((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret ), 0); }) | |||
| 1839 | !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32)); if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string ((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret ), 0); }) | |||
| 1840 | "VED not power gated\n")({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32)); if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string ((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret ), 0); }); | |||
| 1841 | } | |||
| 1842 | ||||
| 1843 | static void assert_isp_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 1844 | { | |||
| 1845 | #ifdef notyet | |||
| 1846 | static const struct pci_device_id isp_ids[] = { | |||
| 1847 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38).vendor = (0x8086), .device = (0x0f38), .subvendor = (uint16_t ) (~0U), .subdevice = (uint16_t) (~0U)}, | |||
| 1848 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8).vendor = (0x8086), .device = (0x22b8), .subvendor = (uint16_t ) (~0U), .subdevice = (uint16_t) (~0U)}, | |||
| 1849 | {} | |||
| 1850 | }; | |||
| 1851 | ||||
| 1852 | drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated (dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n" , dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect (!!(__ret), 0); }) | |||
| 1853 | !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated (dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n" , dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect (!!(__ret), 0); }) | |||
| 1854 | "ISP not power gated\n")({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated (dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n" , dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect (!!(__ret), 0); }); | |||
| 1855 | #endif | |||
| 1856 | } | |||
| 1857 | ||||
| 1858 | static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *dev_priv); | |||
| 1859 | ||||
| 1860 | /** | |||
| 1861 | * intel_power_domains_init_hw - initialize hardware power domain state | |||
| 1862 | * @i915: i915 device instance | |||
| 1863 | * @resume: Called from resume code paths or not | |||
| 1864 | * | |||
| 1865 | * This function initializes the hardware power domain state and enables all | |||
| 1866 | * power wells belonging to the INIT power domain. Power wells in other | |||
| 1867 | * domains (and not in the INIT domain) are referenced or disabled by | |||
| 1868 | * intel_modeset_readout_hw_state(). After that the reference count of each | |||
| 1869 | * power well must match its HW enabled state, see | |||
| 1870 | * intel_power_domains_verify_state(). | |||
| 1871 | * | |||
| 1872 | * It will return with power domains disabled (to be enabled later by | |||
| 1873 | * intel_power_domains_enable()) and must be paired with | |||
| 1874 | * intel_power_domains_driver_remove(). | |||
| 1875 | */ | |||
| 1876 | void intel_power_domains_init_hw(struct drm_i915_privateinteldrm_softc *i915, bool_Bool resume) | |||
| 1877 | { | |||
| 1878 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 1879 | ||||
| 1880 | power_domains->initializing = true1; | |||
| 1881 | ||||
| 1882 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) { | |||
| 1883 | icl_display_core_init(i915, resume); | |||
| 1884 | } else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 1885 | bxt_display_core_init(i915, resume); | |||
| 1886 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9) { | |||
| 1887 | skl_display_core_init(i915, resume); | |||
| 1888 | } else if (IS_CHERRYVIEW(i915)IS_PLATFORM(i915, INTEL_CHERRYVIEW)) { | |||
| 1889 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 1890 | chv_phy_control_init(i915); | |||
| 1891 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 1892 | assert_isp_power_gated(i915); | |||
| 1893 | } else if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW)) { | |||
| 1894 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 1895 | vlv_cmnlane_wa(i915); | |||
| 1896 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 1897 | assert_ved_power_gated(i915); | |||
| 1898 | assert_isp_power_gated(i915); | |||
| 1899 | } else if (IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL) || IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL)) { | |||
| 1900 | hsw_assert_cdclk(i915); | |||
| 1901 | intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)(((i915)->pch_type) == PCH_NOP)); | |||
| 1902 | } else if (IS_IVYBRIDGE(i915)IS_PLATFORM(i915, INTEL_IVYBRIDGE)) { | |||
| 1903 | intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)(((i915)->pch_type) == PCH_NOP)); | |||
| 1904 | } | |||
| 1905 | ||||
| 1906 | /* | |||
| 1907 | * Keep all power wells enabled for any dependent HW access during | |||
| 1908 | * initialization and to make sure we keep BIOS enabled display HW | |||
| 1909 | * resources powered until display HW readout is complete. We drop | |||
| 1910 | * this reference in intel_power_domains_enable(). | |||
| 1911 | */ | |||
| 1912 | drm_WARN_ON(&i915->drm, power_domains->init_wakeref)({ int __ret = !!((power_domains->init_wakeref)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((&i915->drm ))->dev), "", "drm_WARN_ON(" "power_domains->init_wakeref" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1913 | power_domains->init_wakeref = | |||
| 1914 | intel_display_power_get(i915, POWER_DOMAIN_INIT); | |||
| 1915 | ||||
| 1916 | /* Keep the power wells always on if the user disabled power well toggling. */ | |||
| 1917 | if (!i915->params.disable_power_well) { | |||
| 1918 | drm_WARN_ON(&i915->drm, power_domains->disable_wakeref)({ int __ret = !!((power_domains->disable_wakeref)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((&i915->drm ))->dev), "", "drm_WARN_ON(" "power_domains->disable_wakeref" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1919 | i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, | |||
| 1920 | POWER_DOMAIN_INIT); | |||
| 1921 | } | |||
| 1922 | intel_power_domains_sync_hw(i915); | |||
| 1923 | ||||
| 1924 | power_domains->initializing = false0; | |||
| 1925 | } | |||
| 1926 | ||||
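The kernel-doc above describes a hand-off: intel_power_domains_init_hw() takes an INIT power reference that intel_power_domains_enable() later drops, so everything stays powered across HW readout. A toy refcount model of that hand-off (simplified; the real code uses intel_display_power_get/put and wakeref cookies):

#include <assert.h>
#include <stdio.h>

static int refcount;     /* models the INIT power domain use count */
static int init_wakeref; /* models power_domains->init_wakeref */

static int power_get(void)     { refcount++; return 1; /* cookie */ }
static void power_put(int ref) { (void)ref; refcount--; }

static void domains_init_hw(void)
{
	assert(!init_wakeref);
	init_wakeref = power_get(); /* keep everything on during HW readout */
}

static void domains_enable(void)
{
	int ref = init_wakeref;

	init_wakeref = 0;           /* the hand-off: fetch_and_zero() in the driver */
	power_put(ref);             /* from here on, wells toggle on demand */
}

int main(void)
{
	domains_init_hw();
	printf("during readout: refcount=%d\n", refcount);
	domains_enable();
	printf("after enable:   refcount=%d\n", refcount);
	return 0;
}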
| 1927 | /** | |||
| 1928 | * intel_power_domains_driver_remove - deinitialize hw power domain state | |||
| 1929 | * @i915: i915 device instance | |||
| 1930 | * | |||
| 1931 | * De-initializes the display power domain HW state. It also ensures that the | |||
| 1932 | * device stays powered up so that the driver can be reloaded. | |||
| 1933 | * | |||
| 1934 | * It must be called with power domains already disabled (after a call to | |||
| 1935 | * intel_power_domains_disable()) and must be paired with | |||
| 1936 | * intel_power_domains_init_hw(). | |||
| 1937 | */ | |||
| 1938 | void intel_power_domains_driver_remove(struct drm_i915_privateinteldrm_softc *i915) | |||
| 1939 | { | |||
| 1940 | intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) = | |||
| 1941 | fetch_and_zero(&i915->display.power.domains.init_wakeref)({ typeof(*&i915->display.power.domains.init_wakeref) __T = *(&i915->display.power.domains.init_wakeref); *(& i915->display.power.domains.init_wakeref) = (typeof(*& i915->display.power.domains.init_wakeref))0; __T; }); | |||
| 1942 | ||||
| 1943 | /* Remove the refcount we took to keep power well support disabled. */ | |||
| 1944 | if (!i915->params.disable_power_well) | |||
| 1945 | intel_display_power_put(i915, POWER_DOMAIN_INIT, | |||
| 1946 | fetch_and_zero(&i915->display.power.domains.disable_wakeref)({ typeof(*&i915->display.power.domains.disable_wakeref ) __T = *(&i915->display.power.domains.disable_wakeref ); *(&i915->display.power.domains.disable_wakeref) = ( typeof(*&i915->display.power.domains.disable_wakeref)) 0; __T; })); | |||
| 1947 | ||||
| 1948 | intel_display_power_flush_work_sync(i915); | |||
| 1949 | ||||
| 1950 | intel_power_domains_verify_state(i915); | |||
| 1951 | ||||
| 1952 | /* Keep the power well enabled, but cancel its rpm wakeref. */ | |||
| 1953 | intel_runtime_pm_put(&i915->runtime_pm, wakeref); | |||
| 1954 | } | |||
| 1955 | ||||
| 1956 | /** | |||
| 1957 | * intel_power_domains_sanitize_state - sanitize power domains state | |||
| 1958 | * @i915: i915 device instance | |||
| 1959 | * | |||
| 1960 | * Sanitize the power domains state during driver loading and system resume. | |||
| 1961 | * The function will disable all display power wells that BIOS has enabled | |||
| 1962 | * without a user for them (any user for a power well has taken a reference | |||
| 1963 | * on it by the time this function is called, after the state of all the | |||
| 1964 | * pipe, encoder, etc. HW resources have been sanitized). | |||
| 1965 | */ | |||
| 1966 | void intel_power_domains_sanitize_state(struct drm_i915_privateinteldrm_softc *i915) | |||
| 1967 | { | |||
| 1968 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 1969 | struct i915_power_well *power_well; | |||
| 1970 | ||||
| 1971 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 1972 | ||||
| 1973 | for_each_power_well_reverse(i915, power_well)for ((power_well) = (i915)->display.power.domains.power_wells + (i915)->display.power.domains.power_well_count - 1; (power_well ) - (i915)->display.power.domains.power_wells >= 0; (power_well )--) { | |||
| 1974 | if (power_well->desc->always_on || power_well->count || | |||
| 1975 | !intel_power_well_is_enabled(i915, power_well)) | |||
| 1976 | continue; | |||
| 1977 | ||||
| 1978 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "BIOS left unused %s power well enabled, disabling it\n" , intel_power_well_name(power_well)) | |||
| 1979 | "BIOS left unused %s power well enabled, disabling it\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "BIOS left unused %s power well enabled, disabling it\n" , intel_power_well_name(power_well)) | |||
| 1980 | intel_power_well_name(power_well))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "BIOS left unused %s power well enabled, disabling it\n" , intel_power_well_name(power_well)); | |||
| 1981 | intel_power_well_disable(i915, power_well); | |||
| 1982 | } | |||
| 1983 | ||||
| 1984 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 1985 | } | |||
| 1986 | ||||
| 1987 | /** | |||
| 1988 | * intel_power_domains_enable - enable toggling of display power wells | |||
| 1989 | * @i915: i915 device instance | |||
| 1990 | * | |||
| 1991 | * Enable the on-demand enabling/disabling of the display power wells. Note that | |||
| 1992 | * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled | |||
| 1993 | * only at specific points of the display modeset sequence, thus they are not | |||
| 1994 | * affected by the intel_power_domains_enable()/disable() calls. The purpose | |||
| 1995 | * of these functions is to keep the rest of the power wells enabled until the end | |||
| 1996 | * of display HW readout (which will acquire the power references reflecting | |||
| 1997 | * the current HW state). | |||
| 1998 | */ | |||
| 1999 | void intel_power_domains_enable(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2000 | { | |||
| 2001 | intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) = | |||
| 2002 | fetch_and_zero(&i915->display.power.domains.init_wakeref)({ typeof(*&i915->display.power.domains.init_wakeref) __T = *(&i915->display.power.domains.init_wakeref); *(& i915->display.power.domains.init_wakeref) = (typeof(*& i915->display.power.domains.init_wakeref))0; __T; }); | |||
| 2003 | ||||
| 2004 | intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); | |||
| 2005 | intel_power_domains_verify_state(i915); | |||
| 2006 | } | |||
| 2007 | ||||
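Both intel_power_domains_enable() and the init path move the wakeref out of the structure with fetch_and_zero(), whose expansion is visible in the listing: read the variable and reset it to zero in one statement expression, so the reference cannot be dropped twice. A standalone version (this relies on the GCC/Clang statement-expression extension, as the kernel helper does):

#include <stdio.h>

/* Standalone version of the fetch_and_zero() expansion shown in the
 * listing: return the current value and zero the variable in one step. */
#define fetch_and_zero(ptr) ({                  \
	__typeof__(*(ptr)) __T = *(ptr);        \
	*(ptr) = (__typeof__(*(ptr)))0;         \
	__T;                                    \
})

int main(void)
{
	int wakeref = 42;
	int taken = fetch_and_zero(&wakeref);

	printf("taken=%d left=%d\n", taken, wakeref); /* taken=42 left=0 */
	return 0;
}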
| 2008 | /** | |||
| 2009 | * intel_power_domains_disable - disable toggling of display power wells | |||
| 2010 | * @i915: i915 device instance | |||
| 2011 | * | |||
| 2012 | * Disable the on-demand enabling/disabling of the display power wells. See | |||
| 2013 | * intel_power_domains_enable() for which power wells this call controls. | |||
| 2014 | */ | |||
| 2015 | void intel_power_domains_disable(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2016 | { | |||
| 2017 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2018 | ||||
| 2019 | drm_WARN_ON(&i915->drm, power_domains->init_wakeref)({ int __ret = !!((power_domains->init_wakeref)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((&i915->drm ))->dev), "", "drm_WARN_ON(" "power_domains->init_wakeref" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 2020 | power_domains->init_wakeref = | |||
| 2021 | intel_display_power_get(i915, POWER_DOMAIN_INIT); | |||
| 2022 | ||||
| 2023 | intel_power_domains_verify_state(i915); | |||
| 2024 | } | |||
| 2025 | ||||
| 2026 | /** | |||
| 2027 | * intel_power_domains_suspend - suspend power domain state | |||
| 2028 | * @i915: i915 device instance | |||
| 2029 | * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) | |||
| 2030 | * | |||
| 2031 | * This function prepares the hardware power domain state before entering | |||
| 2032 | * system suspend. | |||
| 2033 | * | |||
| 2034 | * It must be called with power domains already disabled (after a call to | |||
| 2035 | * intel_power_domains_disable()) and paired with intel_power_domains_resume(). | |||
| 2036 | */ | |||
| 2037 | void intel_power_domains_suspend(struct drm_i915_privateinteldrm_softc *i915, | |||
| 2038 | enum i915_drm_suspend_mode suspend_mode) | |||
| 2039 | { | |||
| 2040 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2041 | intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) = | |||
| 2042 | fetch_and_zero(&power_domains->init_wakeref)({ typeof(*&power_domains->init_wakeref) __T = *(& power_domains->init_wakeref); *(&power_domains->init_wakeref ) = (typeof(*&power_domains->init_wakeref))0; __T; }); | |||
| 2043 | ||||
| 2044 | intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); | |||
| 2045 | ||||
| 2046 | /* | |||
| 2047 | * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 | |||
| 2048 | * support, don't manually deinit the power domains. This also means the | |||
| 2049 | * DMC firmware will stay active; it will power down any HW | |||
| 2050 | * resources as required and also enable deeper system power states | |||
| 2051 | * that would be blocked if the firmware was inactive. | |||
| 2052 | */ | |||
| 2053 | if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9(1 << 3)) && | |||
| 2054 | suspend_mode == I915_DRM_SUSPEND_IDLE && | |||
| 2055 | intel_dmc_has_payload(i915)) { | |||
| 2056 | intel_display_power_flush_work(i915); | |||
| 2057 | intel_power_domains_verify_state(i915); | |||
| 2058 | return; | |||
| 2059 | } | |||
| 2060 | ||||
| 2061 | /* | |||
| 2062 | * Even if power well support was disabled we still want to disable | |||
| 2063 | * power wells if power domains must be deinitialized for suspend. | |||
| 2064 | */ | |||
| 2065 | if (!i915->params.disable_power_well) | |||
| 2066 | intel_display_power_put(i915, POWER_DOMAIN_INIT, | |||
| 2067 | fetch_and_zero(&i915->display.power.domains.disable_wakeref)({ typeof(*&i915->display.power.domains.disable_wakeref ) __T = *(&i915->display.power.domains.disable_wakeref ); *(&i915->display.power.domains.disable_wakeref) = ( typeof(*&i915->display.power.domains.disable_wakeref)) 0; __T; })); | |||
| 2068 | ||||
| 2069 | intel_display_power_flush_work(i915); | |||
| 2070 | intel_power_domains_verify_state(i915); | |||
| 2071 | ||||
| 2072 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) | |||
| 2073 | icl_display_core_uninit(i915); | |||
| 2074 | else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) | |||
| 2075 | bxt_display_core_uninit(i915); | |||
| 2076 | else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9) | |||
| 2077 | skl_display_core_uninit(i915); | |||
| 2078 | ||||
| 2079 | power_domains->display_core_suspended = true1; | |||
| 2080 | } | |||
| 2081 | ||||
| 2082 | /** | |||
| 2083 | * intel_power_domains_resume - resume power domain state | |||
| 2084 | * @i915: i915 device instance | |||
| 2085 | * | |||
| 2086 | * This function resumes the hardware power domain state during system resume. | |||
| 2087 | * | |||
| 2088 | * It will return with power domain support disabled (to be enabled later by | |||
| 2089 | * intel_power_domains_enable()) and must be paired with | |||
| 2090 | * intel_power_domains_suspend(). | |||
| 2091 | */ | |||
| 2092 | void intel_power_domains_resume(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2093 | { | |||
| 2094 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2095 | ||||
| 2096 | if (power_domains->display_core_suspended) { | |||
| 2097 | intel_power_domains_init_hw(i915, true1); | |||
| 2098 | power_domains->display_core_suspended = false0; | |||
| 2099 | } else { | |||
| 2100 | drm_WARN_ON(&i915->drm, power_domains->init_wakeref)({ int __ret = !!((power_domains->init_wakeref)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((&i915->drm ))->dev), "", "drm_WARN_ON(" "power_domains->init_wakeref" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 2101 | power_domains->init_wakeref = | |||
| 2102 | intel_display_power_get(i915, POWER_DOMAIN_INIT); | |||
| 2103 | } | |||
| 2104 | ||||
| 2105 | intel_power_domains_verify_state(i915); | |||
| 2106 | } | |||
| 2107 | ||||
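Suspend and resume pair up through display_core_suspended: resume re-runs the full core init only if suspend actually deinitialized the core, otherwise it just retakes the INIT reference. A toy model of that flag-driven pairing (simplified control flow; the prints are placeholders for the real init/uninit calls):

#include <stdbool.h>
#include <stdio.h>

static bool display_core_suspended; /* models power_domains->display_core_suspended */

static void domains_suspend(bool deinit_core)
{
	if (!deinit_core)
		return; /* e.g. S0ix with DMC active: leave the core to the firmware */

	printf("display core uninit\n");
	display_core_suspended = true;
}

static void domains_resume(void)
{
	if (display_core_suspended) {
		printf("full display core re-init\n");
		display_core_suspended = false;
	} else {
		printf("just retake the INIT power reference\n");
	}
}

int main(void)
{
	domains_suspend(true);  domains_resume();
	domains_suspend(false); domains_resume();
	return 0;
}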
| 2108 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)0 | |||
| 2109 | ||||
| 2110 | static void intel_power_domains_dump_info(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2111 | { | |||
| 2112 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2113 | struct i915_power_well *power_well; | |||
| 2114 | ||||
| 2115 | for_each_power_well(i915, power_well)for ((power_well) = (i915)->display.power.domains.power_wells ; (power_well) - (i915)->display.power.domains.power_wells < (i915)->display.power.domains.power_well_count; (power_well )++) { | |||
| 2116 | enum intel_display_power_domain domain; | |||
| 2117 | ||||
| 2118 | drm_dbg(&i915->drm, "%-25s %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "%-25s %d\n", intel_power_well_name (power_well), intel_power_well_refcount(power_well)) | |||
| 2119 | intel_power_well_name(power_well), intel_power_well_refcount(power_well))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "%-25s %d\n", intel_power_well_name (power_well), intel_power_well_refcount(power_well)); | |||
| 2120 | ||||
| 2121 | for_each_power_domain(domain, intel_power_well_domains(power_well))for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++ ) if (!(test_bit((domain), (intel_power_well_domains(power_well ))->bits))) {} else | |||
| 2122 | drm_dbg(&i915->drm, " %-23s %d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, " %-23s %d\n", intel_display_power_domain_str (domain), power_domains->domain_use_count[domain]) | |||
| 2123 | intel_display_power_domain_str(domain),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, " %-23s %d\n", intel_display_power_domain_str (domain), power_domains->domain_use_count[domain]) | |||
| 2124 | power_domains->domain_use_count[domain])__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, " %-23s %d\n", intel_display_power_domain_str (domain), power_domains->domain_use_count[domain]); | |||
| 2125 | } | |||
| 2126 | } | |||
| 2127 | ||||
| 2128 | /** | |||
| 2129 | * intel_power_domains_verify_state - verify the HW/SW state for all power wells | |||
| 2130 | * @i915: i915 device instance | |||
| 2131 | * | |||
| 2132 | * Verify if the reference count of each power well matches its HW enabled | |||
| 2133 | * state and the total refcount of the domains it belongs to. This must be | |||
| 2134 | * called after modeset HW state sanitization, which is responsible for | |||
| 2135 | * acquiring reference counts for any power wells in use and disabling the | |||
| 2136 | * ones left on by BIOS but not required by any active output. | |||
| 2137 | */ | |||
| 2138 | static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2139 | { | |||
| 2140 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2141 | struct i915_power_well *power_well; | |||
| 2142 | bool_Bool dump_domain_info; | |||
| 2143 | ||||
| 2144 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 2145 | ||||
| 2146 | verify_async_put_domains_state(power_domains); | |||
| 2147 | ||||
| 2148 | dump_domain_info = false0; | |||
| 2149 | for_each_power_well(i915, power_well)for ((power_well) = (i915)->display.power.domains.power_wells ; (power_well) - (i915)->display.power.domains.power_wells < (i915)->display.power.domains.power_well_count; (power_well )++) { | |||
| 2150 | enum intel_display_power_domain domain; | |||
| 2151 | int domains_count; | |||
| 2152 | bool_Bool enabled; | |||
| 2153 | ||||
| 2154 | enabled = intel_power_well_is_enabled(i915, power_well); | |||
| 2155 | if ((intel_power_well_refcount(power_well) || | |||
| 2156 | intel_power_well_is_always_on(power_well)) != | |||
| 2157 | enabled) | |||
| 2158 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_power_well_name (power_well), intel_power_well_refcount(power_well), enabled) | |||
| 2159 | "power well %s state mismatch (refcount %d/enabled %d)",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_power_well_name (power_well), intel_power_well_refcount(power_well), enabled) | |||
| 2160 | intel_power_well_name(power_well),printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_power_well_name (power_well), intel_power_well_refcount(power_well), enabled) | |||
| 2161 | intel_power_well_refcount(power_well), enabled)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_power_well_name (power_well), intel_power_well_refcount(power_well), enabled); | |||
| 2162 | ||||
| 2163 | domains_count = 0; | |||
| 2164 | for_each_power_domain(domain, intel_power_well_domains(power_well))for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++ ) if (!(test_bit((domain), (intel_power_well_domains(power_well ))->bits))) {} else | |||
| 2165 | domains_count += power_domains->domain_use_count[domain]; | |||
| 2166 | ||||
| 2167 | if (intel_power_well_refcount(power_well) != domains_count) { | |||
| 2168 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count) | |||
| 2169 | "power well %s refcount/domain refcount mismatch "printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count) | |||
| 2170 | "(refcount %d/domains refcount %d)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count) | |||
| 2171 | intel_power_well_name(power_well),printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count) | |||
| 2172 | intel_power_well_refcount(power_well),printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count) | |||
| 2173 | domains_count)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , intel_power_well_name(power_well), intel_power_well_refcount (power_well), domains_count); | |||
| 2174 | dump_domain_info = true1; | |||
| 2175 | } | |||
| 2176 | } | |||
| 2177 | ||||
| 2178 | if (dump_domain_info) { | |||
| 2179 | static bool_Bool dumped; | |||
| 2180 | ||||
| 2181 | if (!dumped) { | |||
| 2182 | intel_power_domains_dump_info(i915); | |||
| 2183 | dumped = true1; | |||
| 2184 | } | |||
| 2185 | } | |||
| 2186 | ||||
| 2187 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 2188 | } | |||
| 2189 | ||||
| 2190 | #else | |||
| 2191 | ||||
| 2192 | static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2193 | { | |||
| 2194 | } | |||
| 2195 | ||||
| 2196 | #endif | |||
| 2197 | ||||
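The verification above enforces two invariants: a well must be enabled in hardware exactly when it is refcounted or always-on, and a well's refcount must equal the sum of its domains' use counts. A self-contained check of the first invariant, modeled on the mismatch message in the listing (plain structs, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct well {
	const char *name;
	int refcount;
	bool always_on;
	bool hw_enabled;
};

/* A well must be enabled in hardware exactly when it is refcounted
 * or marked always-on; anything else is a state mismatch. */
static void verify(const struct well *w)
{
	bool expected = w->refcount || w->always_on;

	if (expected != w->hw_enabled)
		printf("power well %s state mismatch (refcount %d/enabled %d)\n",
		       w->name, w->refcount, (int)w->hw_enabled);
}

int main(void)
{
	struct well ok  = { "PW_1", 1, false, true };
	struct well bad = { "PW_2", 0, false, true }; /* left on with no user */

	verify(&ok);   /* silent */
	verify(&bad);  /* reports the mismatch */
	return 0;
}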
| 2198 | void intel_display_power_suspend_late(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2199 | { | |||
| 2200 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11 || IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || | |||
| 2201 | IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 2202 | bxt_enable_dc9(i915); | |||
| 2203 | } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) { | |||
| 2204 | hsw_enable_pc8(i915); | |||
| 2205 | } | |||
| 2206 | ||||
| 2207 | /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ | |||
| 2208 | if (INTEL_PCH_TYPE(i915)((i915)->pch_type) >= PCH_CNP && INTEL_PCH_TYPE(i915)((i915)->pch_type) < PCH_DG1) | |||
| 2209 | intel_de_rmw(i915, SOUTH_CHICKEN1((const i915_reg_t){ .reg = (0xc2000) }), SBCLK_RUN_REFCLK_DIS(1 << 7), SBCLK_RUN_REFCLK_DIS(1 << 7)); | |||
| 2210 | } | |||
| 2211 | ||||
| 2212 | void intel_display_power_resume_early(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2213 | { | |||
| 2214 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11 || IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || | |||
| 2215 | IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 2216 | gen9_sanitize_dc_state(i915); | |||
| 2217 | bxt_disable_dc9(i915); | |||
| 2218 | } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) { | |||
| 2219 | hsw_disable_pc8(i915); | |||
| 2220 | } | |||
| 2221 | ||||
| 2222 | /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ | |||
| 2223 | if (INTEL_PCH_TYPE(i915)((i915)->pch_type) >= PCH_CNP && INTEL_PCH_TYPE(i915)((i915)->pch_type) < PCH_DG1) | |||
| 2224 | intel_de_rmw(i915, SOUTH_CHICKEN1((const i915_reg_t){ .reg = (0xc2000) }), SBCLK_RUN_REFCLK_DIS(1 << 7), 0); | |||
| 2225 | } | |||
| 2226 | ||||
| 2227 | void intel_display_power_suspend(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2228 | { | |||
| 2229 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) { | |||
| 2230 | icl_display_core_uninit(i915); | |||
| 2231 | bxt_enable_dc9(i915); | |||
| 2232 | } else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 2233 | bxt_display_core_uninit(i915); | |||
| 2234 | bxt_enable_dc9(i915); | |||
| 2235 | } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) { | |||
| 2236 | hsw_enable_pc8(i915); | |||
| 2237 | } | |||
| 2238 | } | |||
| 2239 | ||||
| 2240 | void intel_display_power_resume(struct drm_i915_privateinteldrm_softc *i915) | |||
| 2241 | { | |||
| 2242 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) { | |||
| ||||
| 2243 | bxt_disable_dc9(i915); | |||
| 2244 | icl_display_core_init(i915, true1); | |||
| 2245 | if (intel_dmc_has_payload(i915)) { | |||
| 2246 | if (i915->display.dmc.allowed_dc_mask & | |||
| 2247 | DC_STATE_EN_UPTO_DC6(2 << 0)) | |||
| 2248 | skl_enable_dc6(i915); | |||
| 2249 | else if (i915->display.dmc.allowed_dc_mask & | |||
| 2250 | DC_STATE_EN_UPTO_DC5(1 << 0)) | |||
| 2251 | gen9_enable_dc5(i915); | |||
| 2252 | } | |||
| 2253 | } else if (IS_GEMINILAKE(i915)IS_PLATFORM(i915, INTEL_GEMINILAKE) || IS_BROXTON(i915)IS_PLATFORM(i915, INTEL_BROXTON)) { | |||
| 2254 | bxt_disable_dc9(i915); | |||
| 2255 | bxt_display_core_init(i915, true1); | |||
| 2256 | if (intel_dmc_has_payload(i915) && | |||
| 2257 | (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5(1 << 0))) | |||
| 2258 | gen9_enable_dc5(i915); | |||
| 2259 | } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) { | |||
| 2260 | hsw_disable_pc8(i915); | |||
| 2261 | } | |||
| 2262 | } | |||
| 2263 | ||||
| 2264 | void intel_display_power_debug(struct drm_i915_privateinteldrm_softc *i915, struct seq_file *m) | |||
| 2265 | { | |||
| 2266 | struct i915_power_domains *power_domains = &i915->display.power.domains; | |||
| 2267 | int i; | |||
| 2268 | ||||
| 2269 | mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock); | |||
| 2270 | ||||
| 2271 | seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); | |||
| 2272 | for (i = 0; i < power_domains->power_well_count; i++) { | |||
| 2273 | struct i915_power_well *power_well; | |||
| 2274 | enum intel_display_power_domain power_domain; | |||
| 2275 | ||||
| 2276 | power_well = &power_domains->power_wells[i]; | |||
| 2277 | seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), | |||
| 2278 | intel_power_well_refcount(power_well)); | |||
| 2279 | ||||
| 2280 | for_each_power_domain(power_domain, intel_power_well_domains(power_well))for ((power_domain) = 0; (power_domain) < POWER_DOMAIN_NUM ; (power_domain)++) if (!(test_bit((power_domain), (intel_power_well_domains (power_well))->bits))) {} else | |||
| 2281 | seq_printf(m, " %-23s %d\n", | |||
| 2282 | intel_display_power_domain_str(power_domain), | |||
| 2283 | power_domains->domain_use_count[power_domain]); | |||
| 2284 | } | |||
| 2285 | ||||
| 2286 | mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock); | |||
| 2287 | } | |||
| 2288 | ||||
| 2289 | struct intel_ddi_port_domains { | |||
| 2290 | enum port port_start; | |||
| 2291 | enum port port_end; | |||
| 2292 | enum aux_ch aux_ch_start; | |||
| 2293 | enum aux_ch aux_ch_end; | |||
| 2294 | ||||
| 2295 | enum intel_display_power_domain ddi_lanes; | |||
| 2296 | enum intel_display_power_domain ddi_io; | |||
| 2297 | enum intel_display_power_domain aux_legacy_usbc; | |||
| 2298 | enum intel_display_power_domain aux_tbt; | |||
| 2299 | }; | |||
| 2300 | ||||
| 2301 | static const struct intel_ddi_port_domains | |||
| 2302 | i9xx_port_domains[] = { | |||
| 2303 | { | |||
| 2304 | .port_start = PORT_A, | |||
| 2305 | .port_end = PORT_F, | |||
| 2306 | .aux_ch_start = AUX_CH_A, | |||
| 2307 | .aux_ch_end = AUX_CH_F, | |||
| 2308 | ||||
| 2309 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, | |||
| 2310 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, | |||
| 2311 | .aux_legacy_usbc = POWER_DOMAIN_AUX_A, | |||
| 2312 | .aux_tbt = POWER_DOMAIN_INVALID, | |||
| 2313 | }, | |||
| 2314 | }; | |||
| 2315 | ||||
| 2316 | static const struct intel_ddi_port_domains | |||
| 2317 | d11_port_domains[] = { | |||
| 2318 | { | |||
| 2319 | .port_start = PORT_A, | |||
| 2320 | .port_end = PORT_B, | |||
| 2321 | .aux_ch_start = AUX_CH_A, | |||
| 2322 | .aux_ch_end = AUX_CH_B, | |||
| 2323 | ||||
| 2324 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, | |||
| 2325 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, | |||
| 2326 | .aux_legacy_usbc = POWER_DOMAIN_AUX_A, | |||
| 2327 | .aux_tbt = POWER_DOMAIN_INVALID, | |||
| 2328 | }, { | |||
| 2329 | .port_start = PORT_C, | |||
| 2330 | .port_end = PORT_F, | |||
| 2331 | .aux_ch_start = AUX_CH_C, | |||
| 2332 | .aux_ch_end = AUX_CH_F, | |||
| 2333 | ||||
| 2334 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, | |||
| 2335 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, | |||
| 2336 | .aux_legacy_usbc = POWER_DOMAIN_AUX_C, | |||
| 2337 | .aux_tbt = POWER_DOMAIN_AUX_TBT1, | |||
| 2338 | }, | |||
| 2339 | }; | |||
| 2340 | ||||
| 2341 | static const struct intel_ddi_port_domains | |||
| 2342 | d12_port_domains[] = { | |||
| 2343 | { | |||
| 2344 | .port_start = PORT_A, | |||
| 2345 | .port_end = PORT_C, | |||
| 2346 | .aux_ch_start = AUX_CH_A, | |||
| 2347 | .aux_ch_end = AUX_CH_C, | |||
| 2348 | ||||
| 2349 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, | |||
| 2350 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, | |||
| 2351 | .aux_legacy_usbc = POWER_DOMAIN_AUX_A, | |||
| 2352 | .aux_tbt = POWER_DOMAIN_INVALID, | |||
| 2353 | }, { | |||
| 2354 | .port_start = PORT_TC1, | |||
| 2355 | .port_end = PORT_TC6, | |||
| 2356 | .aux_ch_start = AUX_CH_USBC1, | |||
| 2357 | .aux_ch_end = AUX_CH_USBC6, | |||
| 2358 | ||||
| 2359 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, | |||
| 2360 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, | |||
| 2361 | .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, | |||
| 2362 | .aux_tbt = POWER_DOMAIN_AUX_TBT1, | |||
| 2363 | }, | |||
| 2364 | }; | |||
| 2365 | ||||
| 2366 | static const struct intel_ddi_port_domains | |||
| 2367 | d13_port_domains[] = { | |||
| 2368 | { | |||
| 2369 | .port_start = PORT_A, | |||
| 2370 | .port_end = PORT_C, | |||
| 2371 | .aux_ch_start = AUX_CH_A, | |||
| 2372 | .aux_ch_end = AUX_CH_C, | |||
| 2373 | ||||
| 2374 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, | |||
| 2375 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, | |||
| 2376 | .aux_legacy_usbc = POWER_DOMAIN_AUX_A, | |||
| 2377 | .aux_tbt = POWER_DOMAIN_INVALID, | |||
| 2378 | }, { | |||
| 2379 | .port_start = PORT_TC1, | |||
| 2380 | .port_end = PORT_TC4, | |||
| 2381 | .aux_ch_start = AUX_CH_USBC1, | |||
| 2382 | .aux_ch_end = AUX_CH_USBC4, | |||
| 2383 | ||||
| 2384 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, | |||
| 2385 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, | |||
| 2386 | .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, | |||
| 2387 | .aux_tbt = POWER_DOMAIN_AUX_TBT1, | |||
| 2388 | }, { | |||
| 2389 | .port_start = PORT_D_XELPD, | |||
| 2390 | .port_end = PORT_E_XELPD, | |||
| 2391 | .aux_ch_start = AUX_CH_D_XELPD, | |||
| 2392 | .aux_ch_end = AUX_CH_E_XELPD, | |||
| 2393 | ||||
| 2394 | .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, | |||
| 2395 | .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, | |||
| 2396 | .aux_legacy_usbc = POWER_DOMAIN_AUX_D, | |||
| 2397 | .aux_tbt = POWER_DOMAIN_INVALID, | |||
| 2398 | }, | |||
| 2399 | }; | |||
| 2400 | ||||
| 2401 | static void | |||
| 2402 | intel_port_domains_for_platform(struct drm_i915_privateinteldrm_softc *i915, | |||
| 2403 | const struct intel_ddi_port_domains **domains, | |||
| 2404 | int *domains_size) | |||
| 2405 | { | |||
| 2406 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 13) { | |||
| 2407 | *domains = d13_port_domains; | |||
| 2408 | *domains_size = ARRAY_SIZE(d13_port_domains)(sizeof((d13_port_domains)) / sizeof((d13_port_domains)[0])); | |||
| 2409 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) { | |||
| 2410 | *domains = d12_port_domains; | |||
| 2411 | *domains_size = ARRAY_SIZE(d12_port_domains)(sizeof((d12_port_domains)) / sizeof((d12_port_domains)[0])); | |||
| 2412 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) { | |||
| 2413 | *domains = d11_port_domains; | |||
| 2414 | *domains_size = ARRAY_SIZE(d11_port_domains)(sizeof((d11_port_domains)) / sizeof((d11_port_domains)[0])); | |||
| 2415 | } else { | |||
| 2416 | *domains = i9xx_port_domains; | |||
| 2417 | *domains_size = ARRAY_SIZE(i9xx_port_domains)(sizeof((i9xx_port_domains)) / sizeof((i9xx_port_domains)[0]) ); | |||
| 2418 | } | |||
| 2419 | } | |||
| 2420 | ||||
| 2421 | static const struct intel_ddi_port_domains * | |||
| 2422 | intel_port_domains_for_port(struct drm_i915_privateinteldrm_softc *i915, enum port port) | |||
| 2423 | { | |||
| 2424 | const struct intel_ddi_port_domains *domains; | |||
| 2425 | int domains_size; | |||
| 2426 | int i; | |||
| 2427 | ||||
| 2428 | intel_port_domains_for_platform(i915, &domains, &domains_size); | |||
| 2429 | for (i = 0; i < domains_size; i++) | |||
| 2430 | if (port >= domains[i].port_start && port <= domains[i].port_end) | |||
| 2431 | return &domains[i]; | |||
| 2432 | ||||
| 2433 | return NULL((void *)0); | |||
| 2434 | } | |||
| 2435 | ||||
| 2436 | enum intel_display_power_domain | |||
| 2437 | intel_display_power_ddi_io_domain(struct drm_i915_privateinteldrm_softc *i915, enum port port) | |||
| 2438 | { | |||
| 2439 | const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); | |||
| 2440 | ||||
| 2441 | if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)({ int __ret = !!((!domains || domains->ddi_io == POWER_DOMAIN_INVALID )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& i915->drm))->dev), "", "drm_WARN_ON(" "!domains || domains->ddi_io == POWER_DOMAIN_INVALID" ")"); __builtin_expect(!!(__ret), 0); })) | |||
| 2442 | return POWER_DOMAIN_PORT_DDI_IO_A; | |||
| 2443 | ||||
| 2444 | return domains->ddi_io + (int)(port - domains->port_start); | |||
| 2445 | } | |||
| 2446 | ||||
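Each intel_ddi_port_domains entry covers a contiguous port range, and the returned domain is the entry's base domain plus the port's offset inside that range; this assumes the domain enum values for a range are consecutive, just like the ports. A self-contained model with hypothetical enum values (on the d13 layout, PORT_TC3 lands two past the TC1 base):

#include <stdio.h>

/* Hypothetical enum values; the real ones live in the driver headers. */
enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };
enum domain {
	DOM_DDI_IO_A, DOM_DDI_IO_B, DOM_DDI_IO_C,
	DOM_DDI_IO_TC1, DOM_DDI_IO_TC2, DOM_DDI_IO_TC3, DOM_DDI_IO_TC4,
};

struct range {
	enum port port_start, port_end;
	enum domain ddi_io; /* domain for port_start; later ports follow it */
};

static const struct range ranges[] = {
	{ PORT_A,   PORT_C,   DOM_DDI_IO_A   },
	{ PORT_TC1, PORT_TC4, DOM_DDI_IO_TC1 },
};

static enum domain ddi_io_domain(enum port port)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (port >= ranges[i].port_start && port <= ranges[i].port_end)
			return ranges[i].ddi_io + (port - ranges[i].port_start);

	return DOM_DDI_IO_A; /* fallback, as the driver warns and does */
}

int main(void)
{
	/* PORT_TC3 is two past PORT_TC1, so it maps to DOM_DDI_IO_TC3. */
	printf("PORT_TC3 -> %d (expect %d)\n",
	       (int)ddi_io_domain(PORT_TC3), (int)DOM_DDI_IO_TC3);
	return 0;
}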
| 2447 | enum intel_display_power_domain | |||
| 2448 | intel_display_power_ddi_lanes_domain(struct drm_i915_privateinteldrm_softc *i915, enum port port) | |||
| 2449 | { | |||
| 2450 | const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); | |||
| 2451 | ||||
| 2452 | if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)({ int __ret = !!((!domains || domains->ddi_lanes == POWER_DOMAIN_INVALID )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& i915->drm))->dev), "", "drm_WARN_ON(" "!domains || domains->ddi_lanes == POWER_DOMAIN_INVALID" ")"); __builtin_expect(!!(__ret), 0); })) | |||
| 2453 | return POWER_DOMAIN_PORT_DDI_LANES_A; | |||
| 2454 | ||||
| 2455 | return domains->ddi_lanes + (int)(port - domains->port_start); | |||
| 2456 | } | |||
| 2457 | ||||
| 2458 | static const struct intel_ddi_port_domains * | |||
| 2459 | intel_port_domains_for_aux_ch(struct drm_i915_privateinteldrm_softc *i915, enum aux_ch aux_ch) | |||
| 2460 | { | |||
| 2461 | const struct intel_ddi_port_domains *domains; | |||
| 2462 | int domains_size; | |||
| 2463 | int i; | |||
| 2464 | ||||
| 2465 | intel_port_domains_for_platform(i915, &domains, &domains_size); | |||
| 2466 | for (i = 0; i < domains_size; i++) | |||
| 2467 | if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) | |||
| 2468 | return &domains[i]; | |||
| 2469 | ||||
| 2470 | return NULL((void *)0); | |||
| 2471 | } | |||
| 2472 | ||||
| 2473 | enum intel_display_power_domain | |||
| 2474 | intel_display_power_legacy_aux_domain(struct drm_i915_privateinteldrm_softc *i915, enum aux_ch aux_ch) | |||
| 2475 | { | |||
| 2476 | const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); | |||
| 2477 | ||||
| 2478 | if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)({ int __ret = !!((!domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&i915->drm))->dev), "", "drm_WARN_ON(" "!domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID" ")"); __builtin_expect(!!(__ret), 0); })) | |||
| 2479 | return POWER_DOMAIN_AUX_A; | |||
| 2480 | ||||
| 2481 | return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); | |||
| 2482 | } | |||
| 2483 | ||||
| 2484 | enum intel_display_power_domain | |||
| 2485 | intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) | |||
| 2486 | { | |||
| 2487 | const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); | |||
| 2488 | ||||
| 2489 | if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID)) | |||
| 2490 | return POWER_DOMAIN_AUX_TBT1; | |||
| 2491 | ||||
| 2492 | return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); | |||
| 2493 | } |
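/*
 * Illustrative usage sketch only (this helper is hypothetical, not part of
 * the file above): resolving the power domains behind a DDI port and its
 * AUX channel with the lookup helpers shown above. Each helper falls back
 * to its "A"/"TBT1" domain (with a WARN) on a failed lookup.
 */
static void sketch_log_port_domains(struct drm_i915_private *i915,
				    enum port port, enum aux_ch aux_ch)
{
	enum intel_display_power_domain io =
		intel_display_power_ddi_io_domain(i915, port);
	enum intel_display_power_domain lanes =
		intel_display_power_ddi_lanes_domain(i915, port);
	enum intel_display_power_domain aux =
		intel_display_power_legacy_aux_domain(i915, aux_ch);

	drm_dbg(&i915->drm, "port domains: io=%s lanes=%s aux=%s\n",
		intel_display_power_domain_str(io),
		intel_display_power_domain_str(lanes),
		intel_display_power_domain_str(aux));
}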
| 1 | /* SPDX-License-Identifier: MIT */ |
| 2 | /* |
| 3 | * Copyright © 2019 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #ifndef __INTEL_DE_H__ |
| 7 | #define __INTEL_DE_H__ |
| 8 | |
| 9 | #include "i915_drv.h" |
| 10 | #include "i915_trace.h" |
| 11 | #include "intel_uncore.h" |
| 12 | |
| 13 | static inline u32 |
| 14 | intel_de_read(struct drm_i915_private *i915, i915_reg_t reg) |
| 15 | { |
| 16 | return intel_uncore_read(&i915->uncore, reg); |
| 17 | } |
| 18 | |
| 19 | static inline void |
| 20 | intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg) |
| 21 | { |
| 22 | intel_uncore_posting_read(&i915->uncore, reg); |
| 23 | } |
| 24 | |
| 25 | static inline void |
| 26 | intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val) |
| 27 | { |
| 28 | intel_uncore_write(&i915->uncore, reg, val); |
| 29 | } |
| 30 | |
| 31 | static inline void |
| 32 | intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set) |
| 33 | { |
| 34 | intel_uncore_rmw(&i915->uncore, reg, clear, set); |
| 35 | } |
| 36 | |
| 37 | static inline int |
| 38 | intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg, |
| 39 | u32 mask, u32 value, unsigned int timeout) |
| 40 | { |
| 41 | return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout); |
| 42 | } |
| 43 | |
| 44 | static inline int |
| 45 | intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg, |
| 46 | u32 mask, unsigned int timeout) |
| 47 | { |
| 48 | return intel_de_wait_for_register(i915, reg, mask, mask, timeout); |
| 49 | } |
| 50 | |
| 51 | static inline int |
| 52 | intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg, |
| 53 | u32 mask, unsigned int timeout) |
| 54 | { |
| 55 | return intel_de_wait_for_register(i915, reg, mask, 0, timeout); |
| 56 | } |
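/*
 * Illustrative sketch (register and bit names hypothetical): a common
 * pattern with the helpers above is to flip a control bit with
 * intel_de_rmw() and then poll the matching status bit, here with a
 * 10 ms timeout.
 */
static inline int sketch_enable_and_wait(struct drm_i915_private *i915,
					 i915_reg_t ctl_reg, u32 enable_bit,
					 u32 status_bit)
{
	intel_de_rmw(i915, ctl_reg, 0, enable_bit);
	return intel_de_wait_for_set(i915, ctl_reg, status_bit, 10);
}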
| 57 | |
| 58 | /* |
| 59 | * Unlocked mmio-accessors, think carefully before using these. |
| 60 | * |
| 61 | * Certain architectures will die if the same cacheline is concurrently accessed |
| 62 | * by different clients (e.g. on Ivybridge). Access to registers should |
| 63 | * therefore generally be serialised, by either the dev_priv->uncore.lock or |
| 64 | * a more localised lock guarding all access to that bank of registers. |
| 65 | */ |
| 66 | static inline u32 |
| 67 | intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg) |
| 68 | { |
| 69 | u32 val; |
| 70 | |
| 71 | val = intel_uncore_read_fw(&i915->uncore, reg); |
| 72 | trace_i915_reg_rw(false, reg, val, sizeof(val), true); |
| 73 | |
| 74 | return val; |
| 75 | } |
| 76 | |
| 77 | static inline void |
| 78 | intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val) |
| 79 | { |
| 80 | trace_i915_reg_rw(true, reg, val, sizeof(val), true); |
| 81 | intel_uncore_write_fw(&i915->uncore, reg, val); |
| 82 | } |
| 83 | |
| 84 | #endif /* __INTEL_DE_H__ */ |
| 1 | /* | |||
| 2 | * Copyright © 2017 Intel Corporation | |||
| 3 | * | |||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 5 | * copy of this software and associated documentation files (the "Software"), | |||
| 6 | * to deal in the Software without restriction, including without limitation | |||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 9 | * Software is furnished to do so, subject to the following conditions: | |||
| 10 | * | |||
| 11 | * The above copyright notice and this permission notice (including the next | |||
| 12 | * paragraph) shall be included in all copies or substantial portions of the | |||
| 13 | * Software. | |||
| 14 | * | |||
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |||
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |||
| 21 | * IN THE SOFTWARE. | |||
| 22 | * | |||
| 23 | */ | |||
| 24 | ||||
| 25 | #ifndef __INTEL_UNCORE_H__ | |||
| 26 | #define __INTEL_UNCORE_H__ | |||
| 27 | ||||
| 28 | #include <linux/spinlock.h> | |||
| 29 | #include <linux/notifier.h> | |||
| 30 | #include <linux/hrtimer.h> | |||
| 31 | #include <linux/io-64-nonatomic-lo-hi.h> | |||
| 32 | #include <linux/types.h> | |||
| 33 | ||||
| 34 | #include "i915_reg_defs.h" | |||
| 35 | ||||
| 36 | struct drm_device; | |||
| 37 | struct drm_i915_private; | |||
| 38 | struct intel_runtime_pm; | |||
| 39 | struct intel_uncore; | |||
| 40 | struct intel_gt; | |||
| 41 | ||||
| 42 | struct intel_uncore_mmio_debug { | |||
| 43 | spinlock_t lock; /** lock is also taken in irq contexts. */ | |||
| 44 | int unclaimed_mmio_check; | |||
| 45 | int saved_mmio_check; | |||
| 46 | u32 suspend_count; | |||
| 47 | }; | |||
| 48 | ||||
| 49 | enum forcewake_domain_id { | |||
| 50 | FW_DOMAIN_ID_RENDER = 0, | |||
| 51 | FW_DOMAIN_ID_GT, /* also includes blitter engine */ | |||
| 52 | FW_DOMAIN_ID_MEDIA, | |||
| 53 | FW_DOMAIN_ID_MEDIA_VDBOX0, | |||
| 54 | FW_DOMAIN_ID_MEDIA_VDBOX1, | |||
| 55 | FW_DOMAIN_ID_MEDIA_VDBOX2, | |||
| 56 | FW_DOMAIN_ID_MEDIA_VDBOX3, | |||
| 57 | FW_DOMAIN_ID_MEDIA_VDBOX4, | |||
| 58 | FW_DOMAIN_ID_MEDIA_VDBOX5, | |||
| 59 | FW_DOMAIN_ID_MEDIA_VDBOX6, | |||
| 60 | FW_DOMAIN_ID_MEDIA_VDBOX7, | |||
| 61 | FW_DOMAIN_ID_MEDIA_VEBOX0, | |||
| 62 | FW_DOMAIN_ID_MEDIA_VEBOX1, | |||
| 63 | FW_DOMAIN_ID_MEDIA_VEBOX2, | |||
| 64 | FW_DOMAIN_ID_MEDIA_VEBOX3, | |||
| 65 | ||||
| 66 | FW_DOMAIN_ID_COUNT | |||
| 67 | }; | |||
| 68 | ||||
| 69 | enum forcewake_domains { | |||
| 70 | FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), | |||
| 71 | FORCEWAKE_GT = BIT(FW_DOMAIN_ID_GT), | |||
| 72 | FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), | |||
| 73 | FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0), | |||
| 74 | FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1), | |||
| 75 | FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2), | |||
| 76 | FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3), | |||
| 77 | FORCEWAKE_MEDIA_VDBOX4 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX4), | |||
| 78 | FORCEWAKE_MEDIA_VDBOX5 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX5), | |||
| 79 | FORCEWAKE_MEDIA_VDBOX6 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX6), | |||
| 80 | FORCEWAKE_MEDIA_VDBOX7 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX7), | |||
| 81 | FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0), | |||
| 82 | FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1), | |||
| 83 | FORCEWAKE_MEDIA_VEBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX2), | |||
| 84 | FORCEWAKE_MEDIA_VEBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX3), | |||
| 85 | ||||
| 86 | FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1, | |||
| 87 | }; | |||
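/*
 * Usage sketch (this caller is hypothetical; the get/put functions are
 * declared later in this header): the enum values are single bits, so
 * several domains can be requested and released as one mask.
 */
static inline void sketch_wake_render_and_media(struct intel_uncore *uncore)
{
	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
	/* ... MMIO accesses that need both domains awake ... */
	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
}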
| 88 | ||||
| 89 | struct intel_uncore_fw_get { | |||
| 90 | void (*force_wake_get)(struct intel_uncore *uncore, | |||
| 91 | enum forcewake_domains domains); | |||
| 92 | }; | |||
| 93 | ||||
| 94 | struct intel_uncore_funcs { | |||
| 95 | enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore, | |||
| 96 | i915_reg_t r); | |||
| 97 | enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore, | |||
| 98 | i915_reg_t r); | |||
| 99 | ||||
| 100 | u8 (*mmio_readb)(struct intel_uncore *uncore, | |||
| 101 | i915_reg_t r, bool trace); | |||
| 102 | u16 (*mmio_readw)(struct intel_uncore *uncore, | |||
| 103 | i915_reg_t r, bool trace); | |||
| 104 | u32 (*mmio_readl)(struct intel_uncore *uncore, | |||
| 105 | i915_reg_t r, bool trace); | |||
| 106 | u64 (*mmio_readq)(struct intel_uncore *uncore, | |||
| 107 | i915_reg_t r, bool trace); | |||
| 108 | ||||
| 109 | void (*mmio_writeb)(struct intel_uncore *uncore, | |||
| 110 | i915_reg_t r, u8 val, bool trace); | |||
| 111 | void (*mmio_writew)(struct intel_uncore *uncore, | |||
| 112 | i915_reg_t r, u16 val, bool trace); | |||
| 113 | void (*mmio_writel)(struct intel_uncore *uncore, | |||
| 114 | i915_reg_t r, u32 val, bool trace); | |||
| 115 | }; | |||
| 116 | ||||
| 117 | struct intel_forcewake_range { | |||
| 118 | u32 start; | |||
| 119 | u32 end; | |||
| 120 | ||||
| 121 | enum forcewake_domains domains; | |||
| 122 | }; | |||
| 123 | ||||
| 124 | /* Other register ranges (e.g., shadow tables, MCR tables, etc.) */ | |||
| 125 | struct i915_range { | |||
| 126 | u32 start; | |||
| 127 | u32 end; | |||
| 128 | }; | |||
| 129 | ||||
| 130 | struct intel_uncore { | |||
| 131 | void __iomem *regs; | |||
| 132 | ||||
| 133 | struct drm_i915_private *i915; | |||
| 134 | struct intel_gt *gt; | |||
| 135 | struct intel_runtime_pm *rpm; | |||
| 136 | ||||
| 137 | spinlock_t lock; /** lock is also taken in irq contexts. */ | |||
| 138 | ||||
| 139 | /* | |||
| 140 | * Do we need to apply an additional offset to reach the beginning | |||
| 141 | * of the basic non-engine GT registers (referred to as "GSI" on | |||
| 142 | * newer platforms, or "GT block" on older platforms)? If so, we'll | |||
| 143 | * track that here and apply it transparently to registers in the | |||
| 144 | * appropriate range to maintain compatibility with our existing | |||
| 145 | * register definitions and GT code. | |||
| 146 | */ | |||
| 147 | u32 gsi_offset; | |||
| 148 | ||||
| 149 | unsigned int flags; | |||
| 150 | #define UNCORE_HAS_FORCEWAKE BIT(0) | |||
| 151 | #define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) | |||
| 152 | #define UNCORE_HAS_DBG_UNCLAIMED BIT(2) | |||
| 153 | #define UNCORE_HAS_FIFO BIT(3) | |||
| 154 | ||||
| 155 | const struct intel_forcewake_range *fw_domains_table; | |||
| 156 | unsigned int fw_domains_table_entries; | |||
| 157 | ||||
| 158 | /* | |||
| 159 | * Shadowed registers are special cases where we can safely write | |||
| 160 | * to the register *without* grabbing forcewake. | |||
| 161 | */ | |||
| 162 | const struct i915_range *shadowed_reg_table; | |||
| 163 | unsigned int shadowed_reg_table_entries; | |||
| 164 | ||||
| 165 | struct notifier_block pmic_bus_access_nb; | |||
| 166 | const struct intel_uncore_fw_get *fw_get_funcs; | |||
| 167 | struct intel_uncore_funcs funcs; | |||
| 168 | ||||
| 169 | unsigned int fifo_count; | |||
| 170 | ||||
| 171 | enum forcewake_domains fw_domains; | |||
| 172 | enum forcewake_domains fw_domains_active; | |||
| 173 | enum forcewake_domains fw_domains_timer; | |||
| 174 | enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ | |||
| 175 | ||||
| 176 | struct intel_uncore_forcewake_domain { | |||
| 177 | struct intel_uncore *uncore; | |||
| 178 | enum forcewake_domain_id id; | |||
| 179 | enum forcewake_domains mask; | |||
| 180 | unsigned int wake_count; | |||
| 181 | bool active; | |||
| 182 | struct timeout timer; | |||
| 183 | u32 __iomem *reg_set; | |||
| 184 | u32 __iomem *reg_ack; | |||
| 185 | } *fw_domain[FW_DOMAIN_ID_COUNT]; | |||
| 186 | ||||
| 187 | unsigned int user_forcewake_count; | |||
| 188 | ||||
| 189 | struct intel_uncore_mmio_debug *debug; | |||
| 190 | }; | |||
| 191 | ||||
| 192 | /* Iterate over initialised fw domains */ | |||
| 193 | #define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \ | |||
| 194 | for (tmp__ = (mask__); tmp__ ;) \ | |||
| 195 | for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)]) | |||
| 196 | ||||
| 197 | #define for_each_fw_domain(domain__, uncore__, tmp__) \ | |||
| 198 | for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__) | |||
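/*
 * Illustrative sketch (the helper itself is hypothetical): walking every
 * initialised forcewake domain with the iterator above, e.g. to dump
 * wake counts while debugging.
 */
static inline void sketch_dump_fw_domains(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	for_each_fw_domain(domain, uncore, tmp)
		printk(KERN_DEBUG "fw domain %d: wake_count=%u\n",
		       (int)domain->id, domain->wake_count);
}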
| 199 | ||||
| 200 | static inline bool | |||
| 201 | intel_uncore_has_forcewake(const struct intel_uncore *uncore) | |||
| 202 | { | |||
| 203 | return uncore->flags & UNCORE_HAS_FORCEWAKE; | |||
| 204 | } | |||
| 205 | ||||
| 206 | static inline bool | |||
| 207 | intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore) | |||
| 208 | { | |||
| 209 | return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED; | |||
| 210 | } | |||
| 211 | ||||
| 212 | static inline bool | |||
| 213 | intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore) | |||
| 214 | { | |||
| 215 | return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED; | |||
| 216 | } | |||
| 217 | ||||
| 218 | static inline bool | |||
| 219 | intel_uncore_has_fifo(const struct intel_uncore *uncore) | |||
| 220 | { | |||
| 221 | return uncore->flags & UNCORE_HAS_FIFO; | |||
| 222 | } | |||
| 222 | } | |||
| 223 | ||||
| 224 | void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915); | |||
| 225 | void intel_uncore_init_early(struct intel_uncore *uncore, | |||
| 226 | struct intel_gt *gt); | |||
| 227 | int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr); | |||
| 228 | int intel_uncore_init_mmio(struct intel_uncore *uncore); | |||
| 229 | void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, | |||
| 230 | struct intel_gt *gt); | |||
| 231 | bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); | |||
| 232 | bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); | |||
| 233 | void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); | |||
| 234 | void intel_uncore_fini_mmio(struct drm_device *dev, void *data); | |||
| 235 | void intel_uncore_suspend(struct intel_uncore *uncore); | |||
| 236 | void intel_uncore_resume_early(struct intel_uncore *uncore); | |||
| 237 | void intel_uncore_runtime_resume(struct intel_uncore *uncore); | |||
| 238 | ||||
| 239 | void assert_forcewakes_inactive(struct intel_uncore *uncore); | |||
| 240 | void assert_forcewakes_active(struct intel_uncore *uncore, | |||
| 241 | enum forcewake_domains fw_domains); | |||
| 242 | const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); | |||
| 243 | ||||
| 244 | enum forcewake_domains | |||
| 245 | intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, | |||
| 246 | i915_reg_t reg, unsigned int op); | |||
| 247 | #define FW_REG_READ (1) | |||
| 248 | #define FW_REG_WRITE (2) | |||
| 249 | ||||
| 250 | void intel_uncore_forcewake_get(struct intel_uncore *uncore, | |||
| 251 | enum forcewake_domains domains); | |||
| 252 | void intel_uncore_forcewake_put(struct intel_uncore *uncore, | |||
| 253 | enum forcewake_domains domains); | |||
| 254 | void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, | |||
| 255 | enum forcewake_domains domains); | |||
| 256 | void intel_uncore_forcewake_flush(struct intel_uncore *uncore, | |||
| 257 | enum forcewake_domains fw_domains); | |||
| 258 | ||||
| 259 | /* | |||
| 260 | * Like above but the caller must manage the uncore.lock itself. | |||
| 261 | * Must be used with intel_uncore_read_fw() and friends. | |||
| 262 | */ | |||
| 263 | void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, | |||
| 264 | enum forcewake_domains domains); | |||
| 265 | void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, | |||
| 266 | enum forcewake_domains domains); | |||
| 267 | ||||
| 268 | void intel_uncore_forcewake_user_get(struct intel_uncore *uncore); | |||
| 269 | void intel_uncore_forcewake_user_put(struct intel_uncore *uncore); | |||
| 270 | ||||
| 271 | int __intel_wait_for_register(struct intel_uncore *uncore, | |||
| 272 | i915_reg_t reg, | |||
| 273 | u32 mask, | |||
| 274 | u32 value, | |||
| 275 | unsigned int fast_timeout_us, | |||
| 276 | unsigned int slow_timeout_ms, | |||
| 277 | u32 *out_value); | |||
| 278 | static inline int | |||
| 279 | intel_wait_for_register(struct intel_uncore *uncore, | |||
| 280 | i915_reg_t reg, | |||
| 281 | u32 mask, | |||
| 282 | u32 value, | |||
| 283 | unsigned int timeout_ms) | |||
| 284 | { | |||
| 285 | return __intel_wait_for_register(uncore, reg, mask, value, 2, | |||
| 286 | timeout_ms, NULL); | |||
| 287 | } | |||
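/*
 * Usage sketch (register and bit names hypothetical): wait up to 100 ms
 * for a self-clearing reset bit to read back as zero, using the 2 us
 * fast-poll wrapper above.
 */
static inline int sketch_wait_reset_done(struct intel_uncore *uncore,
					 i915_reg_t reset_reg, u32 reset_bit)
{
	return intel_wait_for_register(uncore, reset_reg, reset_bit, 0, 100);
}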
| 288 | ||||
| 289 | int __intel_wait_for_register_fw(struct intel_uncore *uncore, | |||
| 290 | i915_reg_t reg, | |||
| 291 | u32 mask, | |||
| 292 | u32 value, | |||
| 293 | unsigned int fast_timeout_us, | |||
| 294 | unsigned int slow_timeout_ms, | |||
| 295 | u32 *out_value); | |||
| 296 | static inline int | |||
| 297 | intel_wait_for_register_fw(struct intel_uncore *uncore, | |||
| 298 | i915_reg_t reg, | |||
| 299 | u32 mask, | |||
| 300 | u32 value, | |||
| 301 | unsigned int timeout_ms) | |||
| 302 | { | |||
| 303 | return __intel_wait_for_register_fw(uncore, reg, mask, value, | |||
| 304 | 2, timeout_ms, NULL); | |||
| 305 | } | |||
| 306 | ||||
| 307 | #define IS_GSI_REG(reg) ((reg) < 0x40000) | |||
| 308 | ||||
| 309 | /* register access functions */ | |||
| 310 | #define __raw_read(x__, s__) \ | |||
| 311 | static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ | |||
| 312 | i915_reg_t reg) \ | |||
| 313 | { \ | |||
| 314 | u32 offset = i915_mmio_reg_offset(reg); \ | |||
| 315 | if (IS_GSI_REG(offset)) \ | |||
| 316 | offset += uncore->gsi_offset; \ | |||
| 317 | return read##s__(uncore->regs + offset); \ | |||
| 318 | } | |||
| 319 | ||||
| 320 | #define __raw_write(x__, s__) \ | |||
| 321 | static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ | |||
| 322 | i915_reg_t reg, u##x__ val) \ | |||
| 323 | { \ | |||
| 324 | u32 offset = i915_mmio_reg_offset(reg); \ | |||
| 325 | if (IS_GSI_REG(offset)) \ | |||
| 326 | offset += uncore->gsi_offset; \ | |||
| 327 | write##s__(val, uncore->regs + offset); \ | |||
| 328 | } | |||
| 329 | __raw_read(8, b) | |||
| 330 | __raw_read(16, w) | |||
| 331 | __raw_read(32, l) | |||
| 332 | __raw_read(64, q) | |||
| 333 | ||||
| 334 | __raw_write(8, b) | |||
| 335 | __raw_write(16, w) | |||
| 336 | __raw_write(32, l) | |||
| 337 | __raw_write(64, q) | |||
| 338 | ||||
| 339 | #undef __raw_read | |||
| 340 | #undef __raw_write | |||
| 341 | ||||
| 342 | #define __uncore_read(name__, x__, s__, trace__) \ | |||
| 343 | static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \ | |||
| 344 | i915_reg_t reg) \ | |||
| 345 | { \ | |||
| 346 | return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \ | |||
| 347 | } | |||
| 348 | ||||
| 349 | #define __uncore_write(name__, x__, s__, trace__) \ | |||
| 350 | static inline void intel_uncore_##name__(struct intel_uncore *uncore, \ | |||
| 351 | i915_reg_t reg, u##x__ val) \ | |||
| 352 | { \ | |||
| 353 | uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \ | |||
| 354 | } | |||
| 355 | ||||
| 356 | __uncore_read(read8, 8, b, true) | |||
| 357 | __uncore_read(read16, 16, w, true) | |||
| 358 | __uncore_read(read, 32, l, true) | |||
| 359 | __uncore_read(read16_notrace, 16, w, false) | |||
| 360 | __uncore_read(read_notrace, 32, l, false) | |||
| 361 | ||||
| 362 | __uncore_write(write8, 8, b, true) | |||
| 363 | __uncore_write(write16, 16, w, true) | |||
| 364 | __uncore_write(write, 32, l, true) | |||
| 365 | __uncore_write(write_notrace, 32, l, false) | |||
| 366 | ||||
| 367 | /* Be very careful with read/write 64-bit values. On 32-bit machines, they | |||
| 368 | * will be implemented using 2 32-bit writes in an arbitrary order with | |||
| 369 | * an arbitrary delay between them. This can cause the hardware to | |||
| 370 | * act upon the intermediate value, possibly leading to corruption and | |||
| 371 | * machine death. For this reason we do not support intel_uncore_write64, | |||
| 372 | * or uncore->funcs.mmio_writeq. | |||
| 373 | * | |||
| 374 | * When reading a 64-bit value as two 32-bit values, the delay may cause | |||
| 375 | * the two reads to mismatch, e.g. a timestamp overflowing. Also note that | |||
| 376 | * occasionally a 64-bit register does not actually support a full readq | |||
| 377 | * and must be read using two 32-bit reads. | |||
| 378 | * | |||
| 379 | * You have been warned. | |||
| 380 | */ | |||
| 381 | __uncore_read(read64, 64, q, true) | |||
| 382 | ||||
| 383 | static inline u64 | |||
| 384 | intel_uncore_read64_2x32(struct intel_uncore *uncore, | |||
| 385 | i915_reg_t lower_reg, i915_reg_t upper_reg) | |||
| 386 | { | |||
| 387 | u32 upper, lower, old_upper, loop = 0; | |||
| 388 | upper = intel_uncore_read(uncore, upper_reg); | |||
| 389 | do { | |||
| 390 | old_upper = upper; | |||
| 391 | lower = intel_uncore_read(uncore, lower_reg); | |||
| 392 | upper = intel_uncore_read(uncore, upper_reg); | |||
| 393 | } while (upper != old_upper && loop++ < 2); | |||
| 394 | return (u64)upper << 32 | lower; | |||
| 395 | } | |||
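/*
 * Usage sketch (register names hypothetical): a 64-bit counter exposed
 * as a LOW/HIGH register pair is read via the retry loop above, which
 * accepts the value only once two successive reads of HIGH match (or a
 * small retry limit is hit).
 */
static inline u64 sketch_read_counter(struct intel_uncore *uncore,
				      i915_reg_t low, i915_reg_t high)
{
	return intel_uncore_read64_2x32(uncore, low, high);
}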
| 396 | ||||
| 397 | #define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__)) | |||
| 398 | #define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__)) | |||
| 399 | ||||
| 400 | #undef __uncore_read | |||
| 401 | #undef __uncore_write | |||
| 402 | ||||
| 403 | /* These are untraced mmio-accessors that are only valid to be used inside | |||
| 404 | * critical sections, such as inside IRQ handlers, where forcewake is explicitly | |||
| 405 | * controlled. | |||
| 406 | * | |||
| 407 | * Think twice, and think again, before using these. | |||
| 408 | * | |||
| 409 | * As an example, these accessors can possibly be used between: | |||
| 410 | * | |||
| 411 | * spin_lock_irq(&uncore->lock); | |||
| 412 | * intel_uncore_forcewake_get__locked(); | |||
| 413 | * | |||
| 414 | * and | |||
| 415 | * | |||
| 416 | * intel_uncore_forcewake_put__locked(); | |||
| 417 | * spin_unlock_irq(&uncore->lock); | |||
| 418 | * | |||
| 419 | * | |||
| 420 | * Note: some registers may not need forcewake held, so | |||
| 421 | * intel_uncore_forcewake_{get,put} can be omitted, see | |||
| 422 | * intel_uncore_forcewake_for_reg(). | |||
| 423 | * | |||
| 424 | * Certain architectures will die if the same cacheline is concurrently accessed | |||
| 425 | * by different clients (e.g. on Ivybridge). Access to registers should | |||
| 426 | * therefore generally be serialised, by either the dev_priv->uncore.lock or | |||
| 427 | * a more localised lock guarding all access to that bank of registers. | |||
| 428 | */ | |||
| 429 | #define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__) | |||
| 430 | #define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__) | |||
| 431 | #define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__) | |||
| 432 | #define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__)) | |||
| 433 | ||||
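/*
 * Illustrative sketch only (this helper is hypothetical): the locked
 * access sequence described in the comment above, spelled out end to
 * end with the _fw accessors.
 */
static inline u32 sketch_read_fw_serialised(struct intel_uncore *uncore,
					    i915_reg_t reg)
{
	u32 val;

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	spin_unlock_irq(&uncore->lock);

	return val;
}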
| 434 | static inline void intel_uncore_rmw(struct intel_uncore *uncore, | |||
| 435 | i915_reg_t reg, u32 clear, u32 set) | |||
| 436 | { | |||
| 437 | u32 old, val; | |||
| 438 | ||||
| 439 | old = intel_uncore_read(uncore, reg); | |||
| 440 | val = (old & ~clear) | set; | |||
| 441 | if (val != old) | |||
| 442 | intel_uncore_write(uncore, reg, val); | |||
| 443 | } | |||
| 444 | ||||
| 445 | static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore, | |||
| 446 | i915_reg_t reg, u32 clear, u32 set) | |||
| 447 | { | |||
| 448 | u32 old, val; | |||
| 449 | ||||
| 450 | old = intel_uncore_read_fw(uncore, reg); | |||
| 451 | val = (old & ~clear) | set; | |||
| 452 | if (val != old) | |||
| 453 | intel_uncore_write_fw(uncore, reg, val); | |||
| 454 | } | |||
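/*
 * Usage sketch (names hypothetical): intel_uncore_rmw() above only
 * issues the write when the masked update actually changes the value,
 * e.g. when toggling a single enable bit.
 */
static inline void sketch_set_enable_bit(struct intel_uncore *uncore,
					 i915_reg_t reg, u32 enable_bit,
					 bool enable)
{
	intel_uncore_rmw(uncore, reg,
			 enable ? 0 : enable_bit,
			 enable ? enable_bit : 0);
}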
| 455 | ||||
| 456 | static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, | |||
| 457 | i915_reg_t reg, u32 val, | |||
| 458 | u32 mask, u32 expected_val) | |||
| 459 | { | |||
| 460 | u32 reg_val; | |||
| 461 | ||||
| 462 | intel_uncore_write(uncore, reg, val); | |||
| 463 | reg_val = intel_uncore_read(uncore, reg); | |||
| 464 | ||||
| 465 | return (reg_val & mask) != expected_val ? -EINVAL : 0; | |||
| 466 | } | |||
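/*
 * Usage sketch (names hypothetical): confirm a write actually latched by
 * reading it back through the helper above, which returns -EINVAL on a
 * mismatch (useful for registers that silently drop writes, e.g. while
 * the backing power well is off).
 */
static inline int sketch_set_mode_checked(struct intel_uncore *uncore,
					  i915_reg_t mode_reg, u32 mode)
{
	return intel_uncore_write_and_verify(uncore, mode_reg, mode,
					     0xffffffff, mode);
}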
| 467 | ||||
| 468 | /* | |||
| 469 | * The raw_reg_{read,write} macros are intended as a micro-optimization for | |||
| 470 | * interrupt handlers so that the pointer indirection on uncore->regs can | |||
| 471 | * be computed once (and presumably cached in a register) instead of generating | |||
| 472 | * extra load instructions for each MMIO access. | |||
| 473 | * | |||
| 474 | * Given that these macros are only intended for non-GSI interrupt registers | |||
| 475 | * (and the goal is to avoid extra instructions generated by the compiler), | |||
| 476 | * these macros do not account for uncore->gsi_offset. Any caller that needs | |||
| 477 | * to use these macros on a GSI register is responsible for adding the | |||
| 478 | * appropriate GSI offset to the 'base' parameter. | |||
| 479 | */ | |||
| 480 | #define raw_reg_read(base, reg) \ | |||
| 481 | readl(base + i915_mmio_reg_offset(reg)) | |||
| 482 | #define raw_reg_write(base, reg, value) \ | |||
| 483 | writel(value, base + i915_mmio_reg_offset(reg)) | |||
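/*
 * Illustrative interrupt-handler sketch (register name hypothetical):
 * hoist uncore->regs into a local once, then ack pending bits with the
 * raw accessors so each access is a single load plus MMIO op, as the
 * comment above describes.
 */
static inline u32 sketch_ack_pending_irqs(struct intel_uncore *uncore,
					  i915_reg_t iir)
{
	void __iomem *regs = uncore->regs;
	u32 pending = raw_reg_read(regs, iir);

	if (pending)
		raw_reg_write(regs, iir, pending);

	return pending;
}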
| 484 | ||||
| 485 | #endif /* !__INTEL_UNCORE_H__ */ |