| File: | dev/pci/drm/i915/intel_pm.c |
| Warning: | line 4232, column 54: Division by zero |
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "display/intel_atomic.h"
#include "display/intel_bw.h"
#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"

#include "gt/intel_llc.h"

#include "i915_drv.h"
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#ifdef __linux__
#include "../../../platform/x86/intel_ips.h"
#endif

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/*
	 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
	 * Display WA #0859: skl,bxt,kbl,glk,cfl
	 */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when DSI port PLL
	 * is off and a MMIO access is attempted by any privilege
	 * application, using batch buffers or any other means.
	 */
	I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));

	/*
	 * WaFbcTurnOffFbcWatermark:bxt
	 * Display WA #0562: bxt
	 */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS);

	/*
	 * WaFbcHighMemBwCorruptionAvoidance:bxt
	 * Display WA #0883: bxt
	 */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							  bool is_ddr3,
							  int fsb,
							  int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    enableddisabled(enable),
		    enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	switch (pipe) {
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
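
/*
 * Editor's note: a worked example of the method 1 formula above. The numbers
 * are illustrative assumptions, not values taken from this report. For
 * pixel_rate = 148500 kHz (1080p60), cpp = 4 bytes and latency = 50
 * (5 us expressed in 0.1 us units):
 *
 *   148500 * 4 * 50 / 10000 = 2970 bytes
 *
 * i.e. roughly 3 KiB of FIFO is drained while memory wakes up, before the
 * caller adds any extra cachelines for TLB misses or clock crossings.
 */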

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
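
/*
 * Editor's note: a worked example of the method 2 formula above, under the
 * same illustrative assumptions (pixel_rate = 148500 kHz, htotal = 2200,
 * width = 1920, cpp = 4, latency = 50, i.e. 5 us):
 *
 *   (50 * 148500) / (2200 * 10000) = 0 whole lines fetched during the latency
 *   (0 + 1) * 1920 * 4             = 7680 bytes
 *
 * With a 5 us latency shorter than one line time (~14.8 us here), the long
 * term drain rate is covered by buffering a single line of the plane.
 */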

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
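
/*
 * Editor's sketch (not part of intel_pm.c): the same calculation in plain C,
 * fed with the Pineview display FIFO parameters from above and the assumed
 * 1080p60 mode used in the earlier worked examples.
 */
static unsigned int example_pnv_display_wm(void)
{
	const int pixel_rate = 148500;		/* kHz, assumed mode */
	const int cpp = 4;			/* bytes per pixel */
	const int latency_ns = 5000;		/* pessimal_latency_ns */
	const int fifo_size = 512;		/* PINEVIEW_DISPLAY_FIFO */
	const int cacheline_size = 64;		/* PINEVIEW_FIFO_LINE_SIZE */
	const int guard_size = 10;		/* PINEVIEW_GUARD_WM */

	/* method 1: bytes drained while memory wakes up (2970 here) */
	int bytes = pixel_rate * cpp * (latency_ns / 100) / 10000;
	/* convert to FIFO cachelines, rounding up, plus the guard (47 + 10) */
	int entries = (bytes + cacheline_size - 1) / cacheline_size + guard_size;

	/* watermark = FIFO size minus the entries needed to ride out latency */
	return fifo_size - entries;		/* 512 - 57 = 455, below max_wm */
}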

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pnv_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->hw.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
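
/*
 * Editor's note: a worked example of the adjustment above, with illustrative
 * numbers. A 511-entry FIFO holds 511 * 64 = 32704 bytes. For a 640 pixel
 * wide, 4 bpp plane, eight whole lines are 640 * 4 * 8 = 20480 bytes, which
 * fit, so the watermark is bumped by 32704 - 20480 = 12224 bytes. For a
 * 1920 pixel wide, 4 bpp plane, eight lines (61440 bytes) do not fit and the
 * adjustment clamps to 0.
 */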
| 971 | ||||
| 972 | static void g4x_write_wm_values(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 973 | const struct g4x_wm_values *wm) | |||
| 974 | { | |||
| 975 | enum pipe pipe; | |||
| 976 | ||||
| 977 | for_each_pipe(dev_priv, pipe) | |||
| 978 | trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); | |||
| 979 | ||||
| 980 | I915_WRITE(DSPFW1, | |||
| 981 | FW_WM(wm->sr.plane, SR) | | |||
| 982 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | | |||
| 983 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | | |||
| 984 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); | |||
| 985 | I915_WRITE(DSPFW2, | |||
| 986 | (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | | |||
| 987 | FW_WM(wm->sr.fbc, FBC_SR) | | |||
| 988 | FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | | |||
| 989 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | | |||
| 990 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | | |||
| 991 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); | |||
| 992 | I915_WRITE(DSPFW3, | |||
| 993 | (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | | |||
| 994 | FW_WM(wm->sr.cursor, CURSOR_SR) | | |||
| 995 | FW_WM(wm->hpll.cursor, HPLL_CURSOR) | | |||
| 996 | FW_WM(wm->hpll.plane, HPLL_SR)); | |||
| 997 | ||||
| 998 | POSTING_READ(DSPFW1); | |||
| 999 | } | |||
| 1000 | ||||
| 1001 | #define FW_WM_VLV(value, plane) \ | |||
| 1002 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) | |||
| 1003 | ||||
| 1004 | static void vlv_write_wm_values(struct drm_i915_private *dev_priv, | |||
| 1005 | const struct vlv_wm_values *wm) | |||
| 1006 | { | |||
| 1007 | enum pipe pipe; | |||
| 1008 | ||||
| 1009 | for_each_pipe(dev_priv, pipe) { | |||
| 1010 | trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); | |||
| 1011 | ||||
| 1012 | I915_WRITE(VLV_DDL(pipe), | |||
| 1013 | (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | | |||
| 1014 | (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | | |||
| 1015 | (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | | |||
| 1016 | (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); | |||
| 1017 | } | |||
| 1018 | ||||
| 1019 | /* | |||
| 1020 | * Zero the (unused) WM1 watermarks, and also clear all the | |||
| 1021 | * high order bits so that there are no out of bounds values | |||
| 1022 | * present in the registers during the reprogramming. | |||
| 1023 | */ | |||
| 1024 | I915_WRITE(DSPHOWM, 0); | |||
| 1025 | I915_WRITE(DSPHOWM1, 0); | |||
| 1026 | I915_WRITE(DSPFW4, 0); | |||
| 1027 | I915_WRITE(DSPFW5, 0); | |||
| 1028 | I915_WRITE(DSPFW6, 0); | |||
| 1029 | ||||
| 1030 | I915_WRITE(DSPFW1, | |||
| 1031 | FW_WM(wm->sr.plane, SR) | | |||
| 1032 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | | |||
| 1033 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | | |||
| 1034 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); | |||
| 1035 | I915_WRITE(DSPFW2, | |||
| 1036 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | | |||
| 1037 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | | |||
| 1038 | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); | |||
| 1039 | I915_WRITE(DSPFW3, | |||
| 1040 | FW_WM(wm->sr.cursor, CURSOR_SR)); | |||
| 1041 | ||||
| 1042 | if (IS_CHERRYVIEW(dev_priv)) { | |||
| 1043 | I915_WRITE(DSPFW7_CHV, | |||
| 1044 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | | |||
| 1045 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); | |||
| 1046 | I915_WRITE(DSPFW8_CHV, | |||
| 1047 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | | |||
| 1048 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); | |||
| 1049 | I915_WRITE(DSPFW9_CHV, | |||
| 1050 | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | | |||
| 1051 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); | |||
| 1052 | I915_WRITE(DSPHOWM, | |||
| 1053 | FW_WM(wm->sr.plane >> 9, SR_HI) | | |||
| 1054 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | | |||
| 1055 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | | |||
| 1056 | FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | | |||
| 1057 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | | |||
| 1058 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | | |||
| 1059 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | | |||
| 1060 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | | |||
| 1061 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | | |||
| 1062 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); | |||
| 1063 | } else { | |||
| 1064 | I915_WRITE(DSPFW7, | |||
| 1065 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | | |||
| 1066 | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); | |||
| 1067 | I915_WRITE(DSPHOWM, | |||
| 1068 | FW_WM(wm->sr.plane >> 9, SR_HI) | | |||
| 1069 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | | |||
| 1070 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | | |||
| 1071 | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | | |||
| 1072 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | | |||
| 1073 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | | |||
| 1074 | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); | |||
| 1075 | } | |||
| 1076 | ||||
| 1077 | POSTING_READ(DSPFW1); | |||
| 1078 | } | |||
| 1079 | ||||
| 1080 | #undef FW_WM_VLV | |||
| 1081 | ||||
| 1082 | static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
| 1083 | { | |||
| 1084 | /* all latencies in usec */ | |||
| 1085 | dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; | |||
| 1086 | dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12; | |||
| 1087 | dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; | |||
| 1088 | ||||
| 1089 | dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL; | |||
| 1090 | } | |||
| 1091 | ||||
| 1092 | static int g4x_plane_fifo_size(enum plane_id plane_id, int level) | |||
| 1093 | { | |||
| 1094 | /* | |||
| 1095 | * DSPCNTR[13] supposedly controls whether the | |||
| 1096 | * primary plane can use the FIFO space otherwise | |||
| 1097 | * reserved for the sprite plane. It's not 100% clear | |||
| 1098 | * what the actual FIFO size is, but it looks like we | |||
| 1099 | * can happily set both primary and sprite watermarks | |||
| 1100 | * up to 127 cachelines. So that would seem to mean | |||
| 1101 | * that either DSPCNTR[13] doesn't do anything, or that | |||
| 1102 | * the total FIFO is >= 256 cachelines in size. Either | |||
| 1103 | * way, we don't seem to have to worry about this | |||
| 1104 | * repartitioning as the maximum watermark value the | |||
| 1105 | * register can hold for each plane is lower than the | |||
| 1106 | * minimum FIFO size. | |||
| 1107 | */ | |||
| 1108 | switch (plane_id) { | |||
| 1109 | case PLANE_CURSOR: | |||
| 1110 | return 63; | |||
| 1111 | case PLANE_PRIMARY: | |||
| 1112 | return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; | |||
| 1113 | case PLANE_SPRITE0: | |||
| 1114 | return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; | |||
| 1115 | default: | |||
| 1116 | MISSING_CASE(plane_id); | |||
| 1117 | return 0; | |||
| 1118 | } | |||
| 1119 | } | |||
| 1120 | ||||
| 1121 | static int g4x_fbc_fifo_size(int level) | |||
| 1122 | { | |||
| 1123 | switch (level) { | |||
| 1124 | case G4X_WM_LEVEL_SR: | |||
| 1125 | return 7; | |||
| 1126 | case G4X_WM_LEVEL_HPLL: | |||
| 1127 | return 15; | |||
| 1128 | default: | |||
| 1129 | MISSING_CASE(level); | |||
| 1130 | return 0; | |||
| 1131 | } | |||
| 1132 | } | |||
| 1133 | ||||
| 1134 | static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, | |||
| 1135 | const struct intel_plane_state *plane_state, | |||
| 1136 | int level) | |||
| 1137 | { | |||
| 1138 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); | |||
| 1139 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | |||
| 1140 | const struct drm_display_mode *adjusted_mode = | |||
| 1141 | &crtc_state->hw.adjusted_mode; | |||
| 1142 | unsigned int latency = dev_priv->wm.pri_latency[level] * 10; | |||
| 1143 | unsigned int clock, htotal, cpp, width, wm; | |||
| 1144 | ||||
| 1145 | if (latency == 0) | |||
| 1146 | return USHRT_MAX; | |||
| 1147 | ||||
| 1148 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 1149 | return 0; | |||
| 1150 | ||||
| 1151 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 1152 | ||||
| 1153 | /* | |||
| 1154 | * Not 100% sure which way ELK should go here as the | |||
| 1155 | * spec only says CL/CTG should assume 32bpp and BW | |||
| 1156 | * doesn't need to. But as these things followed the | |||
| 1157 | * mobile vs. desktop lines on gen3 as well, let's | |||
| 1158 | * assume ELK doesn't need this. | |||
| 1159 | * | |||
| 1160 | * The spec also fails to list such a restriction for | |||
| 1161 | * the HPLL watermark, which seems a little strange. | |||
| 1162 | * Let's use 32bpp for the HPLL watermark as well. | |||
| 1163 | */ | |||
| 1164 | if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY && | |||
| 1165 | level != G4X_WM_LEVEL_NORMAL) | |||
| 1166 | cpp = max(cpp, 4u); | |||
| 1167 | ||||
| 1168 | clock = adjusted_mode->crtc_clock; | |||
| 1169 | htotal = adjusted_mode->crtc_htotal; | |||
| 1170 | ||||
| 1171 | width = drm_rect_width(&plane_state->uapi.dst); | |||
| 1172 | ||||
| 1173 | if (plane->id == PLANE_CURSOR) { | |||
| 1174 | wm = intel_wm_method2(clock, htotal, width, cpp, latency); | |||
| 1175 | } else if (plane->id == PLANE_PRIMARY && | |||
| 1176 | level == G4X_WM_LEVEL_NORMAL) { | |||
| 1177 | wm = intel_wm_method1(clock, cpp, latency); | |||
| 1178 | } else { | |||
| 1179 | unsigned int small, large; | |||
| 1180 | ||||
| 1181 | small = intel_wm_method1(clock, cpp, latency); | |||
| 1182 | large = intel_wm_method2(clock, htotal, width, cpp, latency); | |||
| 1183 | ||||
| 1184 | wm = min(small, large); | |||
| 1185 | } | |||
| 1186 | ||||
| 1187 | wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), | |||
| 1188 | width, cpp); | |||
| 1189 | ||||
| 1190 | wm = DIV_ROUND_UP(wm, 64) + 2; | |||
| 1191 | ||||
| 1192 | return min_t(unsigned int, wm, USHRT_MAX); | |||
| 1193 | } | |||
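| | /* | |||
| | * Worked example (illustrative figures, assuming the method1/method2 | |||
| | * helpers earlier in this file return a byte count): a 6000 byte | |||
| | * result plus a 12224 byte TLB-miss adjustment gives | |||
| | * DIV_ROUND_UP(18224, 64) + 2 = 285 + 2 = 287 cachelines, which is | |||
| | * then clamped to USHRT_MAX before being compared against the plane | |||
| | * FIFO size in g4x_raw_plane_wm_compute(). | |||
| | */ | |||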
| 1194 | ||||
| 1195 | static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, | |||
| 1196 | int level, enum plane_id plane_id, u16 value) | |||
| 1197 | { | |||
| 1198 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1199 | bool dirty = false; | |||
| 1200 | ||||
| 1201 | for (; level < intel_wm_num_levels(dev_priv); level++) { | |||
| 1202 | struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1203 | ||||
| 1204 | dirty |= raw->plane[plane_id] != value; | |||
| 1205 | raw->plane[plane_id] = value; | |||
| 1206 | } | |||
| 1207 | ||||
| 1208 | return dirty; | |||
| 1209 | } | |||
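| | /* | |||
| | * Usage note (illustrative; assumes intel_wm_num_levels() is | |||
| | * max_level + 1, i.e. three levels on g4x): the helper above writes | |||
| | * 'value' into the given level and every level above it, so e.g. | |||
| | * g4x_raw_plane_wm_set(crtc_state, G4X_WM_LEVEL_SR, plane_id, 0) | |||
| | * clears the SR and HPLL entries while leaving NORMAL untouched. | |||
| | */ | |||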
| 1210 | ||||
| 1211 | static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, | |||
| 1212 | int level, u16 value) | |||
| 1213 | { | |||
| 1214 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1215 | bool dirty = false; | |||
| 1216 | ||||
| 1217 | /* NORMAL level doesn't have an FBC watermark */ | |||
| 1218 | level = max(level, G4X_WM_LEVEL_SR); | |||
| 1219 | ||||
| 1220 | for (; level < intel_wm_num_levels(dev_priv); level++) { | |||
| 1221 | struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1222 | ||||
| 1223 | dirty |= raw->fbc != value; | |||
| 1224 | raw->fbc = value; | |||
| 1225 | } | |||
| 1226 | ||||
| 1227 | return dirty; | |||
| 1228 | } | |||
| 1229 | ||||
| 1230 | static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, | |||
| 1231 | const struct intel_plane_state *plane_state, | |||
| 1232 | u32 pri_val); | |||
| 1233 | ||||
| 1234 | static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, | |||
| 1235 | const struct intel_plane_state *plane_state) | |||
| 1236 | { | |||
| 1237 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); | |||
| 1238 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1239 | int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); | |||
| 1240 | enum plane_id plane_id = plane->id; | |||
| 1241 | bool dirty = false; | |||
| 1242 | int level; | |||
| 1243 | ||||
| 1244 | if (!intel_wm_plane_visible(crtc_state, plane_state)) { | |||
| 1245 | dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); | |||
| 1246 | if (plane_id == PLANE_PRIMARY) | |||
| 1247 | dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); | |||
| 1248 | goto out; | |||
| 1249 | } | |||
| 1250 | ||||
| 1251 | for (level = 0; level < num_levels; level++) { | |||
| 1252 | struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1253 | int wm, max_wm; | |||
| 1254 | ||||
| 1255 | wm = g4x_compute_wm(crtc_state, plane_state, level); | |||
| 1256 | max_wm = g4x_plane_fifo_size(plane_id, level); | |||
| 1257 | ||||
| 1258 | if (wm > max_wm) | |||
| 1259 | break; | |||
| 1260 | ||||
| 1261 | dirty |= raw->plane[plane_id] != wm; | |||
| 1262 | raw->plane[plane_id] = wm; | |||
| 1263 | ||||
| 1264 | if (plane_id != PLANE_PRIMARY || | |||
| 1265 | level == G4X_WM_LEVEL_NORMAL) | |||
| 1266 | continue; | |||
| 1267 | ||||
| 1268 | wm = ilk_compute_fbc_wm(crtc_state, plane_state, | |||
| 1269 | raw->plane[plane_id]); | |||
| 1270 | max_wm = g4x_fbc_fifo_size(level); | |||
| 1271 | ||||
| 1272 | /* | |||
| 1273 | * FBC wm is not mandatory as we | |||
| 1274 | * can always just disable its use. | |||
| 1275 | */ | |||
| 1276 | if (wm > max_wm) | |||
| 1277 | wm = USHRT_MAX; | |||
| 1278 | ||||
| 1279 | dirty |= raw->fbc != wm; | |||
| 1280 | raw->fbc = wm; | |||
| 1281 | } | |||
| 1282 | ||||
| 1283 | /* mark watermarks as invalid */ | |||
| 1284 | dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); | |||
| 1285 | ||||
| 1286 | if (plane_id == PLANE_PRIMARY) | |||
| 1287 | dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); | |||
| 1288 | ||||
| 1289 | out: | |||
| 1290 | if (dirty) { | |||
| 1291 | drm_dbg_kms(&dev_priv->drm, | |||
| 1292 | "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", | |||
| 1293 | plane->base.name, | |||
| 1294 | crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], | |||
| 1295 | crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], | |||
| 1296 | crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); | |||
| 1297 | ||||
| 1298 | if (plane_id == PLANE_PRIMARY) | |||
| 1299 | drm_dbg_kms(&dev_priv->drm, | |||
| 1300 | "FBC watermarks: SR=%d, HPLL=%d\n", | |||
| 1301 | crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, | |||
| 1302 | crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); | |||
| 1303 | } | |||
| 1304 | ||||
| 1305 | return dirty; | |||
| 1306 | } | |||
| 1307 | ||||
| 1308 | static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, | |||
| 1309 | enum plane_id plane_id, int level) | |||
| 1310 | { | |||
| 1311 | const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1312 | ||||
| 1313 | return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); | |||
| 1314 | } | |||
| 1315 | ||||
| 1316 | static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, | |||
| 1317 | int level) | |||
| 1318 | { | |||
| 1319 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1320 | ||||
| 1321 | if (level > dev_priv->wm.max_level) | |||
| 1322 | return false; | |||
| 1323 | ||||
| 1324 | return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && | |||
| 1325 | g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && | |||
| 1326 | g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); | |||
| 1327 | } | |||
| 1328 | ||||
| 1329 | /* mark all levels starting from 'level' as invalid */ | |||
| 1330 | static void g4x_invalidate_wms(struct intel_crtc *crtc, | |||
| 1331 | struct g4x_wm_state *wm_state, int level) | |||
| 1332 | { | |||
| 1333 | if (level <= G4X_WM_LEVEL_NORMAL) { | |||
| 1334 | enum plane_id plane_id; | |||
| 1335 | ||||
| 1336 | for_each_plane_id_on_crtc(crtc, plane_id) | |||
| 1337 | wm_state->wm.plane[plane_id] = USHRT_MAX; | |||
| 1338 | } | |||
| 1339 | ||||
| 1340 | if (level <= G4X_WM_LEVEL_SR) { | |||
| 1341 | wm_state->cxsr = false; | |||
| 1342 | wm_state->sr.cursor = USHRT_MAX; | |||
| 1343 | wm_state->sr.plane = USHRT_MAX; | |||
| 1344 | wm_state->sr.fbc = USHRT_MAX; | |||
| 1345 | } | |||
| 1346 | ||||
| 1347 | if (level <= G4X_WM_LEVEL_HPLL) { | |||
| 1348 | wm_state->hpll_en = false; | |||
| 1349 | wm_state->hpll.cursor = USHRT_MAX; | |||
| 1350 | wm_state->hpll.plane = USHRT_MAX; | |||
| 1351 | wm_state->hpll.fbc = USHRT_MAX; | |||
| 1352 | } | |||
| 1353 | } | |||
| 1354 | ||||
| 1355 | static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, | |||
| 1356 | int level) | |||
| 1357 | { | |||
| 1358 | if (level < G4X_WM_LEVEL_SR) | |||
| 1359 | return false; | |||
| 1360 | ||||
| 1361 | if (level >= G4X_WM_LEVEL_SR && | |||
| 1362 | wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) | |||
| 1363 | return false; | |||
| 1364 | ||||
| 1365 | if (level >= G4X_WM_LEVEL_HPLL && | |||
| 1366 | wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) | |||
| 1367 | return false; | |||
| 1368 | ||||
| 1369 | return true; | |||
| 1370 | } | |||
| 1371 | ||||
| 1372 | static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) | |||
| 1373 | { | |||
| 1374 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |||
| 1375 | struct intel_atomic_state *state = | |||
| 1376 | to_intel_atomic_state(crtc_state->uapi.state); | |||
| 1377 | struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; | |||
| 1378 | int num_active_planes = hweight8(crtc_state->active_planes & | |||
| 1379 | ~BIT(PLANE_CURSOR)); | |||
| 1380 | const struct g4x_pipe_wm *raw; | |||
| 1381 | const struct intel_plane_state *old_plane_state; | |||
| 1382 | const struct intel_plane_state *new_plane_state; | |||
| 1383 | struct intel_plane *plane; | |||
| 1384 | enum plane_id plane_id; | |||
| 1385 | int i, level; | |||
| 1386 | unsigned int dirty = 0; | |||
| 1387 | ||||
| 1388 | for_each_oldnew_intel_plane_in_state(state, plane, | |||
| 1389 | old_plane_state, | |||
| 1390 | new_plane_state, i) { | |||
| 1391 | if (new_plane_state->hw.crtc != &crtc->base && | |||
| 1392 | old_plane_state->hw.crtc != &crtc->base) | |||
| 1393 | continue; | |||
| 1394 | ||||
| 1395 | if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) | |||
| 1396 | dirty |= BIT(plane->id); | |||
| 1397 | } | |||
| 1398 | ||||
| 1399 | if (!dirty) | |||
| 1400 | return 0; | |||
| 1401 | ||||
| 1402 | level = G4X_WM_LEVEL_NORMAL; | |||
| 1403 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |||
| 1404 | goto out; | |||
| 1405 | ||||
| 1406 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1407 | for_each_plane_id_on_crtc(crtc, plane_id) | |||
| 1408 | wm_state->wm.plane[plane_id] = raw->plane[plane_id]; | |||
| 1409 | ||||
| 1410 | level = G4X_WM_LEVEL_SR; | |||
| 1411 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |||
| 1412 | goto out; | |||
| 1413 | ||||
| 1414 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1415 | wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; | |||
| 1416 | wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; | |||
| 1417 | wm_state->sr.fbc = raw->fbc; | |||
| 1418 | ||||
| 1419 | wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY); | |||
| 1420 | ||||
| 1421 | level = G4X_WM_LEVEL_HPLL; | |||
| 1422 | if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) | |||
| 1423 | goto out; | |||
| 1424 | ||||
| 1425 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 1426 | wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; | |||
| 1427 | wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; | |||
| 1428 | wm_state->hpll.fbc = raw->fbc; | |||
| 1429 | ||||
| 1430 | wm_state->hpll_en = wm_state->cxsr; | |||
| 1431 | ||||
| 1432 | level++; | |||
| 1433 | ||||
| 1434 | out: | |||
| 1435 | if (level == G4X_WM_LEVEL_NORMAL) | |||
| 1436 | return -EINVAL; | |||
| 1437 | ||||
| 1438 | /* invalidate the higher levels */ | |||
| 1439 | g4x_invalidate_wms(crtc, wm_state, level); | |||
| 1440 | ||||
| 1441 | /* | |||
| 1442 | * Determine if the FBC watermark(s) can be used. If | |||
| 1443 | * this isn't the case we prefer to disable the FBC | |||
| 1444 | * watermark(s) rather than disable the SR/HPLL | |||
| 1445 | * level(s) entirely. 'level-1' is the highest valid | |||
| 1446 | * level here. | |||
| 1447 | */ | |||
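| | /* | |||
| | * Illustrative walk-through of the fallback above: if the NORMAL and | |||
| | * SR raw watermarks are valid but the HPLL ones are not, the HPLL | |||
| | * check jumps to 'out' with level == G4X_WM_LEVEL_HPLL, | |||
| | * g4x_invalidate_wms() clears only the HPLL state, and fbc_en is | |||
| | * evaluated at level - 1 == G4X_WM_LEVEL_SR. | |||
| | */ | |||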
| 1448 | wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); | |||
| 1449 | ||||
| 1450 | return 0; | |||
| 1451 | } | |||
| 1452 | ||||
| 1453 | static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) | |||
| 1454 | { | |||
| 1455 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); | |||
| 1456 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1457 | struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; | |||
| 1458 | const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; | |||
| 1459 | struct intel_atomic_state *intel_state = | |||
| 1460 | to_intel_atomic_state(new_crtc_state->uapi.state); | |||
| 1461 | const struct intel_crtc_state *old_crtc_state = | |||
| 1462 | intel_atomic_get_old_crtc_state(intel_state, crtc); | |||
| 1463 | const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; | |||
| 1464 | enum plane_id plane_id; | |||
| 1465 | ||||
| 1466 | if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { | |||
| 1467 | *intermediate = *optimal; | |||
| 1468 | ||||
| 1469 | intermediate->cxsr = false; | |||
| 1470 | intermediate->hpll_en = false; | |||
| 1471 | goto out; | |||
| 1472 | } | |||
| 1473 | ||||
| 1474 | intermediate->cxsr = optimal->cxsr && active->cxsr && | |||
| 1475 | !new_crtc_state->disable_cxsr; | |||
| 1476 | intermediate->hpll_en = optimal->hpll_en && active->hpll_en && | |||
| 1477 | !new_crtc_state->disable_cxsr; | |||
| 1478 | intermediate->fbc_en = optimal->fbc_en && active->fbc_en; | |||
| 1479 | ||||
| 1480 | for_each_plane_id_on_crtc(crtc, plane_id) { | |||
| 1481 | intermediate->wm.plane[plane_id] = | |||
| 1482 | max(optimal->wm.plane[plane_id], | |||
| 1483 | active->wm.plane[plane_id]); | |||
| 1484 | ||||
| 1485 | drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > | |||
| 1486 | g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); | |||
| 1487 | } | |||
| 1488 | ||||
| 1489 | intermediate->sr.plane = max(optimal->sr.plane, | |||
| 1490 | active->sr.plane); | |||
| 1491 | intermediate->sr.cursor = max(optimal->sr.cursor, | |||
| 1492 | active->sr.cursor); | |||
| 1493 | intermediate->sr.fbc = max(optimal->sr.fbc, | |||
| 1494 | active->sr.fbc); | |||
| 1495 | ||||
| 1496 | intermediate->hpll.plane = max(optimal->hpll.plane, | |||
| 1497 | active->hpll.plane); | |||
| 1498 | intermediate->hpll.cursor = max(optimal->hpll.cursor, | |||
| 1499 | active->hpll.cursor); | |||
| 1500 | intermediate->hpll.fbc = max(optimal->hpll.fbc, | |||
| 1501 | active->hpll.fbc); | |||
| 1502 | ||||
| 1503 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1504 | (intermediate->sr.plane >({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1505 | g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1506 | intermediate->sr.cursor >({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1507 | g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1508 | intermediate->cxsr)({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1509 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1510 | (intermediate->sr.plane >({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1511 | g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1512 | intermediate->sr.cursor >({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1513 | g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1514 | intermediate->hpll_en)({ int __ret = !!(((intermediate->sr.plane > g4x_plane_fifo_size (PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(intermediate->sr.plane > g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || intermediate->sr.cursor > g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1515 | ||||
| 1516 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((intermediate->sr.fbc > g4x_fbc_fifo_size (1) && intermediate->fbc_en && intermediate ->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->sr.fbc > g4x_fbc_fifo_size(1) && intermediate->fbc_en && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1517 | intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&({ int __ret = !!((intermediate->sr.fbc > g4x_fbc_fifo_size (1) && intermediate->fbc_en && intermediate ->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->sr.fbc > g4x_fbc_fifo_size(1) && intermediate->fbc_en && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1518 | intermediate->fbc_en && intermediate->cxsr)({ int __ret = !!((intermediate->sr.fbc > g4x_fbc_fifo_size (1) && intermediate->fbc_en && intermediate ->cxsr)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->sr.fbc > g4x_fbc_fifo_size(1) && intermediate->fbc_en && intermediate->cxsr" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1519 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((intermediate->hpll.fbc > g4x_fbc_fifo_size (2) && intermediate->fbc_en && intermediate ->hpll_en)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && intermediate->fbc_en && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1520 | intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&({ int __ret = !!((intermediate->hpll.fbc > g4x_fbc_fifo_size (2) && intermediate->fbc_en && intermediate ->hpll_en)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && intermediate->fbc_en && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 1521 | intermediate->fbc_en && intermediate->hpll_en)({ int __ret = !!((intermediate->hpll.fbc > g4x_fbc_fifo_size (2) && intermediate->fbc_en && intermediate ->hpll_en)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && intermediate->fbc_en && intermediate->hpll_en" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 1522 | ||||
| 1523 | out: | |||
| 1524 | /* | |||
| 1525 | * If our intermediate WM are identical to the final WM, then we can | |||
| 1526 | * omit the post-vblank programming; only update if it's different. | |||
| 1527 | */ | |||
| 1528 | if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) | |||
| 1529 | new_crtc_state->wm.need_postvbl_update = true; | |||
| 1530 | ||||
| 1531 | return 0; | |||
| 1532 | } | |||
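The function above builds the intermediate watermark state that must stay safe while the plane update is in flight: each plane watermark is the larger of the old ("active") and new ("optimal") values, and CxSR/HPLL only stay enabled if both states allow them. A minimal stand-alone sketch of that conservative merge, using illustrative types and sizes rather than the driver's g4x_wm_state:

/* Illustrative sketch: conservative merge of two watermark sets. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_PLANES 3

struct wm_state {
    unsigned short plane[NUM_PLANES];
    bool cxsr;
};

/* The intermediate WM must not underflow either the old or the new config,
 * so take the per-plane maximum and only keep cxsr if both allow it. */
static void merge_intermediate(struct wm_state *inter,
                               const struct wm_state *active,
                               const struct wm_state *optimal)
{
    for (int i = 0; i < NUM_PLANES; i++)
        inter->plane[i] = active->plane[i] > optimal->plane[i] ?
                          active->plane[i] : optimal->plane[i];
    inter->cxsr = active->cxsr && optimal->cxsr;
}

int main(void)
{
    struct wm_state active = { { 10, 4, 2 }, true };
    struct wm_state optimal = { { 6, 8, 2 }, false };
    struct wm_state inter = { { 0 }, false };

    merge_intermediate(&inter, &active, &optimal);
    /* post-vblank reprogramming is only needed if inter differs from optimal */
    printf("plane0=%d plane1=%d cxsr=%d postvbl=%d\n",
           inter.plane[0], inter.plane[1], inter.cxsr,
           memcmp(&inter, &optimal, sizeof(inter)) != 0);
    return 0;
}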
| 1533 | ||||
| 1534 | static void g4x_merge_wm(struct drm_i915_private *dev_priv, | |||
| 1535 | struct g4x_wm_values *wm) | |||
| 1536 | { | |||
| 1537 | struct intel_crtc *crtc; | |||
| 1538 | int num_active_pipes = 0; | |||
| 1539 | ||||
| 1540 | wm->cxsr = true; | |||
| 1541 | wm->hpll_en = true; | |||
| 1542 | wm->fbc_en = true; | |||
| 1543 | ||||
| 1544 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |||
| 1545 | const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; | |||
| 1546 | ||||
| 1547 | if (!crtc->active) | |||
| 1548 | continue; | |||
| 1549 | ||||
| 1550 | if (!wm_state->cxsr) | |||
| 1551 | wm->cxsr = false; | |||
| 1552 | if (!wm_state->hpll_en) | |||
| 1553 | wm->hpll_en = false; | |||
| 1554 | if (!wm_state->fbc_en) | |||
| 1555 | wm->fbc_en = false; | |||
| 1556 | ||||
| 1557 | num_active_pipes++; | |||
| 1558 | } | |||
| 1559 | ||||
| 1560 | if (num_active_pipes != 1) { | |||
| 1561 | wm->cxsr = false; | |||
| 1562 | wm->hpll_en = false; | |||
| 1563 | wm->fbc_en = false; | |||
| 1564 | } | |||
| 1565 | ||||
| 1566 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |||
| 1567 | const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; | |||
| 1568 | enum pipe pipe = crtc->pipe; | |||
| 1569 | ||||
| 1570 | wm->pipe[pipe] = wm_state->wm; | |||
| 1571 | if (crtc->active && wm->cxsr) | |||
| 1572 | wm->sr = wm_state->sr; | |||
| 1573 | if (crtc->active && wm->hpll_en) | |||
| 1574 | wm->hpll = wm_state->hpll; | |||
| 1575 | } | |||
| 1576 | } | |||
| 1577 | ||||
| 1578 | static void g4x_program_watermarks(struct drm_i915_private *dev_priv) | |||
| 1579 | { | |||
| 1580 | struct g4x_wm_values *old_wm = &dev_priv->wm.g4x; | |||
| 1581 | struct g4x_wm_values new_wm = {}; | |||
| 1582 | ||||
| 1583 | g4x_merge_wm(dev_priv, &new_wm); | |||
| 1584 | ||||
| 1585 | if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) | |||
| 1586 | return; | |||
| 1587 | ||||
| 1588 | if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) | |||
| 1589 | _intel_set_memory_cxsr(dev_priv, false); | |||
| 1590 | ||||
| 1591 | g4x_write_wm_values(dev_priv, &new_wm); | |||
| 1592 | ||||
| 1593 | if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) | |||
| 1594 | _intel_set_memory_cxsr(dev_priv, true); | |||
| 1595 | ||||
| 1596 | *old_wm = new_wm; | |||
| 1597 | } | |||
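g4x_program_watermarks() only touches the hardware when the merged values actually changed, and it orders the CxSR toggles around the register write: self-refresh is disabled before the new watermarks are written and re-enabled only afterwards, so the hardware never self-refreshes against stale values. The is_disabling()/is_enabling() helpers it relies on are defined earlier in this file; the sketch below is a stand-alone approximation of the threshold-crossing checks they are assumed to perform, shown only to illustrate the ordering:

/* Illustrative sketch of enable/disable threshold checks (assumed semantics). */
#include <stdbool.h>
#include <stdio.h>

static bool is_disabling(int old, int new, int threshold)
{
    return old >= threshold && new < threshold;   /* dropping below */
}

static bool is_enabling(int old, int new, int threshold)
{
    return old < threshold && new >= threshold;   /* rising above */
}

int main(void)
{
    bool old_cxsr = true, new_cxsr = false;

    if (is_disabling(old_cxsr, new_cxsr, true))
        printf("disable cxsr before writing watermarks\n");
    /* ... the new watermark registers would be written here ... */
    if (is_enabling(old_cxsr, new_cxsr, true))
        printf("enable cxsr after writing watermarks\n");
    return 0;
}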
| 1598 | ||||
| 1599 | static void g4x_initial_watermarks(struct intel_atomic_state *state, | |||
| 1600 | struct intel_crtc *crtc) | |||
| 1601 | { | |||
| 1602 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1603 | const struct intel_crtc_state *crtc_state = | |||
| 1604 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 1605 | ||||
| 1606 | mutex_lock(&dev_priv->wm.wm_mutex); | |||
| 1607 | crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; | |||
| 1608 | g4x_program_watermarks(dev_priv); | |||
| 1609 | mutex_unlock(&dev_priv->wm.wm_mutex); | |||
| 1610 | } | |||
| 1611 | ||||
| 1612 | static void g4x_optimize_watermarks(struct intel_atomic_state *state, | |||
| 1613 | struct intel_crtc *crtc) | |||
| 1614 | { | |||
| 1615 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1616 | const struct intel_crtc_state *crtc_state = | |||
| 1617 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 1618 | ||||
| 1619 | if (!crtc_state->wm.need_postvbl_update) | |||
| 1620 | return; | |||
| 1621 | ||||
| 1622 | mutex_lock(&dev_priv->wm.wm_mutex); | |||
| 1623 | crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; | |||
| 1624 | g4x_program_watermarks(dev_priv); | |||
| 1625 | mutex_unlock(&dev_priv->wm.wm_mutex); | |||
| 1626 | } | |||
| 1627 | ||||
| 1628 | /* latency must be in 0.1us units. */ | |||
| 1629 | static unsigned int vlv_wm_method2(unsigned int pixel_rate, | |||
| 1630 | unsigned int htotal, | |||
| 1631 | unsigned int width, | |||
| 1632 | unsigned int cpp, | |||
| 1633 | unsigned int latency) | |||
| 1634 | { | |||
| 1635 | unsigned int ret; | |||
| 1636 | ||||
| 1637 | ret = intel_wm_method2(pixel_rate, htotal, | |||
| 1638 | width, cpp, latency); | |||
| 1639 | ret = DIV_ROUND_UP(ret, 64); | |||
| 1640 | ||||
| 1641 | return ret; | |||
| 1642 | } | |||
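vlv_wm_method2() reuses the generic method-2 calculation defined earlier in this file and converts the result from bytes into 64-byte FIFO cachelines with DIV_ROUND_UP. A rough worked example, under the assumption that method 2 boils down to "bytes scanned out while a memory request of the given latency is outstanding":

/* Illustrative back-of-the-envelope method-2 style estimate.
 * The driver's actual math lives in intel_wm_method2() earlier in the file. */
#include <stdio.h>

int main(void)
{
    unsigned int pixel_rate = 148500;   /* kHz, e.g. 1080p60 */
    unsigned int htotal = 2200;         /* pixels per scanline */
    unsigned int width = 1920;          /* active pixels */
    unsigned int cpp = 4;               /* bytes per pixel */
    unsigned int latency = 3 * 10;      /* 3 us, expressed in 0.1 us units */

    /* scanlines emitted while the memory request is outstanding */
    unsigned int lines = (latency * pixel_rate) / (htotal * 10000) + 1;
    unsigned int bytes = lines * width * cpp;
    unsigned int cachelines = (bytes + 63) / 64;   /* DIV_ROUND_UP(bytes, 64) */

    printf("~%u line(s) -> %u bytes -> %u cachelines\n",
           lines, bytes, cachelines);
    return 0;
}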
| 1643 | ||||
| 1644 | static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
| 1645 | { | |||
| 1646 | /* all latencies in usec */ | |||
| 1647 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; | |||
| 1648 | ||||
| 1649 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; | |||
| 1650 | ||||
| 1651 | if (IS_CHERRYVIEW(dev_priv)) { | |||
| 1652 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; | |||
| 1653 | dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; | |||
| 1654 | ||||
| 1655 | dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; | |||
| 1656 | } | |||
| 1657 | } | |||
| 1658 | ||||
| 1659 | static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, | |||
| 1660 | const struct intel_plane_state *plane_state, | |||
| 1661 | int level) | |||
| 1662 | { | |||
| 1663 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); | |||
| 1664 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | |||
| 1665 | const struct drm_display_mode *adjusted_mode = | |||
| 1666 | &crtc_state->hw.adjusted_mode; | |||
| 1667 | unsigned int clock, htotal, cpp, width, wm; | |||
| 1668 | ||||
| 1669 | if (dev_priv->wm.pri_latency[level] == 0) | |||
| 1670 | return USHRT_MAX; | |||
| 1671 | ||||
| 1672 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 1673 | return 0; | |||
| 1674 | ||||
| 1675 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 1676 | clock = adjusted_mode->crtc_clock; | |||
| 1677 | htotal = adjusted_mode->crtc_htotal; | |||
| 1678 | width = crtc_state->pipe_src_w; | |||
| 1679 | ||||
| 1680 | if (plane->id == PLANE_CURSOR) { | |||
| 1681 | /* | |||
| 1682 | * FIXME the formula gives values that are | |||
| 1683 | * too big for the cursor FIFO, and hence we | |||
| 1684 | * would never be able to use cursors. For | |||
| 1685 | * now just hardcode the watermark. | |||
| 1686 | */ | |||
| 1687 | wm = 63; | |||
| 1688 | } else { | |||
| 1689 | wm = vlv_wm_method2(clock, htotal, width, cpp, | |||
| 1690 | dev_priv->wm.pri_latency[level] * 10); | |||
| 1691 | } | |||
| 1692 | ||||
| 1693 | return min_t(unsigned int, wm, USHRT_MAX); | |||
| 1694 | } | |||
| 1695 | ||||
| 1696 | static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) | |||
| 1697 | { | |||
| 1698 | return (active_planes & (BIT(PLANE_SPRITE0) | | |||
| 1699 | BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); | |||
| 1700 | } | |||
| 1701 | ||||
| 1702 | static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) | |||
| 1703 | { | |||
| 1704 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |||
| 1705 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1706 | const struct g4x_pipe_wm *raw = | |||
| 1707 | &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; | |||
| 1708 | struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; | |||
| 1709 | unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); | |||
| 1710 | int num_active_planes = hweight8(active_planes); | |||
| 1711 | const int fifo_size = 511; | |||
| 1712 | int fifo_extra, fifo_left = fifo_size; | |||
| 1713 | int sprite0_fifo_extra = 0; | |||
| 1714 | unsigned int total_rate; | |||
| 1715 | enum plane_id plane_id; | |||
| 1716 | ||||
| 1717 | /* | |||
| 1718 | * When enabling sprite0 after sprite1 has already been enabled | |||
| 1719 | * we tend to get an underrun unless sprite0 already has some | |||
| 1720 | * FIFO space allocated. Hence we always allocate at least one | |||
| 1721 | * cacheline for sprite0 whenever sprite1 is enabled. | |||
| 1722 | * | |||
| 1723 | * All other plane enable sequences appear immune to this problem. | |||
| 1724 | */ | |||
| 1725 | if (vlv_need_sprite0_fifo_workaround(active_planes)) | |||
| 1726 | sprite0_fifo_extra = 1; | |||
| 1727 | ||||
| 1728 | total_rate = raw->plane[PLANE_PRIMARY] + | |||
| 1729 | raw->plane[PLANE_SPRITE0] + | |||
| 1730 | raw->plane[PLANE_SPRITE1] + | |||
| 1731 | sprite0_fifo_extra; | |||
| 1732 | ||||
| 1733 | if (total_rate > fifo_size) | |||
| 1734 | return -EINVAL; | |||
| 1735 | ||||
| 1736 | if (total_rate == 0) | |||
| 1737 | total_rate = 1; | |||
| 1738 | ||||
| 1739 | for_each_plane_id_on_crtc(crtc, plane_id) { | |||
| 1740 | unsigned int rate; | |||
| 1741 | ||||
| 1742 | if ((active_planes & BIT(plane_id)) == 0) { | |||
| 1743 | fifo_state->plane[plane_id] = 0; | |||
| 1744 | continue; | |||
| 1745 | } | |||
| 1746 | ||||
| 1747 | rate = raw->plane[plane_id]; | |||
| 1748 | fifo_state->plane[plane_id] = fifo_size * rate / total_rate; | |||
| 1749 | fifo_left -= fifo_state->plane[plane_id]; | |||
| 1750 | } | |||
| 1751 | ||||
| 1752 | fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; | |||
| 1753 | fifo_left -= sprite0_fifo_extra; | |||
| 1754 | ||||
| 1755 | fifo_state->plane[PLANE_CURSOR] = 63; | |||
| 1756 | ||||
| 1757 | fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); | |||
| 1758 | ||||
| 1759 | /* spread the remainder evenly */ | |||
| 1760 | for_each_plane_id_on_crtc(crtc, plane_id) { | |||
| 1761 | int plane_extra; | |||
| 1762 | ||||
| 1763 | if (fifo_left == 0) | |||
| 1764 | break; | |||
| 1765 | ||||
| 1766 | if ((active_planes & BIT(plane_id)) == 0) | |||
| 1767 | continue; | |||
| 1768 | ||||
| 1769 | plane_extra = min(fifo_extra, fifo_left); | |||
| 1770 | fifo_state->plane[plane_id] += plane_extra; | |||
| 1771 | fifo_left -= plane_extra; | |||
| 1772 | } | |||
| 1773 | ||||
| 1774 | drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); | |||
| 1775 | ||||
| 1776 | /* give it all to the first plane if none are active */ | |||
| 1777 | if (active_planes == 0) { | |||
| 1778 | drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); | |||
| 1779 | fifo_state->plane[PLANE_PRIMARY] = fifo_left; | |||
| 1780 | } | |||
| 1781 | ||||
| 1782 | return 0; | |||
| 1783 | } | |||
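vlv_compute_fifo() splits the 511-cacheline FIFO between the active planes in proportion to their raw PM2 watermarks, reserves the extra sprite0 cacheline for the workaround noted above, and then spreads the truncation remainder evenly. A compact stand-alone sketch of the proportional split with remainder spreading, using a made-up plane count and rates:

/* Illustrative sketch of proportional FIFO division with remainder spreading. */
#include <stdio.h>

#define NPLANES 3
#define FIFO_SIZE 511

int main(void)
{
    unsigned int rate[NPLANES] = { 300, 121, 0 };   /* 0 = plane disabled */
    unsigned int fifo[NPLANES] = { 0 };
    unsigned int total = 0, left = FIFO_SIZE, active = 0;

    for (int i = 0; i < NPLANES; i++) {
        total += rate[i];
        if (rate[i])
            active++;
    }
    if (!total)
        total = 1;   /* avoid dividing by zero, as the driver does */

    /* proportional share, truncated toward zero */
    for (int i = 0; i < NPLANES; i++) {
        if (!rate[i])
            continue;
        fifo[i] = FIFO_SIZE * rate[i] / total;
        left -= fifo[i];
    }

    /* spread the truncation remainder evenly over the active planes */
    unsigned int extra = active ? (left + active - 1) / active : left;
    for (int i = 0; i < NPLANES && left; i++) {
        if (!rate[i])
            continue;
        unsigned int add = extra < left ? extra : left;
        fifo[i] += add;
        left -= add;
    }

    for (int i = 0; i < NPLANES; i++)
        printf("plane %d: %u cachelines\n", i, fifo[i]);
    printf("left over: %u\n", left);
    return 0;
}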
| 1784 | ||||
| 1785 | /* mark all levels starting from 'level' as invalid */ | |||
| 1786 | static void vlv_invalidate_wms(struct intel_crtc *crtc, | |||
| 1787 | struct vlv_wm_state *wm_state, int level) | |||
| 1788 | { | |||
| 1789 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1790 | ||||
| 1791 | for (; level < intel_wm_num_levels(dev_priv); level++) { | |||
| 1792 | enum plane_id plane_id; | |||
| 1793 | ||||
| 1794 | for_each_plane_id_on_crtc(crtc, plane_id) | |||
| 1795 | wm_state->wm[level].plane[plane_id] = USHRT_MAX; | |||
| 1796 | ||||
| 1797 | wm_state->sr[level].cursor = USHRT_MAX; | |||
| 1798 | wm_state->sr[level].plane = USHRT_MAX; | |||
| 1799 | } | |||
| 1800 | } | |||
| 1801 | ||||
| 1802 | static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) | |||
| 1803 | { | |||
| 1804 | if (wm > fifo_size) | |||
| 1805 | return USHRT_MAX; | |||
| 1806 | else | |||
| 1807 | return fifo_size - wm; | |||
| 1808 | } | |||
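vlv_invert_wm_value() turns a required fill level into the value that actually gets programmed by subtracting it from the plane's FIFO allocation, and returns USHRT_MAX when the requirement cannot fit at all, which later marks the level invalid. A short illustrative example:

/* Illustrative sketch: inverting a fill-level watermark against the FIFO size. */
#include <limits.h>
#include <stdio.h>

static unsigned short invert_wm(unsigned short wm, unsigned short fifo_size)
{
    /* a requirement larger than the allocation can never be satisfied */
    if (wm > fifo_size)
        return USHRT_MAX;
    return fifo_size - wm;
}

int main(void)
{
    printf("%d\n", invert_wm(120, 365));   /* 245: plenty of headroom */
    printf("%d\n", invert_wm(400, 365));   /* 65535: level is unusable */
    return 0;
}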
| 1809 | ||||
| 1810 | /* | |||
| 1811 | * Starting from 'level' set all higher | |||
| 1812 | * levels to 'value' in the "raw" watermarks. | |||
| 1813 | */ | |||
| 1814 | static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, | |||
| 1815 | int level, enum plane_id plane_id, u16 value) | |||
| 1816 | { | |||
| 1817 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1818 | int num_levels = intel_wm_num_levels(dev_priv); | |||
| 1819 | bool dirty = false; | |||
| 1820 | ||||
| 1821 | for (; level < num_levels; level++) { | |||
| 1822 | struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; | |||
| 1823 | ||||
| 1824 | dirty |= raw->plane[plane_id] != value; | |||
| 1825 | raw->plane[plane_id] = value; | |||
| 1826 | } | |||
| 1827 | ||||
| 1828 | return dirty; | |||
| 1829 | } | |||
| 1830 | ||||
| 1831 | static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, | |||
| 1832 | const struct intel_plane_state *plane_state) | |||
| 1833 | { | |||
| 1834 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); | |||
| 1835 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 1836 | enum plane_id plane_id = plane->id; | |||
| 1837 | int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); | |||
| 1838 | int level; | |||
| 1839 | bool dirty = false; | |||
| 1840 | ||||
| 1841 | if (!intel_wm_plane_visible(crtc_state, plane_state)) { | |||
| 1842 | dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); | |||
| 1843 | goto out; | |||
| 1844 | } | |||
| 1845 | ||||
| 1846 | for (level = 0; level < num_levels; level++) { | |||
| 1847 | struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; | |||
| 1848 | int wm = vlv_compute_wm_level(crtc_state, plane_state, level); | |||
| 1849 | int max_wm = plane_id == PLANE_CURSOR ? 63 : 511; | |||
| 1850 | ||||
| 1851 | if (wm > max_wm) | |||
| 1852 | break; | |||
| 1853 | ||||
| 1854 | dirty |= raw->plane[plane_id] != wm; | |||
| 1855 | raw->plane[plane_id] = wm; | |||
| 1856 | } | |||
| 1857 | ||||
| 1858 | /* mark all higher levels as invalid */ | |||
| 1859 | dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); | |||
| 1860 | ||||
| 1861 | out: | |||
| 1862 | if (dirty) | |||
| 1863 | drm_dbg_kms(&dev_priv->drm, | |||
| 1864 | "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", | |||
| 1865 | plane->base.name, | |||
| 1866 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], | |||
| 1867 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], | |||
| 1868 | crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); | |||
| 1869 | ||||
| 1870 | return dirty; | |||
| 1871 | } | |||
| 1872 | ||||
| 1873 | static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, | |||
| 1874 | enum plane_id plane_id, int level) | |||
| 1875 | { | |||
| 1876 | const struct g4x_pipe_wm *raw = | |||
| 1877 | &crtc_state->wm.vlv.raw[level]; | |||
| 1878 | const struct vlv_fifo_state *fifo_state = | |||
| 1879 | &crtc_state->wm.vlv.fifo_state; | |||
| 1880 | ||||
| 1881 | return raw->plane[plane_id] <= fifo_state->plane[plane_id]; | |||
| 1882 | } | |||
| 1883 | ||||
| 1884 | static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) | |||
| 1885 | { | |||
| 1886 | return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && | |||
| 1887 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && | |||
| 1888 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && | |||
| 1889 | vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); | |||
| 1890 | } | |||
| 1891 | ||||
| 1892 | static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) | |||
| 1893 | { | |||
| 1894 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |||
| 1895 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 1896 | struct intel_atomic_state *state = | |||
| 1897 | to_intel_atomic_state(crtc_state->uapi.state); | |||
| 1898 | struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; | |||
| 1899 | const struct vlv_fifo_state *fifo_state = | |||
| 1900 | &crtc_state->wm.vlv.fifo_state; | |||
| 1901 | int num_active_planes = hweight8(crtc_state->active_planes & | |||
| 1902 | ~BIT(PLANE_CURSOR)); | |||
| 1903 | bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi); | |||
| 1904 | const struct intel_plane_state *old_plane_state; | |||
| 1905 | const struct intel_plane_state *new_plane_state; | |||
| 1906 | struct intel_plane *plane; | |||
| 1907 | enum plane_id plane_id; | |||
| 1908 | int level, ret, i; | |||
| 1909 | unsigned int dirty = 0; | |||
| 1910 | ||||
| 1911 | for_each_oldnew_intel_plane_in_state(state, plane, | |||
| 1912 | old_plane_state, | |||
| 1913 | new_plane_state, i) { | |||
| 1914 | if (new_plane_state->hw.crtc != &crtc->base && | |||
| 1915 | old_plane_state->hw.crtc != &crtc->base) | |||
| 1916 | continue; | |||
| 1917 | ||||
| 1918 | if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) | |||
| 1919 | dirty |= BIT(plane->id); | |||
| 1920 | } | |||
| 1921 | ||||
| 1922 | /* | |||
| 1923 | * DSPARB registers may have been reset due to the | |||
| 1924 | * power well being turned off. Make sure we restore | |||
| 1925 | * them to a consistent state even if no primary/sprite | |||
| 1926 | * planes are initially active. | |||
| 1927 | */ | |||
| 1928 | if (needs_modeset) | |||
| 1929 | crtc_state->fifo_changed = true; | |||
| 1930 | ||||
| 1931 | if (!dirty) | |||
| 1932 | return 0; | |||
| 1933 | ||||
| 1934 | /* cursor changes don't warrant a FIFO recompute */ | |||
| 1935 | if (dirty & ~BIT(PLANE_CURSOR)) { | |||
| 1936 | const struct intel_crtc_state *old_crtc_state = | |||
| 1937 | intel_atomic_get_old_crtc_state(state, crtc); | |||
| 1938 | const struct vlv_fifo_state *old_fifo_state = | |||
| 1939 | &old_crtc_state->wm.vlv.fifo_state; | |||
| 1940 | ||||
| 1941 | ret = vlv_compute_fifo(crtc_state); | |||
| 1942 | if (ret) | |||
| 1943 | return ret; | |||
| 1944 | ||||
| 1945 | if (needs_modeset || | |||
| 1946 | memcmp(old_fifo_state, fifo_state, | |||
| 1947 | sizeof(*fifo_state)) != 0) | |||
| 1948 | crtc_state->fifo_changed = true; | |||
| 1949 | } | |||
| 1950 | ||||
| 1951 | /* initially allow all levels */ | |||
| 1952 | wm_state->num_levels = intel_wm_num_levels(dev_priv); | |||
| 1953 | /* | |||
| 1954 | * Note that enabling cxsr with no primary/sprite planes | |||
| 1955 | * enabled can wedge the pipe. Hence we only allow cxsr | |||
| 1956 | * with exactly one enabled primary/sprite plane. | |||
| 1957 | */ | |||
| 1958 | wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; | |||
| 1959 | ||||
| 1960 | for (level = 0; level < wm_state->num_levels; level++) { | |||
| 1961 | const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; | |||
| 1962 | const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; | |||
| 1963 | ||||
| 1964 | if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) | |||
| 1965 | break; | |||
| 1966 | ||||
| 1967 | for_each_plane_id_on_crtc(crtc, plane_id) { | |||
| 1968 | wm_state->wm[level].plane[plane_id] = | |||
| 1969 | vlv_invert_wm_value(raw->plane[plane_id], | |||
| 1970 | fifo_state->plane[plane_id]); | |||
| 1971 | } | |||
| 1972 | ||||
| 1973 | wm_state->sr[level].plane = | |||
| 1974 | vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], | |||
| 1975 | raw->plane[PLANE_SPRITE0], | |||
| 1976 | raw->plane[PLANE_SPRITE1]), | |||
| 1977 | sr_fifo_size); | |||
| 1978 | ||||
| 1979 | wm_state->sr[level].cursor = | |||
| 1980 | vlv_invert_wm_value(raw->plane[PLANE_CURSOR], | |||
| 1981 | 63); | |||
| 1982 | } | |||
| 1983 | ||||
| 1984 | if (level == 0) | |||
| 1985 | return -EINVAL; | |||
| 1986 | ||||
| 1987 | /* limit to only levels we can actually handle */ | |||
| 1988 | wm_state->num_levels = level; | |||
| 1989 | ||||
| 1990 | /* invalidate the higher levels */ | |||
| 1991 | vlv_invalidate_wms(crtc, wm_state, level); | |||
| 1992 | ||||
| 1993 | return 0; | |||
| 1994 | } | |||
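vlv_compute_pipe_wm() keeps only the leading run of watermark levels whose raw values still fit the FIFO allocation: the level loop stops at the first level that does not fit, num_levels is clamped to that point, and vlv_invalidate_wms() poisons everything above it. A minimal sketch of that trim-to-highest-usable-level pattern:

/* Illustrative sketch: keep only the leading run of valid watermark levels. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_LEVELS 3
#define INVALID 0xffff

int main(void)
{
    /* true = this level's watermarks fit the FIFO allocation */
    bool level_ok[NUM_LEVELS] = { true, true, false };
    unsigned short wm[NUM_LEVELS] = { 245, 60, 10 };
    int level;

    for (level = 0; level < NUM_LEVELS; level++)
        if (!level_ok[level])
            break;

    if (level == 0) {
        printf("no usable level: reject the configuration\n");
        return 1;
    }

    /* clamp the level count and invalidate everything above it */
    for (int i = level; i < NUM_LEVELS; i++)
        wm[i] = INVALID;

    printf("usable levels: %d (highest usable wm %d)\n", level, wm[level - 1]);
    return 0;
}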
| 1995 | ||||
| 1996 | #define VLV_FIFO(plane, value) \ | |||
| 1997 | (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) | |||
| 1998 | ||||
| 1999 | static void vlv_atomic_update_fifo(struct intel_atomic_state *state, | |||
| 2000 | struct intel_crtc *crtc) | |||
| 2001 | { | |||
| 2002 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 2003 | struct intel_uncore *uncore = &dev_priv->uncore; | |||
| 2004 | const struct intel_crtc_state *crtc_state = | |||
| 2005 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 2006 | const struct vlv_fifo_state *fifo_state = | |||
| 2007 | &crtc_state->wm.vlv.fifo_state; | |||
| 2008 | int sprite0_start, sprite1_start, fifo_size; | |||
| 2009 | u32 dsparb, dsparb2, dsparb3; | |||
| 2010 | ||||
| 2011 | if (!crtc_state->fifo_changed) | |||
| 2012 | return; | |||
| 2013 | ||||
| 2014 | sprite0_start = fifo_state->plane[PLANE_PRIMARY]; | |||
| 2015 | sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; | |||
| 2016 | fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; | |||
| 2017 | ||||
| 2018 | drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); | |||
| 2019 | drm_WARN_ON(&dev_priv->drm, fifo_size != 511); | |||
| 2020 | ||||
| 2021 | trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); | |||
| 2022 | ||||
| 2023 | /* | |||
| 2024 | * uncore.lock serves a double purpose here. It allows us to | |||
| 2025 | * use the less expensive I915_{READ,WRITE}_FW() functions, and | |||
| 2026 | * it protects the DSPARB registers from getting clobbered by | |||
| 2027 | * parallel updates from multiple pipes. | |||
| 2028 | * | |||
| 2029 | * intel_pipe_update_start() has already disabled interrupts | |||
| 2030 | * for us, so a plain spin_lock() is sufficient here. | |||
| 2031 | */ | |||
| 2032 | spin_lock(&uncore->lock); | |||
| 2033 | ||||
| 2034 | switch (crtc->pipe) { | |||
| 2035 | case PIPE_A: | |||
| 2036 | dsparb = intel_uncore_read_fw(uncore, DSPARB); | |||
| 2037 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |||
| 2038 | ||||
| 2039 | dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | | |||
| 2040 | VLV_FIFO(SPRITEB, 0xff)); | |||
| 2041 | dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | | |||
| 2042 | VLV_FIFO(SPRITEB, sprite1_start)); | |||
| 2043 | ||||
| 2044 | dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | | |||
| 2045 | VLV_FIFO(SPRITEB_HI, 0x1)); | |||
| 2046 | dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | | |||
| 2047 | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); | |||
| 2048 | ||||
| 2049 | intel_uncore_write_fw(uncore, DSPARB, dsparb); | |||
| 2050 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |||
| 2051 | break; | |||
| 2052 | case PIPE_B: | |||
| 2053 | dsparb = intel_uncore_read_fw(uncore, DSPARB); | |||
| 2054 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |||
| 2055 | ||||
| 2056 | dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | | |||
| 2057 | VLV_FIFO(SPRITED, 0xff)); | |||
| 2058 | dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | | |||
| 2059 | VLV_FIFO(SPRITED, sprite1_start)); | |||
| 2060 | ||||
| 2061 | dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | | |||
| 2062 | VLV_FIFO(SPRITED_HI, 0xff)); | |||
| 2063 | dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | | |||
| 2064 | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); | |||
| 2065 | ||||
| 2066 | intel_uncore_write_fw(uncore, DSPARB, dsparb); | |||
| 2067 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |||
| 2068 | break; | |||
| 2069 | case PIPE_C: | |||
| 2070 | dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); | |||
| 2071 | dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); | |||
| 2072 | ||||
| 2073 | dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | | |||
| 2074 | VLV_FIFO(SPRITEF, 0xff)); | |||
| 2075 | dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | | |||
| 2076 | VLV_FIFO(SPRITEF, sprite1_start)); | |||
| 2077 | ||||
| 2078 | dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | | |||
| 2079 | VLV_FIFO(SPRITEF_HI, 0xff)); | |||
| 2080 | dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | | |||
| 2081 | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); | |||
| 2082 | ||||
| 2083 | intel_uncore_write_fw(uncore, DSPARB3, dsparb3); | |||
| 2084 | intel_uncore_write_fw(uncore, DSPARB2, dsparb2); | |||
| 2085 | break; | |||
| 2086 | default: | |||
| 2087 | break; | |||
| 2088 | } | |||
| 2089 | ||||
| 2090 | intel_uncore_posting_read_fw(uncore, DSPARB); | |||
| 2091 | ||||
| 2092 | spin_unlock(&uncore->lock); | |||
| 2093 | } | |||
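The VLV_FIFO() macro defined just before this function packs a FIFO split point into its bit-field inside the DSPARB registers; the low eight bits land in DSPARB/DSPARB3 and the overflow bit(s) in DSPARB2, which is why the *_HI fields are written with the value shifted right by eight. A hedged sketch of that read-modify-write field packing, using hypothetical shift/mask values rather than the real DSPARB_* register definitions:

/* Illustrative sketch of DSPARB-style bit-field packing. The shift and mask
 * below are hypothetical placeholders, not the real DSPARB_* definitions. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT 8                           /* hypothetical */
#define FIELD_MASK  (0xffu << FIELD_SHIFT)      /* hypothetical */

static uint32_t set_field(uint32_t reg, uint32_t value)
{
    reg &= ~FIELD_MASK;                             /* clear the field */
    reg |= (value << FIELD_SHIFT) & FIELD_MASK;     /* insert the new value */
    return reg;
}

int main(void)
{
    uint32_t dsparb = 0xdeadbeef;
    uint32_t sprite0_start = 365;   /* 9-bit split point: low 8 bits fit here */

    dsparb = set_field(dsparb, sprite0_start & 0xff);
    /* the ninth bit (sprite0_start >> 8) would go into a *_HI field of a
     * second register, exactly as DSPARB2 is handled in the function above */
    printf("dsparb = 0x%08x\n", (unsigned)dsparb);
    return 0;
}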
| 2094 | ||||
| 2095 | #undef VLV_FIFO | |||
| 2096 | ||||
| 2097 | static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state) | |||
| 2098 | { | |||
| 2099 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); | |||
| 2100 | struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; | |||
| 2101 | const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; | |||
| 2102 | struct intel_atomic_state *intel_state = | |||
| 2103 | to_intel_atomic_state(new_crtc_state->uapi.state); | |||
| 2104 | const struct intel_crtc_state *old_crtc_state = | |||
| 2105 | intel_atomic_get_old_crtc_state(intel_state, crtc); | |||
| 2106 | const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; | |||
| 2107 | int level; | |||
| 2108 | ||||
| 2109 | if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) { | |||
| 2110 | *intermediate = *optimal; | |||
| 2111 | ||||
| 2112 | intermediate->cxsr = false; | |||
| 2113 | goto out; | |||
| 2114 | } | |||
| 2115 | ||||
| 2116 | intermediate->num_levels = min(optimal->num_levels, active->num_levels); | |||
| 2117 | intermediate->cxsr = optimal->cxsr && active->cxsr && | |||
| 2118 | !new_crtc_state->disable_cxsr; | |||
| 2119 | ||||
| 2120 | for (level = 0; level < intermediate->num_levels; level++) { | |||
| 2121 | enum plane_id plane_id; | |||
| 2122 | ||||
| 2123 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 2124 | intermediate->wm[level].plane[plane_id] = | |||
| 2125 | min(optimal->wm[level].plane[plane_id], | |||
| 2126 | active->wm[level].plane[plane_id]); | |||
| 2127 | } | |||
| 2128 | ||||
| 2129 | intermediate->sr[level].plane = min(optimal->sr[level].plane, | |||
| 2130 | active->sr[level].plane); | |||
| 2131 | intermediate->sr[level].cursor = min(optimal->sr[level].cursor, | |||
| 2132 | active->sr[level].cursor); | |||
| 2133 | } | |||
| 2134 | ||||
| 2135 | vlv_invalidate_wms(crtc, intermediate, level); | |||
| 2136 | ||||
| 2137 | out: | |||
| 2138 | /* | |||
| 2139 | * If our intermediate WM are identical to the final WM, then we can | |||
| 2140 | * omit the post-vblank programming; only update if it's different. | |||
| 2141 | */ | |||
| 2142 | if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) | |||
| 2143 | new_crtc_state->wm.need_postvbl_update = true; | |||
| 2144 | ||||
| 2145 | return 0; | |||
| 2146 | } | |||
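Unlike the g4x version earlier, this intermediate merge takes the minimum of the old and new values. The values stored here were already inverted by vlv_invert_wm_value(), so picking the smaller inverted value corresponds to honouring the larger underlying fill requirement, which is the conservative choice. A tiny numeric illustration with made-up numbers:

/* Illustrative sketch: min() of inverted watermarks equals max() of the
 * underlying fill requirements. */
#include <stdio.h>

#define FIFO_SIZE 365

int main(void)
{
    unsigned short need_old = 120, need_new = 200;  /* required fill levels */
    unsigned short inv_old = FIFO_SIZE - need_old;  /* 245 */
    unsigned short inv_new = FIFO_SIZE - need_new;  /* 165 */

    unsigned short inter = inv_old < inv_new ? inv_old : inv_new;

    /* the smaller inverted value covers the larger fill requirement */
    printf("intermediate = %d (covers a fill of %d)\n",
           inter, FIFO_SIZE - inter);
    return 0;
}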
| 2147 | ||||
| 2148 | static void vlv_merge_wm(struct drm_i915_private *dev_priv, | |||
| 2149 | struct vlv_wm_values *wm) | |||
| 2150 | { | |||
| 2151 | struct intel_crtc *crtc; | |||
| 2152 | int num_active_pipes = 0; | |||
| 2153 | ||||
| 2154 | wm->level = dev_priv->wm.max_level; | |||
| 2155 | wm->cxsr = true; | |||
| 2156 | ||||
| 2157 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |||
| 2158 | const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; | |||
| 2159 | ||||
| 2160 | if (!crtc->active) | |||
| 2161 | continue; | |||
| 2162 | ||||
| 2163 | if (!wm_state->cxsr) | |||
| 2164 | wm->cxsr = false; | |||
| 2165 | ||||
| 2166 | num_active_pipes++; | |||
| 2167 | wm->level = min_t(int, wm->level, wm_state->num_levels - 1); | |||
| 2168 | } | |||
| 2169 | ||||
| 2170 | if (num_active_pipes != 1) | |||
| 2171 | wm->cxsr = false; | |||
| 2172 | ||||
| 2173 | if (num_active_pipes > 1) | |||
| 2174 | wm->level = VLV_WM_LEVEL_PM2; | |||
| 2175 | ||||
| 2176 | for_each_intel_crtc(&dev_priv->drm, crtc) { | |||
| 2177 | const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; | |||
| 2178 | enum pipe pipe = crtc->pipe; | |||
| 2179 | ||||
| 2180 | wm->pipe[pipe] = wm_state->wm[wm->level]; | |||
| 2181 | if (crtc->active && wm->cxsr) | |||
| 2182 | wm->sr = wm_state->sr[wm->level]; | |||
| 2183 | ||||
| 2184 | wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; | |||
| 2185 | wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; | |||
| 2186 | wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; | |||
| 2187 | wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; | |||
| 2188 | } | |||
| 2189 | } | |||
| 2190 | ||||
| 2191 | static void vlv_program_watermarks(struct drm_i915_private *dev_priv) | |||
| 2192 | { | |||
| 2193 | struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; | |||
| 2194 | struct vlv_wm_values new_wm = {}; | |||
| 2195 | ||||
| 2196 | vlv_merge_wm(dev_priv, &new_wm); | |||
| 2197 | ||||
| 2198 | if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) | |||
| 2199 | return; | |||
| 2200 | ||||
| 2201 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) | |||
| 2202 | chv_set_memory_dvfs(dev_priv, false); | |||
| 2203 | ||||
| 2204 | if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) | |||
| 2205 | chv_set_memory_pm5(dev_priv, false); | |||
| 2206 | ||||
| 2207 | if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) | |||
| 2208 | _intel_set_memory_cxsr(dev_priv, false); | |||
| 2209 | ||||
| 2210 | vlv_write_wm_values(dev_priv, &new_wm); | |||
| 2211 | ||||
| 2212 | if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) | |||
| 2213 | _intel_set_memory_cxsr(dev_priv, true); | |||
| 2214 | ||||
| 2215 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) | |||
| 2216 | chv_set_memory_pm5(dev_priv, true); | |||
| 2217 | ||||
| 2218 | if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) | |||
| 2219 | chv_set_memory_dvfs(dev_priv, true); | |||
| 2220 | ||||
| 2221 | *old_wm = new_wm; | |||
| 2222 | } | |||
| 2223 | ||||
| 2224 | static void vlv_initial_watermarks(struct intel_atomic_state *state, | |||
| 2225 | struct intel_crtc *crtc) | |||
| 2226 | { | |||
| 2227 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 2228 | const struct intel_crtc_state *crtc_state = | |||
| 2229 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 2230 | ||||
| 2231 | mutex_lock(&dev_priv->wm.wm_mutex); | |||
| 2232 | crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; | |||
| 2233 | vlv_program_watermarks(dev_priv); | |||
| 2234 | mutex_unlock(&dev_priv->wm.wm_mutex); | |||
| 2235 | } | |||
| 2236 | ||||
| 2237 | static void vlv_optimize_watermarks(struct intel_atomic_state *state, | |||
| 2238 | struct intel_crtc *crtc) | |||
| 2239 | { | |||
| 2240 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 2241 | const struct intel_crtc_state *crtc_state = | |||
| 2242 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 2243 | ||||
| 2244 | if (!crtc_state->wm.need_postvbl_update) | |||
| 2245 | return; | |||
| 2246 | ||||
| 2247 | mutex_lock(&dev_priv->wm.wm_mutex); | |||
| 2248 | crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; | |||
| 2249 | vlv_program_watermarks(dev_priv); | |||
| 2250 | mutex_unlock(&dev_priv->wm.wm_mutex); | |||
| 2251 | } | |||
| 2252 | ||||
| 2253 | static void i965_update_wm(struct intel_crtc *unused_crtc) | |||
| 2254 | { | |||
| 2255 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); | |||
| 2256 | struct intel_crtc *crtc; | |||
| 2257 | int srwm = 1; | |||
| 2258 | int cursor_sr = 16; | |||
| 2259 | bool cxsr_enabled; | |||
| 2260 | ||||
| 2261 | /* Calc sr entries for one plane configs */ | |||
| 2262 | crtc = single_enabled_crtc(dev_priv); | |||
| 2263 | if (crtc) { | |||
| 2264 | /* self-refresh has much higher latency */ | |||
| 2265 | static const int sr_latency_ns = 12000; | |||
| 2266 | const struct drm_display_mode *adjusted_mode = | |||
| 2267 | &crtc->config->hw.adjusted_mode; | |||
| 2268 | const struct drm_framebuffer *fb = | |||
| 2269 | crtc->base.primary->state->fb; | |||
| 2270 | int clock = adjusted_mode->crtc_clock; | |||
| 2271 | int htotal = adjusted_mode->crtc_htotal; | |||
| 2272 | int hdisplay = crtc->config->pipe_src_w; | |||
| 2273 | int cpp = fb->format->cpp[0]; | |||
| 2274 | int entries; | |||
| 2275 | ||||
| 2276 | entries = intel_wm_method2(clock, htotal, | |||
| 2277 | hdisplay, cpp, sr_latency_ns / 100); | |||
| 2278 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); | |||
| 2279 | srwm = I965_FIFO_SIZE - entries; | |||
| 2280 | if (srwm < 0) | |||
| 2281 | srwm = 1; | |||
| 2282 | srwm &= 0x1ff; | |||
| 2283 | drm_dbg_kms(&dev_priv->drm, | |||
| 2284 | "self-refresh entries: %d, wm: %d\n", | |||
| 2285 | entries, srwm); | |||
| 2286 | ||||
| 2287 | entries = intel_wm_method2(clock, htotal, | |||
| 2288 | crtc->base.cursor->state->crtc_w, 4, | |||
| 2289 | sr_latency_ns / 100); | |||
| 2290 | entries = DIV_ROUND_UP(entries, | |||
| 2291 | i965_cursor_wm_info.cacheline_size) + | |||
| 2292 | i965_cursor_wm_info.guard_size; | |||
| 2293 | ||||
| 2294 | cursor_sr = i965_cursor_wm_info.fifo_size - entries; | |||
| 2295 | if (cursor_sr > i965_cursor_wm_info.max_wm) | |||
| 2296 | cursor_sr = i965_cursor_wm_info.max_wm; | |||
| 2297 | ||||
| 2298 | drm_dbg_kms(&dev_priv->drm, | |||
| 2299 | "self-refresh watermark: display plane %d " | |||
| 2300 | "cursor %d\n", srwm, cursor_sr); | |||
| 2301 | ||||
| 2302 | cxsr_enabled = true; | |||
| 2303 | } else { | |||
| 2304 | cxsr_enabled = false; | |||
| 2305 | /* Turn off self refresh if both pipes are enabled */ | |||
| 2306 | intel_set_memory_cxsr(dev_priv, false); | |||
| 2307 | } | |||
| 2308 | ||||
| 2309 | drm_dbg_kms(&dev_priv->drm, | |||
| 2310 | "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | |||
| 2311 | srwm); | |||
| 2312 | ||||
| 2313 | /* 965 has limitations... */ | |||
| 2314 | I915_WRITE(DSPFW1, FW_WM(srwm, SR) | | |||
| 2315 | FW_WM(8, CURSORB) | | |||
| 2316 | FW_WM(8, PLANEB) | | |||
| 2317 | FW_WM(8, PLANEA)); | |||
| 2318 | I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | | |||
| 2319 | FW_WM(8, PLANEC_OLD)); | |||
| 2320 | /* update cursor SR watermark */ | |||
| 2321 | I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); | |||
| 2322 | ||||
| 2323 | if (cxsr_enabled) | |||
| 2324 | intel_set_memory_cxsr(dev_priv, true); | |||
| 2325 | } | |||
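The i965 path above sizes self-refresh in FIFO entries: bytes that accumulate over the latency window, rounded up to cachelines and subtracted from the 512-entry FIFO. A minimal standalone sketch of that arithmetic follows; it restates what intel_wm_method2() (defined earlier in this file) is assumed to compute, and the mode numbers are made up.

/* Illustrative only: not the driver's code, just the same arithmetic. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define I915_FIFO_LINE_SIZE  64
#define I965_FIFO_SIZE       512

/* assumed restatement of intel_wm_method2(): lines that pass during the
 * latency window (latency in 0.1us, pixel rate in kHz), plus one, in bytes */
static unsigned int wm_method2(unsigned int pixel_rate, unsigned int htotal,
                               unsigned int width, unsigned int cpp,
                               unsigned int latency)
{
        unsigned int lines = (latency * pixel_rate) / (htotal * 10000);
        return (lines + 1) * width * cpp;
}

int main(void)
{
        /* hypothetical 1920x1080@60: 148500 kHz clock, htotal 2200, 4 bpp */
        unsigned int entries = wm_method2(148500, 2200, 1920, 4, 12000 / 100);
        int srwm;

        entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
        srwm = I965_FIFO_SIZE - entries;
        if (srwm < 0)
                srwm = 1;
        printf("entries=%u srwm=%d\n", entries, srwm & 0x1ff);
        return 0;
}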
| 2326 | ||||
| 2327 | #undef FW_WM | |||
| 2328 | ||||
| 2329 | static void i9xx_update_wm(struct intel_crtc *unused_crtc) | |||
| 2330 | { | |||
| 2331 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); | |||
| 2332 | const struct intel_watermark_params *wm_info; | |||
| 2333 | u32 fwater_lo; | |||
| 2334 | u32 fwater_hi; | |||
| 2335 | int cwm, srwm = 1; | |||
| 2336 | int fifo_size; | |||
| 2337 | int planea_wm, planeb_wm; | |||
| 2338 | struct intel_crtc *crtc, *enabled = NULL; | |||
| 2339 | ||||
| 2340 | if (IS_I945GM(dev_priv)) | |||
| 2341 | wm_info = &i945_wm_info; | |||
| 2342 | else if (!IS_GEN(dev_priv, 2)) | |||
| 2343 | wm_info = &i915_wm_info; | |||
| 2344 | else | |||
| 2345 | wm_info = &i830_a_wm_info; | |||
| 2346 | ||||
| 2347 | fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A); | |||
| 2348 | crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); | |||
| 2349 | if (intel_crtc_active(crtc)) { | |||
| 2350 | const struct drm_display_mode *adjusted_mode = | |||
| 2351 | &crtc->config->hw.adjusted_mode; | |||
| 2352 | const struct drm_framebuffer *fb = | |||
| 2353 | crtc->base.primary->state->fb; | |||
| 2354 | int cpp; | |||
| 2355 | ||||
| 2356 | if (IS_GEN(dev_priv, 2)) | |||
| 2357 | cpp = 4; | |||
| 2358 | else | |||
| 2359 | cpp = fb->format->cpp[0]; | |||
| 2360 | ||||
| 2361 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |||
| 2362 | wm_info, fifo_size, cpp, | |||
| 2363 | pessimal_latency_ns); | |||
| 2364 | enabled = crtc; | |||
| 2365 | } else { | |||
| 2366 | planea_wm = fifo_size - wm_info->guard_size; | |||
| 2367 | if (planea_wm > (long)wm_info->max_wm) | |||
| 2368 | planea_wm = wm_info->max_wm; | |||
| 2369 | } | |||
| 2370 | ||||
| 2371 | if (IS_GEN(dev_priv, 2)) | |||
| 2372 | wm_info = &i830_bc_wm_info; | |||
| 2373 | ||||
| 2374 | fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); | |||
| 2375 | crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); | |||
| 2376 | if (intel_crtc_active(crtc)) { | |||
| 2377 | const struct drm_display_mode *adjusted_mode = | |||
| 2378 | &crtc->config->hw.adjusted_mode; | |||
| 2379 | const struct drm_framebuffer *fb = | |||
| 2380 | crtc->base.primary->state->fb; | |||
| 2381 | int cpp; | |||
| 2382 | ||||
| 2383 | if (IS_GEN(dev_priv, 2)) | |||
| 2384 | cpp = 4; | |||
| 2385 | else | |||
| 2386 | cpp = fb->format->cpp[0]; | |||
| 2387 | ||||
| 2388 | planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |||
| 2389 | wm_info, fifo_size, cpp, | |||
| 2390 | pessimal_latency_ns); | |||
| 2391 | if (enabled == NULL) | |||
| 2392 | enabled = crtc; | |||
| 2393 | else | |||
| 2394 | enabled = NULL; | |||
| 2395 | } else { | |||
| 2396 | planeb_wm = fifo_size - wm_info->guard_size; | |||
| 2397 | if (planeb_wm > (long)wm_info->max_wm) | |||
| 2398 | planeb_wm = wm_info->max_wm; | |||
| 2399 | } | |||
| 2400 | ||||
| 2401 | drm_dbg_kms(&dev_priv->drm, | |||
| 2402 | "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); | |||
| 2403 | ||||
| 2404 | if (IS_I915GM(dev_priv) && enabled) { | |||
| 2405 | struct drm_i915_gem_object *obj; | |||
| 2406 | ||||
| 2407 | obj = intel_fb_obj(enabled->base.primary->state->fb); | |||
| 2408 | ||||
| 2409 | /* self-refresh seems busted with untiled */ | |||
| 2410 | if (!i915_gem_object_is_tiled(obj)) | |||
| 2411 | enabled = NULL; | |||
| 2412 | } | |||
| 2413 | ||||
| 2414 | /* | |||
| 2415 | * Overlay gets an aggressive default since video jitter is bad. | |||
| 2416 | */ | |||
| 2417 | cwm = 2; | |||
| 2418 | ||||
| 2419 | /* Play safe and disable self-refresh before adjusting watermarks. */ | |||
| 2420 | intel_set_memory_cxsr(dev_priv, false); | |||
| 2421 | ||||
| 2422 | /* Calc sr entries for one plane configs */ | |||
| 2423 | if (HAS_FW_BLC(dev_priv) && enabled) { | |||
| 2424 | /* self-refresh has much higher latency */ | |||
| 2425 | static const int sr_latency_ns = 6000; | |||
| 2426 | const struct drm_display_mode *adjusted_mode = | |||
| 2427 | &enabled->config->hw.adjusted_mode; | |||
| 2428 | const struct drm_framebuffer *fb = | |||
| 2429 | enabled->base.primary->state->fb; | |||
| 2430 | int clock = adjusted_mode->crtc_clock; | |||
| 2431 | int htotal = adjusted_mode->crtc_htotal; | |||
| 2432 | int hdisplay = enabled->config->pipe_src_w; | |||
| 2433 | int cpp; | |||
| 2434 | int entries; | |||
| 2435 | ||||
| 2436 | if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) | |||
| 2437 | cpp = 4; | |||
| 2438 | else | |||
| 2439 | cpp = fb->format->cpp[0]; | |||
| 2440 | ||||
| 2441 | entries = intel_wm_method2(clock, htotal, hdisplay, cpp, | |||
| 2442 | sr_latency_ns / 100); | |||
| 2443 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); | |||
| 2444 | drm_dbg_kms(&dev_priv->drm, | |||
| 2445 | "self-refresh entries: %d\n", entries); | |||
| 2446 | srwm = wm_info->fifo_size - entries; | |||
| 2447 | if (srwm < 0) | |||
| 2448 | srwm = 1; | |||
| 2449 | ||||
| 2450 | if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) | |||
| 2451 | I915_WRITE(FW_BLC_SELF, | |||
| 2452 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | |||
| 2453 | else | |||
| 2454 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); | |||
| 2455 | } | |||
| 2456 | ||||
| 2457 | drm_dbg_kms(&dev_priv->drm, | |||
| 2458 | "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | |||
| 2459 | planea_wm, planeb_wm, cwm, srwm); | |||
| 2460 | ||||
| 2461 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); | |||
| 2462 | fwater_hi = (cwm & 0x1f); | |||
| 2463 | ||||
| 2464 | /* Set request length to 8 cachelines per fetch */ | |||
| 2465 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | |||
| 2466 | fwater_hi = fwater_hi | (1 << 8); | |||
| 2467 | ||||
| 2468 | I915_WRITE(FW_BLC, fwater_lo); | |||
| 2469 | I915_WRITE(FW_BLC2, fwater_hi); | |||
| 2470 | ||||
| 2471 | if (enabled) | |||
| 2472 | intel_set_memory_cxsr(dev_priv, true); | |||
| 2473 | } | |||
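For reference, a small sketch of how the FW_BLC/FW_BLC2 values written above are packed; the watermark numbers are invented for the example.

/* Illustrative only: same bit packing as above, with made-up watermarks. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int planea_wm = 21, planeb_wm = 17, cwm = 2;
        uint32_t fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        uint32_t fwater_hi = cwm & 0x1f;

        /* request length: 8 cachelines per fetch */
        fwater_lo |= (1 << 24) | (1 << 8);
        fwater_hi |= (1 << 8);

        printf("FW_BLC=0x%08x FW_BLC2=0x%08x\n", fwater_lo, fwater_hi);
        return 0;
}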
| 2474 | ||||
| 2475 | static void i845_update_wm(struct intel_crtc *unused_crtc) | |||
| 2476 | { | |||
| 2477 | struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); | |||
| 2478 | struct intel_crtc *crtc; | |||
| 2479 | const struct drm_display_mode *adjusted_mode; | |||
| 2480 | u32 fwater_lo; | |||
| 2481 | int planea_wm; | |||
| 2482 | ||||
| 2483 | crtc = single_enabled_crtc(dev_priv); | |||
| 2484 | if (crtc == NULL) | |||
| 2485 | return; | |||
| 2486 | ||||
| 2487 | adjusted_mode = &crtc->config->hw.adjusted_mode; | |||
| 2488 | planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, | |||
| 2489 | &i845_wm_info, | |||
| 2490 | dev_priv->display.get_fifo_size(dev_priv, PLANE_A), | |||
| 2491 | 4, pessimal_latency_ns); | |||
| 2492 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | |||
| 2493 | fwater_lo |= (3<<8) | planea_wm; | |||
| 2494 | ||||
| 2495 | drm_dbg_kms(&dev_priv->drm, | |||
| 2496 | "Setting FIFO watermarks - A: %d\n", planea_wm); | |||
| 2497 | ||||
| 2498 | I915_WRITE(FW_BLC, fwater_lo); | |||
| 2499 | } | |||
| 2500 | ||||
| 2501 | /* latency must be in 0.1us units. */ | |||
| 2502 | static unsigned int ilk_wm_method1(unsigned int pixel_rate, | |||
| 2503 | unsigned int cpp, | |||
| 2504 | unsigned int latency) | |||
| 2505 | { | |||
| 2506 | unsigned int ret; | |||
| 2507 | ||||
| 2508 | ret = intel_wm_method1(pixel_rate, cpp, latency); | |||
| 2509 | ret = DIV_ROUND_UP(ret, 64) + 2; | |||
| 2510 | ||||
| 2511 | return ret; | |||
| 2512 | } | |||
| 2513 | ||||
| 2514 | /* latency must be in 0.1us units. */ | |||
| 2515 | static unsigned int ilk_wm_method2(unsigned int pixel_rate, | |||
| 2516 | unsigned int htotal, | |||
| 2517 | unsigned int width, | |||
| 2518 | unsigned int cpp, | |||
| 2519 | unsigned int latency) | |||
| 2520 | { | |||
| 2521 | unsigned int ret; | |||
| 2522 | ||||
| 2523 | ret = intel_wm_method2(pixel_rate, htotal, | |||
| 2524 | width, cpp, latency); | |||
| 2525 | ret = DIV_ROUND_UP(ret, 64) + 2; | |||
| 2526 | ||||
| 2527 | return ret; | |||
| 2528 | } | |||
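The two helpers above convert byte counts into 64-byte FIFO blocks and add a 2-block guard. A standalone sketch with made-up numbers follows, restating what intel_wm_method1()/intel_wm_method2() (defined earlier in this file) are assumed to compute.

/* Illustrative only: not the driver's helpers, just the same math. */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* bytes that accumulate during the latency window (0.1us units, kHz rate) */
static unsigned int method1(unsigned int pixel_rate, unsigned int cpp,
                            unsigned int latency)
{
        return DIV_ROUND_UP((uint64_t)pixel_rate * cpp * latency, 10000);
}

/* lines that pass during the latency window, plus one, in bytes */
static unsigned int method2(unsigned int pixel_rate, unsigned int htotal,
                            unsigned int width, unsigned int cpp,
                            unsigned int latency)
{
        unsigned int lines = (latency * pixel_rate) / (htotal * 10000);
        return (lines + 1) * width * cpp;
}

int main(void)
{
        /* hypothetical 148500 kHz, htotal 2200, 1920 wide, 4 bpp, 3.0us */
        unsigned int m1 = DIV_ROUND_UP(method1(148500, 4, 30), 64) + 2;
        unsigned int m2 = DIV_ROUND_UP(method2(148500, 2200, 1920, 4, 30), 64) + 2;

        printf("method1=%u method2=%u -> WM=%u blocks\n",
               m1, m2, m1 < m2 ? m1 : m2);
        return 0;
}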
| 2529 | ||||
| 2530 | static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) | |||
| 2531 | { | |||
| 2532 | /* | |||
| 2533 | * Neither of these should be possible since this function shouldn't be | |||
| 2534 | * called if the CRTC is off or the plane is invisible. But let's be | |||
| 2535 | * extra paranoid to avoid a potential divide-by-zero if we screw up | |||
| 2536 | * elsewhere in the driver. | |||
| 2537 | */ | |||
| 2538 | if (WARN_ON(!cpp)) | |||
| 2539 | return 0; | |||
| 2540 | if (WARN_ON(!horiz_pixels)) | |||
| 2541 | return 0; | |||
| 2542 | ||||
| 2543 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; | |||
| 2544 | } | |||
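A worked instance of the FBC watermark above, with made-up inputs (pri_val is in 64-byte blocks, so the result is roughly "lines needed" plus a 2-line guard).

/* Illustrative only: same formula as above, invented numbers. */
#include <stdio.h>

int main(void)
{
        unsigned int pri_val = 30, horiz_pixels = 1920, cpp = 4;
        /* 30 blocks * 64 bytes = 1920 bytes; one line is 7680 bytes */
        unsigned int fbc_wm = (pri_val * 64 + horiz_pixels * cpp - 1) /
                              (horiz_pixels * cpp) + 2;

        printf("fbc_wm=%u\n", fbc_wm); /* 1 + 2 = 3 */
        return 0;
}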
| 2545 | ||||
| 2546 | struct ilk_wm_maximums { | |||
| 2547 | u16 pri; | |||
| 2548 | u16 spr; | |||
| 2549 | u16 cur; | |||
| 2550 | u16 fbc; | |||
| 2551 | }; | |||
| 2552 | ||||
| 2553 | /* | |||
| 2554 | * For both WM_PIPE and WM_LP. | |||
| 2555 | * mem_value must be in 0.1us units. | |||
| 2556 | */ | |||
| 2557 | static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, | |||
| 2558 | const struct intel_plane_state *plane_state, | |||
| 2559 | u32 mem_value, bool is_lp) | |||
| 2560 | { | |||
| 2561 | u32 method1, method2; | |||
| 2562 | int cpp; | |||
| 2563 | ||||
| 2564 | if (mem_value == 0) | |||
| 2565 | return U32_MAX; | |||
| 2566 | ||||
| 2567 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 2568 | return 0; | |||
| 2569 | ||||
| 2570 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 2571 | ||||
| 2572 | method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); | |||
| 2573 | ||||
| 2574 | if (!is_lp) | |||
| 2575 | return method1; | |||
| 2576 | ||||
| 2577 | method2 = ilk_wm_method2(crtc_state->pixel_rate, | |||
| 2578 | crtc_state->hw.adjusted_mode.crtc_htotal, | |||
| 2579 | drm_rect_width(&plane_state->uapi.dst), | |||
| 2580 | cpp, mem_value); | |||
| 2581 | ||||
| 2582 | return min(method1, method2); | |||
| 2583 | } | |||
| 2584 | ||||
| 2585 | /* | |||
| 2586 | * For both WM_PIPE and WM_LP. | |||
| 2587 | * mem_value must be in 0.1us units. | |||
| 2588 | */ | |||
| 2589 | static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, | |||
| 2590 | const struct intel_plane_state *plane_state, | |||
| 2591 | u32 mem_value) | |||
| 2592 | { | |||
| 2593 | u32 method1, method2; | |||
| 2594 | int cpp; | |||
| 2595 | ||||
| 2596 | if (mem_value == 0) | |||
| 2597 | return U32_MAX; | |||
| 2598 | ||||
| 2599 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 2600 | return 0; | |||
| 2601 | ||||
| 2602 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 2603 | ||||
| 2604 | method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); | |||
| 2605 | method2 = ilk_wm_method2(crtc_state->pixel_rate, | |||
| 2606 | crtc_state->hw.adjusted_mode.crtc_htotal, | |||
| 2607 | drm_rect_width(&plane_state->uapi.dst), | |||
| 2608 | cpp, mem_value); | |||
| 2609 | return min(method1, method2); | |||
| 2610 | } | |||
| 2611 | ||||
| 2612 | /* | |||
| 2613 | * For both WM_PIPE and WM_LP. | |||
| 2614 | * mem_value must be in 0.1us units. | |||
| 2615 | */ | |||
| 2616 | static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, | |||
| 2617 | const struct intel_plane_state *plane_state, | |||
| 2618 | u32 mem_value) | |||
| 2619 | { | |||
| 2620 | int cpp; | |||
| 2621 | ||||
| 2622 | if (mem_value == 0) | |||
| 2623 | return U32_MAX; | |||
| 2624 | ||||
| 2625 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 2626 | return 0; | |||
| 2627 | ||||
| 2628 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 2629 | ||||
| 2630 | return ilk_wm_method2(crtc_state->pixel_rate, | |||
| 2631 | crtc_state->hw.adjusted_mode.crtc_htotal, | |||
| 2632 | drm_rect_width(&plane_state->uapi.dst), | |||
| 2633 | cpp, mem_value); | |||
| 2634 | } | |||
| 2635 | ||||
| 2636 | /* Only for WM_LP. */ | |||
| 2637 | static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, | |||
| 2638 | const struct intel_plane_state *plane_state, | |||
| 2639 | u32 pri_val) | |||
| 2640 | { | |||
| 2641 | int cpp; | |||
| 2642 | ||||
| 2643 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 2644 | return 0; | |||
| 2645 | ||||
| 2646 | cpp = plane_state->hw.fb->format->cpp[0]; | |||
| 2647 | ||||
| 2648 | return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst), | |||
| 2649 | cpp); | |||
| 2650 | } | |||
| 2651 | ||||
| 2652 | static unsigned int | |||
| 2653 | ilk_display_fifo_size(const struct drm_i915_private *dev_priv) | |||
| 2654 | { | |||
| 2655 | if (INTEL_GEN(dev_priv) >= 8) | |||
| 2656 | return 3072; | |||
| 2657 | else if (INTEL_GEN(dev_priv) >= 7) | |||
| 2658 | return 768; | |||
| 2659 | else | |||
| 2660 | return 512; | |||
| 2661 | } | |||
| 2662 | ||||
| 2663 | static unsigned int | |||
| 2664 | ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, | |||
| 2665 | int level, bool is_sprite) | |||
| 2666 | { | |||
| 2667 | if (INTEL_GEN(dev_priv) >= 8) | |||
| 2668 | /* BDW primary/sprite plane watermarks */ | |||
| 2669 | return level == 0 ? 255 : 2047; | |||
| 2670 | else if (INTEL_GEN(dev_priv) >= 7) | |||
| 2671 | /* IVB/HSW primary/sprite plane watermarks */ | |||
| 2672 | return level == 0 ? 127 : 1023; | |||
| 2673 | else if (!is_sprite) | |||
| 2674 | /* ILK/SNB primary plane watermarks */ | |||
| 2675 | return level == 0 ? 127 : 511; | |||
| 2676 | else | |||
| 2677 | /* ILK/SNB sprite plane watermarks */ | |||
| 2678 | return level == 0 ? 63 : 255; | |||
| 2679 | } | |||
| 2680 | ||||
| 2681 | static unsigned int | |||
| 2682 | ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) | |||
| 2683 | { | |||
| 2684 | if (INTEL_GEN(dev_priv) >= 7) | |||
| 2685 | return level == 0 ? 63 : 255; | |||
| 2686 | else | |||
| 2687 | return level == 0 ? 31 : 63; | |||
| 2688 | } | |||
| 2689 | ||||
| 2690 | static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) | |||
| 2691 | { | |||
| 2692 | if (INTEL_GEN(dev_priv) >= 8) | |||
| 2693 | return 31; | |||
| 2694 | else | |||
| 2695 | return 15; | |||
| 2696 | } | |||
| 2697 | ||||
| 2698 | /* Calculate the maximum primary/sprite plane watermark */ | |||
| 2699 | static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, | |||
| 2700 | int level, | |||
| 2701 | const struct intel_wm_config *config, | |||
| 2702 | enum intel_ddb_partitioning ddb_partitioning, | |||
| 2703 | bool is_sprite) | |||
| 2704 | { | |||
| 2705 | unsigned int fifo_size = ilk_display_fifo_size(dev_priv); | |||
| 2706 | ||||
| 2707 | /* if sprites aren't enabled, sprites get nothing */ | |||
| 2708 | if (is_sprite && !config->sprites_enabled) | |||
| 2709 | return 0; | |||
| 2710 | ||||
| 2711 | /* HSW allows LP1+ watermarks even with multiple pipes */ | |||
| 2712 | if (level == 0 || config->num_pipes_active > 1) { | |||
| 2713 | fifo_size /= INTEL_NUM_PIPES(dev_priv); | |||
| 2714 | ||||
| 2715 | /* | |||
| 2716 | * For some reason the non self refresh | |||
| 2717 | * FIFO size is only half of the self | |||
| 2718 | * refresh FIFO size on ILK/SNB. | |||
| 2719 | */ | |||
| 2720 | if (INTEL_GEN(dev_priv) <= 6) | |||
| 2721 | fifo_size /= 2; | |||
| 2722 | } | |||
| 2723 | ||||
| 2724 | if (config->sprites_enabled) { | |||
| 2725 | /* level 0 is always calculated with 1:1 split */ | |||
| 2726 | if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { | |||
| 2727 | if (is_sprite) | |||
| 2728 | fifo_size *= 5; | |||
| 2729 | fifo_size /= 6; | |||
| 2730 | } else { | |||
| 2731 | fifo_size /= 2; | |||
| 2732 | } | |||
| 2733 | } | |||
| 2734 | ||||
| 2735 | /* clamp to max that the registers can hold */ | |||
| 2736 | return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); | |||
| 2737 | } | |||
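The LP1+ FIFO split above can be hard to follow from the nested divisions; below is a sketch of just the sprite/primary partitioning step, assuming a hypothetical IVB-style 768-entry FIFO with 5/6 DDB partitioning and sprites enabled on a single pipe.

/* Illustrative only: the 5/6 vs 1/2 split, not the full function. */
#include <stdio.h>
#include <stdbool.h>

static unsigned int plane_share(unsigned int fifo_size, bool part_5_6,
                                bool is_sprite)
{
        if (part_5_6) {
                if (is_sprite)
                        fifo_size *= 5;
                fifo_size /= 6;
        } else {
                fifo_size /= 2;
        }
        return fifo_size;
}

int main(void)
{
        printf("primary=%u sprite=%u\n",
               plane_share(768, true, false),   /* 768 / 6     = 128 */
               plane_share(768, true, true));   /* 768 * 5 / 6 = 640 */
        return 0;
}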
| 2738 | ||||
| 2739 | /* Calculate the maximum cursor plane watermark */ | |||
| 2740 | static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, | |||
| 2741 | int level, | |||
| 2742 | const struct intel_wm_config *config) | |||
| 2743 | { | |||
| 2744 | /* HSW LP1+ watermarks w/ multiple pipes */ | |||
| 2745 | if (level > 0 && config->num_pipes_active > 1) | |||
| 2746 | return 64; | |||
| 2747 | ||||
| 2748 | /* otherwise just report max that registers can hold */ | |||
| 2749 | return ilk_cursor_wm_reg_max(dev_priv, level); | |||
| 2750 | } | |||
| 2751 | ||||
| 2752 | static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, | |||
| 2753 | int level, | |||
| 2754 | const struct intel_wm_config *config, | |||
| 2755 | enum intel_ddb_partitioning ddb_partitioning, | |||
| 2756 | struct ilk_wm_maximums *max) | |||
| 2757 | { | |||
| 2758 | max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); | |||
| 2759 | max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); | |||
| 2760 | max->cur = ilk_cursor_wm_max(dev_priv, level, config); | |||
| 2761 | max->fbc = ilk_fbc_wm_reg_max(dev_priv); | |||
| 2762 | } | |||
| 2763 | ||||
| 2764 | static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, | |||
| 2765 | int level, | |||
| 2766 | struct ilk_wm_maximums *max) | |||
| 2767 | { | |||
| 2768 | max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); | |||
| 2769 | max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); | |||
| 2770 | max->cur = ilk_cursor_wm_reg_max(dev_priv, level); | |||
| 2771 | max->fbc = ilk_fbc_wm_reg_max(dev_priv); | |||
| 2772 | } | |||
| 2773 | ||||
| 2774 | static bool ilk_validate_wm_level(int level, | |||
| 2775 | const struct ilk_wm_maximums *max, | |||
| 2776 | struct intel_wm_level *result) | |||
| 2777 | { | |||
| 2778 | bool ret; | |||
| 2779 | ||||
| 2780 | /* already determined to be invalid? */ | |||
| 2781 | if (!result->enable) | |||
| 2782 | return false; | |||
| 2783 | ||||
| 2784 | result->enable = result->pri_val <= max->pri && | |||
| 2785 | result->spr_val <= max->spr && | |||
| 2786 | result->cur_val <= max->cur; | |||
| 2787 | ||||
| 2788 | ret = result->enable; | |||
| 2789 | ||||
| 2790 | /* | |||
| 2791 | * HACK until we can pre-compute everything, | |||
| 2792 | * and thus fail gracefully if LP0 watermarks | |||
| 2793 | * are exceeded... | |||
| 2794 | */ | |||
| 2795 | if (level == 0 && !result->enable) { | |||
| 2796 | if (result->pri_val > max->pri) | |||
| 2797 | DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",__drm_dbg(DRM_UT_KMS, "Primary WM%d too large %u (max %u)\n", level, result->pri_val, max->pri) | |||
| 2798 | level, result->pri_val, max->pri)__drm_dbg(DRM_UT_KMS, "Primary WM%d too large %u (max %u)\n", level, result->pri_val, max->pri); | |||
| 2799 | if (result->spr_val > max->spr) | |||
| 2800 | DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",__drm_dbg(DRM_UT_KMS, "Sprite WM%d too large %u (max %u)\n", level , result->spr_val, max->spr) | |||
| 2801 | level, result->spr_val, max->spr)__drm_dbg(DRM_UT_KMS, "Sprite WM%d too large %u (max %u)\n", level , result->spr_val, max->spr); | |||
| 2802 | if (result->cur_val > max->cur) | |||
| 2803 | DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",__drm_dbg(DRM_UT_KMS, "Cursor WM%d too large %u (max %u)\n", level , result->cur_val, max->cur) | |||
| 2804 | level, result->cur_val, max->cur)__drm_dbg(DRM_UT_KMS, "Cursor WM%d too large %u (max %u)\n", level , result->cur_val, max->cur); | |||
| 2805 | ||||
| 2806 | result->pri_val = min_t(u32, result->pri_val, max->pri)({ u32 __min_a = (result->pri_val); u32 __min_b = (max-> pri); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 2807 | result->spr_val = min_t(u32, result->spr_val, max->spr)({ u32 __min_a = (result->spr_val); u32 __min_b = (max-> spr); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 2808 | result->cur_val = min_t(u32, result->cur_val, max->cur)({ u32 __min_a = (result->cur_val); u32 __min_b = (max-> cur); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 2809 | result->enable = true1; | |||
| 2810 | } | |||
| 2811 | ||||
| 2812 | return ret; | |||
| 2813 | } | |||
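A reduced sketch of the LP0 fallback above, with made-up values, showing that an over-large level 0 is clamped to the register maximum and kept enabled rather than rejected.

/* Illustrative only: the clamp-and-keep-enabled behaviour for level 0. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t pri_val = 150, max_pri = 127;   /* invented values */
        int enable = pri_val <= max_pri;         /* would be invalid */

        if (!enable) {                           /* level == 0 hack path */
                pri_val = pri_val < max_pri ? pri_val : max_pri;
                enable = 1;
        }
        printf("pri_val=%u enable=%d\n", pri_val, enable); /* 127, 1 */
        return 0;
}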
| 2814 | ||||
| 2815 | static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, | |||
| 2816 | const struct intel_crtc *crtc, | |||
| 2817 | int level, | |||
| 2818 | struct intel_crtc_state *crtc_state, | |||
| 2819 | const struct intel_plane_state *pristate, | |||
| 2820 | const struct intel_plane_state *sprstate, | |||
| 2821 | const struct intel_plane_state *curstate, | |||
| 2822 | struct intel_wm_level *result) | |||
| 2823 | { | |||
| 2824 | u16 pri_latency = dev_priv->wm.pri_latency[level]; | |||
| 2825 | u16 spr_latency = dev_priv->wm.spr_latency[level]; | |||
| 2826 | u16 cur_latency = dev_priv->wm.cur_latency[level]; | |||
| 2827 | ||||
| 2828 | /* WM1+ latency values stored in 0.5us units */ | |||
| 2829 | if (level > 0) { | |||
| 2830 | pri_latency *= 5; | |||
| 2831 | spr_latency *= 5; | |||
| 2832 | cur_latency *= 5; | |||
| 2833 | } | |||
| 2834 | ||||
| 2835 | if (pristate) { | |||
| 2836 | result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, | |||
| 2837 | pri_latency, level); | |||
| 2838 | result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); | |||
| 2839 | } | |||
| 2840 | ||||
| 2841 | if (sprstate) | |||
| 2842 | result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); | |||
| 2843 | ||||
| 2844 | if (curstate) | |||
| 2845 | result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); | |||
| 2846 | ||||
| 2847 | result->enable = true; | |||
| 2848 | } | |||
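A small illustration of the unit handling above: WM1+ latencies are stored in 0.5us units and multiplied by 5 so that all levels reach the compute helpers in 0.1us units. The raw values below are invented.

/* Illustrative only: 0.5us -> 0.1us conversion for WM1+ levels. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t raw[5] = { 7, 4, 12, 24, 40 };  /* made-up latency table */

        for (int level = 0; level < 5; level++) {
                unsigned int latency = raw[level];

                if (level > 0)
                        latency *= 5;            /* 0.5us -> 0.1us units */
                printf("WM%d latency %u.%u us\n",
                       level, latency / 10, latency % 10);
        }
        return 0;
}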
| 2849 | ||||
| 2850 | static void intel_read_wm_latency(struct drm_i915_private *dev_priv, | |||
| 2851 | u16 wm[8]) | |||
| 2852 | { | |||
| 2853 | struct intel_uncore *uncore = &dev_priv->uncore; | |||
| 2854 | ||||
| 2855 | if (INTEL_GEN(dev_priv) >= 9) { | |||
| 2856 | u32 val; | |||
| 2857 | int ret, i; | |||
| 2858 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 2859 | ||||
| 2860 | /* read the first set of memory latencies[0:3] */ | |||
| 2861 | val = 0; /* data0 to be programmed to 0 for first set */ | |||
| 2862 | ret = sandybridge_pcode_read(dev_priv, | |||
| 2863 | GEN9_PCODE_READ_MEM_LATENCY, | |||
| 2864 | &val, NULL); | |||
| 2865 | ||||
| 2866 | if (ret) { | |||
| 2867 | drm_err(&dev_priv->drm, | |||
| 2868 | "SKL Mailbox read error = %d\n", ret); | |||
| 2869 | return; | |||
| 2870 | } | |||
| 2871 | ||||
| 2872 | wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2873 | wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |||
| 2874 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2875 | wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |||
| 2876 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2877 | wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |||
| 2878 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2879 | ||||
| 2880 | /* read the second set of memory latencies[4:7] */ | |||
| 2881 | val = 1; /* data0 to be programmed to 1 for second set */ | |||
| 2882 | ret = sandybridge_pcode_read(dev_priv, | |||
| 2883 | GEN9_PCODE_READ_MEM_LATENCY, | |||
| 2884 | &val, NULL); | |||
| 2885 | if (ret) { | |||
| 2886 | drm_err(&dev_priv->drm, | |||
| 2887 | "SKL Mailbox read error = %d\n", ret); | |||
| 2888 | return; | |||
| 2889 | } | |||
| 2890 | ||||
| 2891 | wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2892 | wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & | |||
| 2893 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2894 | wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & | |||
| 2895 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2896 | wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & | |||
| 2897 | GEN9_MEM_LATENCY_LEVEL_MASK; | |||
| 2898 | ||||
| 2899 | /* | |||
| 2900 | * If a level n (n > 1) has a 0us latency, all levels m (m >= n) | |||
| 2901 | * need to be disabled. We make sure to sanitize the values out | |||
| 2902 | * of the punit to satisfy this requirement. | |||
| 2903 | */ | |||
| 2904 | for (level = 1; level <= max_level; level++) { | |||
| 2905 | if (wm[level] == 0) { | |||
| 2906 | for (i = level + 1; i <= max_level; i++) | |||
| 2907 | wm[i] = 0; | |||
| 2908 | break; | |||
| 2909 | } | |||
| 2910 | } | |||
| 2911 | ||||
| 2912 | /* | |||
| 2913 | * WaWmMemoryReadLatency:skl+,glk | |||
| 2914 | * | |||
| 2915 | * punit doesn't take into account the read latency so we need | |||
| 2916 | * to add 2us to the various latency levels we retrieve from the | |||
| 2917 | * punit when the level 0 response data is 0us. | |||
| 2918 | */ | |||
| 2919 | if (wm[0] == 0) { | |||
| 2920 | wm[0] += 2; | |||
| 2921 | for (level = 1; level <= max_level; level++) { | |||
| 2922 | if (wm[level] == 0) | |||
| 2923 | break; | |||
| 2924 | wm[level] += 2; | |||
| 2925 | } | |||
| 2926 | } | |||
| 2927 | ||||
| 2928 | /* | |||
| 2929 | * WA Level-0 adjustment for 16GB DIMMs: SKL+ | |||
| 2930 | * If we could not get dimm info enable this WA to prevent from | |||
| 2931 | * any underrun. If not able to get Dimm info assume 16GB dimm | |||
| 2932 | * to avoid any underrun. | |||
| 2933 | */ | |||
| 2934 | if (dev_priv->dram_info.is_16gb_dimm) | |||
| 2935 | wm[0] += 1; | |||
| 2936 | ||||
| 2937 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | |||
| 2938 | u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); | |||
| 2939 | ||||
| 2940 | wm[0] = (sskpd >> 56) & 0xFF; | |||
| 2941 | if (wm[0] == 0) | |||
| 2942 | wm[0] = sskpd & 0xF; | |||
| 2943 | wm[1] = (sskpd >> 4) & 0xFF; | |||
| 2944 | wm[2] = (sskpd >> 12) & 0xFF; | |||
| 2945 | wm[3] = (sskpd >> 20) & 0x1FF; | |||
| 2946 | wm[4] = (sskpd >> 32) & 0x1FF; | |||
| 2947 | } else if (INTEL_GEN(dev_priv) >= 6) { | |||
| 2948 | u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); | |||
| 2949 | ||||
| 2950 | wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; | |||
| 2951 | wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; | |||
| 2952 | wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; | |||
| 2953 | wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; | |||
| 2954 | } else if (INTEL_GEN(dev_priv) >= 5) { | |||
| 2955 | u32 mltr = intel_uncore_read(uncore, MLTR_ILK); | |||
| 2956 | ||||
| 2957 | /* ILK primary LP0 latency is 700 ns */ | |||
| 2958 | wm[0] = 7; | |||
| 2959 | wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; | |||
| 2960 | wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; | |||
| 2961 | } else { | |||
| 2962 | MISSING_CASE(INTEL_DEVID(dev_priv)); | |||
| 2963 | } | |||
| 2964 | } | |||
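For the HSW/BDW branch above, a standalone sketch of the MCH_SSKPD field extraction with a made-up register value.

/* Illustrative only: same shifts/masks as the HSW/BDW branch above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sskpd = 0x0300002001008040ULL;  /* hypothetical value */
        uint16_t wm[5];

        wm[0] = (sskpd >> 56) & 0xFF;
        if (wm[0] == 0)
                wm[0] = sskpd & 0xF;
        wm[1] = (sskpd >> 4) & 0xFF;
        wm[2] = (sskpd >> 12) & 0xFF;
        wm[3] = (sskpd >> 20) & 0x1FF;
        wm[4] = (sskpd >> 32) & 0x1FF;

        for (int i = 0; i < 5; i++)
                printf("wm[%d]=%u\n", i, wm[i]);  /* 3 4 8 16 32 */
        return 0;
}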
| 2965 | ||||
| 2966 | static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, | |||
| 2967 | u16 wm[5]) | |||
| 2968 | { | |||
| 2969 | /* ILK sprite LP0 latency is 1300 ns */ | |||
| 2970 | if (IS_GEN(dev_priv, 5)) | |||
| 2971 | wm[0] = 13; | |||
| 2972 | } | |||
| 2973 | ||||
| 2974 | static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, | |||
| 2975 | u16 wm[5]) | |||
| 2976 | { | |||
| 2977 | /* ILK cursor LP0 latency is 1300 ns */ | |||
| 2978 | if (IS_GEN(dev_priv, 5)) | |||
| 2979 | wm[0] = 13; | |||
| 2980 | } | |||
| 2981 | ||||
| 2982 | int ilk_wm_max_level(const struct drm_i915_private *dev_priv) | |||
| 2983 | { | |||
| 2984 | /* how many WM levels are we expecting */ | |||
| 2985 | if (INTEL_GEN(dev_priv) >= 9) | |||
| 2986 | return 7; | |||
| 2987 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | |||
| 2988 | return 4; | |||
| 2989 | else if (INTEL_GEN(dev_priv) >= 6) | |||
| 2990 | return 3; | |||
| 2991 | else | |||
| 2992 | return 2; | |||
| 2993 | } | |||
| 2994 | ||||
| 2995 | static void intel_print_wm_latency(struct drm_i915_private *dev_priv, | |||
| 2996 | const char *name, | |||
| 2997 | const u16 wm[]) | |||
| 2998 | { | |||
| 2999 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 3000 | ||||
| 3001 | for (level = 0; level <= max_level; level++) { | |||
| 3002 | unsigned int latency = wm[level]; | |||
| 3003 | ||||
| 3004 | if (latency == 0) { | |||
| 3005 | drm_dbg_kms(&dev_priv->drm, | |||
| 3006 | "%s WM%d latency not provided\n", | |||
| 3007 | name, level); | |||
| 3008 | continue; | |||
| 3009 | } | |||
| 3010 | ||||
| 3011 | /* | |||
| 3012 | * - latencies are in us on gen9. | |||
| 3013 | * - before then, WM1+ latency values are in 0.5us units | |||
| 3014 | */ | |||
| 3015 | if (INTEL_GEN(dev_priv) >= 9) | |||
| 3016 | latency *= 10; | |||
| 3017 | else if (level > 0) | |||
| 3018 | latency *= 5; | |||
| 3019 | ||||
| 3020 | drm_dbg_kms(&dev_priv->drm, | |||
| 3021 | "%s WM%d latency %u (%u.%u usec)\n", name, level, | |||
| 3022 | wm[level], latency / 10, latency % 10); | |||
| 3023 | } | |||
| 3024 | } | |||
| 3025 | ||||
| 3026 | static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, | |||
| 3027 | u16 wm[5], u16 min) | |||
| 3028 | { | |||
| 3029 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 3030 | ||||
| 3031 | if (wm[0] >= min) | |||
| 3032 | return false; | |||
| 3033 | ||||
| 3034 | wm[0] = max(wm[0], min); | |||
| 3035 | for (level = 1; level <= max_level; level++) | |||
| 3036 | wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); | |||
| 3037 | ||||
| 3038 | return true; | |||
| 3039 | } | |||
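A worked example of the helper above as the SNB quirk that follows uses it: WM0 is bumped to min (0.1us units) while WM1+ only need DIV_ROUND_UP(min, 5) because they are kept in 0.5us units. The table values are invented.

/* Illustrative only: same bump logic as above, outside the driver. */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint16_t wm[4] = { 7, 2, 1, 4 };   /* hypothetical BIOS latencies */
        uint16_t min = 12;                 /* 1.2us, as in the SNB quirk */

        if (wm[0] < min) {
                wm[0] = min;
                for (int level = 1; level < 4; level++)
                        wm[level] = wm[level] > DIV_ROUND_UP(min, 5) ?
                                    wm[level] : DIV_ROUND_UP(min, 5);
        }
        for (int i = 0; i < 4; i++)
                printf("wm[%d]=%u\n", i, wm[i]);  /* 12 3 3 4 */
        return 0;
}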
| 3040 | ||||
| 3041 | static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) | |||
| 3042 | { | |||
| 3043 | bool changed; | |||
| 3044 | ||||
| 3045 | /* | |||
| 3046 | * The BIOS provided WM memory latency values are often | |||
| 3047 | * inadequate for high resolution displays. Adjust them. | |||
| 3048 | */ | |||
| 3049 | changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | | |||
| 3050 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | | |||
| 3051 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); | |||
| 3052 | ||||
| 3053 | if (!changed) | |||
| 3054 | return; | |||
| 3055 | ||||
| 3056 | drm_dbg_kms(&dev_priv->drm, | |||
| 3057 | "WM latency values increased to avoid potential underruns\n"); | |||
| 3058 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); | |||
| 3059 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |||
| 3060 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |||
| 3061 | } | |||
| 3062 | ||||
| 3063 | static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) | |||
| 3064 | { | |||
| 3065 | /* | |||
| 3066 | * On some SNB machines (Thinkpad X220 Tablet at least) | |||
| 3067 | * LP3 usage can cause vblank interrupts to be lost. | |||
| 3068 | * The DEIIR bit will go high but it looks like the CPU | |||
| 3069 | * never gets interrupted. | |||
| 3070 | * | |||
| 3071 | * It's not clear whether other interrupt sources could | |||
| 3072 | * be affected or if this is somehow limited to vblank | |||
| 3073 | * interrupts only. To play it safe we disable LP3 | |||
| 3074 | * watermarks entirely. | |||
| 3075 | */ | |||
| 3076 | if (dev_priv->wm.pri_latency[3] == 0 && | |||
| 3077 | dev_priv->wm.spr_latency[3] == 0 && | |||
| 3078 | dev_priv->wm.cur_latency[3] == 0) | |||
| 3079 | return; | |||
| 3080 | ||||
| 3081 | dev_priv->wm.pri_latency[3] = 0; | |||
| 3082 | dev_priv->wm.spr_latency[3] = 0; | |||
| 3083 | dev_priv->wm.cur_latency[3] = 0; | |||
| 3084 | ||||
| 3085 | drm_dbg_kms(&dev_priv->drm, | |||
| 3086 | "LP3 watermarks disabled due to potential for lost interrupts\n"); | |||
| 3087 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); | |||
| 3088 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |||
| 3089 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |||
| 3090 | } | |||
| 3091 | ||||
| 3092 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
| 3093 | { | |||
| 3094 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); | |||
| 3095 | ||||
| 3096 | memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, | |||
| 3097 | sizeof(dev_priv->wm.pri_latency)); | |||
| 3098 | memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, | |||
| 3099 | sizeof(dev_priv->wm.pri_latency)); | |||
| 3100 | ||||
| 3101 | intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); | |||
| 3102 | intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); | |||
| 3103 | ||||
| 3104 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); | |||
| 3105 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | |||
| 3106 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | |||
| 3107 | ||||
| 3108 | if (IS_GEN(dev_priv, 6)) { | |||
| 3109 | snb_wm_latency_quirk(dev_priv); | |||
| 3110 | snb_wm_lp3_irq_quirk(dev_priv); | |||
| 3111 | } | |||
| 3112 | } | |||
| 3113 | ||||
| 3114 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
| 3115 | { | |||
| 3116 | intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); | |||
| 3117 | intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); | |||
| 3118 | } | |||
| 3119 | ||||
| 3120 | static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, | |||
| 3121 | struct intel_pipe_wm *pipe_wm) | |||
| 3122 | { | |||
| 3123 | /* LP0 watermark maximums depend on this pipe alone */ | |||
| 3124 | const struct intel_wm_config config = { | |||
| 3125 | .num_pipes_active = 1, | |||
| 3126 | .sprites_enabled = pipe_wm->sprites_enabled, | |||
| 3127 | .sprites_scaled = pipe_wm->sprites_scaled, | |||
| 3128 | }; | |||
| 3129 | struct ilk_wm_maximums max; | |||
| 3130 | ||||
| 3131 | /* LP0 watermarks always use 1/2 DDB partitioning */ | |||
| 3132 | ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); | |||
| 3133 | ||||
| 3134 | /* At least LP0 must be valid */ | |||
| 3135 | if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { | |||
| 3136 | drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "LP0 watermark invalid\n" ); | |||
| 3137 | return false0; | |||
| 3138 | } | |||
| 3139 | ||||
| 3140 | return true; | |||
| 3141 | } | |||
| 3142 | ||||
| 3143 | /* Compute new watermarks for the pipe */ | |||
| 3144 | static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) | |||
| 3145 | { | |||
| 3146 | struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 3147 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |||
| 3148 | struct intel_pipe_wm *pipe_wm; | |||
| 3149 | struct intel_plane *plane; | |||
| 3150 | const struct intel_plane_state *plane_state; | |||
| 3151 | const struct intel_plane_state *pristate = NULL; | |||
| 3152 | const struct intel_plane_state *sprstate = NULL; | |||
| 3153 | const struct intel_plane_state *curstate = NULL; | |||
| 3154 | int level, max_level = ilk_wm_max_level(dev_priv), usable_level; | |||
| 3155 | struct ilk_wm_maximums max; | |||
| 3156 | ||||
| 3157 | pipe_wm = &crtc_state->wm.ilk.optimal; | |||
| 3158 | ||||
| 3159 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { | |||
| 3160 | if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) | |||
| 3161 | pristate = plane_state; | |||
| 3162 | else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) | |||
| 3163 | sprstate = plane_state; | |||
| 3164 | else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) | |||
| 3165 | curstate = plane_state; | |||
| 3166 | } | |||
| 3167 | ||||
| 3168 | pipe_wm->pipe_enabled = crtc_state->hw.active; | |||
| 3169 | if (sprstate) { | |||
| 3170 | pipe_wm->sprites_enabled = sprstate->uapi.visible; | |||
| 3171 | pipe_wm->sprites_scaled = sprstate->uapi.visible && | |||
| 3172 | (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 || | |||
| 3173 | drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16); | |||
| 3174 | } | |||
| 3175 | ||||
| 3176 | usable_level = max_level; | |||
| 3177 | ||||
| 3178 | /* ILK/SNB: LP2+ watermarks only w/o sprites */ | |||
| 3179 | if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) | |||
| 3180 | usable_level = 1; | |||
| 3181 | ||||
| 3182 | /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ | |||
| 3183 | if (pipe_wm->sprites_scaled) | |||
| 3184 | usable_level = 0; | |||
| 3185 | ||||
| 3186 | memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); | |||
| 3187 | ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, | |||
| 3188 | pristate, sprstate, curstate, &pipe_wm->wm[0]); | |||
| 3189 | ||||
| 3190 | if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) | |||
| 3191 | return -EINVAL; | |||
| 3192 | ||||
| 3193 | ilk_compute_wm_reg_maximums(dev_priv, 1, &max); | |||
| 3194 | ||||
| 3195 | for (level = 1; level <= usable_level; level++) { | |||
| 3196 | struct intel_wm_level *wm = &pipe_wm->wm[level]; | |||
| 3197 | ||||
| 3198 | ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, | |||
| 3199 | pristate, sprstate, curstate, wm); | |||
| 3200 | ||||
| 3201 | /* | |||
| 3202 | * Disable any watermark level that exceeds the | |||
| 3203 | * register maximums since such watermarks are | |||
| 3204 | * always invalid. | |||
| 3205 | */ | |||
| 3206 | if (!ilk_validate_wm_level(level, &max, wm)) { | |||
| 3207 | memset(wm, 0, sizeof(*wm)); | |||
| 3208 | break; | |||
| 3209 | } | |||
| 3210 | } | |||
| 3211 | ||||
| 3212 | return 0; | |||
| 3213 | } | |||
| 3214 | ||||
| 3215 | /* | |||
| 3216 | * Build a set of 'intermediate' watermark values that satisfy both the old | |||
| 3217 | * state and the new state. These can be programmed to the hardware | |||
| 3218 | * immediately. | |||
| 3219 | */ | |||
| 3220 | static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate) | |||
| 3221 | { | |||
| 3222 | struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc); | |||
| 3223 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); | |||
| 3224 | struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; | |||
| 3225 | struct intel_atomic_state *intel_state = | |||
| 3226 | to_intel_atomic_state(newstate->uapi.state); | |||
| 3227 | const struct intel_crtc_state *oldstate = | |||
| 3228 | intel_atomic_get_old_crtc_state(intel_state, intel_crtc); | |||
| 3229 | const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal; | |||
| 3230 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 3231 | ||||
| 3232 | /* | |||
| 3233 | * Start with the final, target watermarks, then combine with the | |||
| 3234 | * currently active watermarks to get values that are safe both before | |||
| 3235 | * and after the vblank. | |||
| 3236 | */ | |||
| 3237 | *a = newstate->wm.ilk.optimal; | |||
| 3238 | if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) || | |||
| 3239 | intel_state->skip_intermediate_wm) | |||
| 3240 | return 0; | |||
| 3241 | ||||
| 3242 | a->pipe_enabled |= b->pipe_enabled; | |||
| 3243 | a->sprites_enabled |= b->sprites_enabled; | |||
| 3244 | a->sprites_scaled |= b->sprites_scaled; | |||
| 3245 | ||||
| 3246 | for (level = 0; level <= max_level; level++) { | |||
| 3247 | struct intel_wm_level *a_wm = &a->wm[level]; | |||
| 3248 | const struct intel_wm_level *b_wm = &b->wm[level]; | |||
| 3249 | ||||
| 3250 | a_wm->enable &= b_wm->enable; | |||
| 3251 | a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); | |||
| 3252 | a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); | |||
| 3253 | a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); | |||
| 3254 | a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); | |||
| 3255 | } | |||
| 3256 | ||||
| 3257 | /* | |||
| 3258 | * We need to make sure that these merged watermark values are | |||
| 3259 | * actually a valid configuration themselves. If they're not, | |||
| 3260 | * there's no safe way to transition from the old state to | |||
| 3261 | * the new state, so we need to fail the atomic transaction. | |||
| 3262 | */ | |||
| 3263 | if (!ilk_validate_pipe_wm(dev_priv, a)) | |||
| 3264 | return -EINVAL; | |||
| 3265 | ||||
| 3266 | /* | |||
| 3267 | * If our intermediate WM are identical to the final WM, then we can | |||
| 3268 | * omit the post-vblank programming; only update if it's different. | |||
| 3269 | */ | |||
| 3270 | if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0) | |||
| 3271 | newstate->wm.need_postvbl_update = true; | |||
| 3272 | ||||
| 3273 | return 0; | |||
| 3274 | } | |||
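A reduced sketch of the intermediate-watermark merge above: each field takes the larger of the old optimal and new optimal values so the programmed watermarks stay safe on both sides of the vblank. The numbers are made up and only one level is shown.

/* Illustrative only: one-level version of the merge logic above. */
#include <stdio.h>
#include <stdint.h>

struct wm_level { int enable; uint32_t pri, spr, cur; };

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        struct wm_level newv = { 1, 40, 20, 8 };  /* target state */
        struct wm_level oldv = { 1, 55, 10, 6 };  /* currently active */
        struct wm_level mid = newv;

        mid.enable &= oldv.enable;
        mid.pri = MAX(mid.pri, oldv.pri);
        mid.spr = MAX(mid.spr, oldv.spr);
        mid.cur = MAX(mid.cur, oldv.cur);

        printf("intermediate: pri=%u spr=%u cur=%u enable=%d\n",
               mid.pri, mid.spr, mid.cur, mid.enable);  /* 55 20 8 1 */
        return 0;
}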
| 3275 | ||||
| 3276 | /* | |||
| 3277 | * Merge the watermarks from all active pipes for a specific level. | |||
| 3278 | */ | |||
| 3279 | static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, | |||
| 3280 | int level, | |||
| 3281 | struct intel_wm_level *ret_wm) | |||
| 3282 | { | |||
| 3283 | const struct intel_crtc *intel_crtc; | |||
| 3284 | ||||
| 3285 | ret_wm->enable = true; | |||
| 3286 | ||||
| 3287 | for_each_intel_crtc(&dev_priv->drm, intel_crtc) { | |||
| 3288 | const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; | |||
| 3289 | const struct intel_wm_level *wm = &active->wm[level]; | |||
| 3290 | ||||
| 3291 | if (!active->pipe_enabled) | |||
| 3292 | continue; | |||
| 3293 | ||||
| 3294 | /* | |||
| 3295 | * The watermark values may have been used in the past, | |||
| 3296 | * so we must maintain them in the registers for some | |||
| 3297 | * time even if the level is now disabled. | |||
| 3298 | */ | |||
| 3299 | if (!wm->enable) | |||
| 3300 | ret_wm->enable = false; | |||
| 3301 | ||||
| 3302 | ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); | |||
| 3303 | ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); | |||
| 3304 | ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); | |||
| 3305 | ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); | |||
| 3306 | } | |||
| 3307 | } | |||
| 3308 | ||||
| 3309 | /* | |||
| 3310 | * Merge all low power watermarks for all active pipes. | |||
| 3311 | */ | |||
| 3312 | static void ilk_wm_merge(struct drm_i915_private *dev_priv, | |||
| 3313 | const struct intel_wm_config *config, | |||
| 3314 | const struct ilk_wm_maximums *max, | |||
| 3315 | struct intel_pipe_wm *merged) | |||
| 3316 | { | |||
| 3317 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 3318 | int last_enabled_level = max_level; | |||
| 3319 | ||||
| 3320 | /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ | |||
| 3321 | if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && | |||
| 3322 | config->num_pipes_active > 1) | |||
| 3323 | last_enabled_level = 0; | |||
| 3324 | ||||
| 3325 | /* ILK: FBC WM must be disabled always */ | |||
| 3326 | merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6; | |||
| 3327 | ||||
| 3328 | /* merge each WM1+ level */ | |||
| 3329 | for (level = 1; level <= max_level; level++) { | |||
| 3330 | struct intel_wm_level *wm = &merged->wm[level]; | |||
| 3331 | ||||
| 3332 | ilk_merge_wm_level(dev_priv, level, wm); | |||
| 3333 | ||||
| 3334 | if (level > last_enabled_level) | |||
| 3335 | wm->enable = false; | |||
| 3336 | else if (!ilk_validate_wm_level(level, max, wm)) | |||
| 3337 | /* make sure all following levels get disabled */ | |||
| 3338 | last_enabled_level = level - 1; | |||
| 3339 | ||||
| 3340 | /* | |||
| 3341 | * The spec says it is preferred to disable | |||
| 3342 | * FBC WMs instead of disabling a WM level. | |||
| 3343 | */ | |||
| 3344 | if (wm->fbc_val > max->fbc) { | |||
| 3345 | if (wm->enable) | |||
| 3346 | merged->fbc_wm_enabled = false0; | |||
| 3347 | wm->fbc_val = 0; | |||
| 3348 | } | |||
| 3349 | } | |||
| 3350 | ||||
| 3351 | /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ | |||
| 3352 | /* | |||
| 3353 | * FIXME this is racy. FBC might get enabled later. | |||
| 3354 | * What we should check here is whether FBC can be | |||
| 3355 | * enabled sometime later. | |||
| 3356 | */ | |||
| 3357 | if (IS_GEN(dev_priv, 5)(0 + (&(dev_priv)->__info)->gen == (5)) && !merged->fbc_wm_enabled && | |||
| 3358 | intel_fbc_is_active(dev_priv)) { | |||
| 3359 | for (level = 2; level <= max_level; level++) { | |||
| 3360 | struct intel_wm_level *wm = &merged->wm[level]; | |||
| 3361 | ||||
| 3362 | wm->enable = false0; | |||
| 3363 | } | |||
| 3364 | } | |||
| 3365 | } | |||
| 3366 | ||||
| 3367 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) | |||
| 3368 | { | |||
| 3369 | /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ | |||
| 3370 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); | |||
| 3371 | } | |||
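/*
 * Editor's sketch, not part of the driver: the LP slot to WM level mapping
 * above written out as a worked example.  The helper name is hypothetical.
 *
 *   level 4 unused:  LP1 -> 1, LP2 -> 2, LP3 -> 3
 *   level 4 in use:  LP1 -> 1, LP2 -> 3, LP3 -> 4
 */
static inline int example_lp_to_level(int wm_lp, int level4_in_use)
{
	return wm_lp + (wm_lp >= 2 && level4_in_use);
}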
| 3372 | ||||
| 3373 | /* The value we need to program into the WM_LPx latency field */ | |||
| 3374 | static unsigned int ilk_wm_lp_latency(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3375 | int level) | |||
| 3376 | { | |||
| 3377 | if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL) || IS_BROADWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROADWELL)) | |||
| 3378 | return 2 * level; | |||
| 3379 | else | |||
| 3380 | return dev_priv->wm.pri_latency[level]; | |||
| 3381 | } | |||
| 3382 | ||||
| 3383 | static void ilk_compute_wm_results(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3384 | const struct intel_pipe_wm *merged, | |||
| 3385 | enum intel_ddb_partitioning partitioning, | |||
| 3386 | struct ilk_wm_values *results) | |||
| 3387 | { | |||
| 3388 | struct intel_crtc *intel_crtc; | |||
| 3389 | int level, wm_lp; | |||
| 3390 | ||||
| 3391 | results->enable_fbc_wm = merged->fbc_wm_enabled; | |||
| 3392 | results->partitioning = partitioning; | |||
| 3393 | ||||
| 3394 | /* LP1+ register values */ | |||
| 3395 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | |||
| 3396 | const struct intel_wm_level *r; | |||
| 3397 | ||||
| 3398 | level = ilk_wm_lp_to_level(wm_lp, merged); | |||
| 3399 | ||||
| 3400 | r = &merged->wm[level]; | |||
| 3401 | ||||
| 3402 | /* | |||
| 3403 | * Maintain the watermark values even if the level is | |||
| 3404 | * disabled. Doing otherwise could cause underruns. | |||
| 3405 | */ | |||
| 3406 | results->wm_lp[wm_lp - 1] = | |||
| 3407 | (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT24) | | |||
| 3408 | (r->pri_val << WM1_LP_SR_SHIFT8) | | |||
| 3409 | r->cur_val; | |||
| 3410 | ||||
| 3411 | if (r->enable) | |||
| 3412 | results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN(1 << 31); | |||
| 3413 | ||||
| 3414 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 8) | |||
| 3415 | results->wm_lp[wm_lp - 1] |= | |||
| 3416 | r->fbc_val << WM1_LP_FBC_SHIFT_BDW19; | |||
| 3417 | else | |||
| 3418 | results->wm_lp[wm_lp - 1] |= | |||
| 3419 | r->fbc_val << WM1_LP_FBC_SHIFT20; | |||
| 3420 | ||||
| 3421 | /* | |||
| 3422 | * Always set WM1S_LP_EN when spr_val != 0, even if the | |||
| 3423 | * level is disabled. Doing otherwise could cause underruns. | |||
| 3424 | */ | |||
| 3425 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) <= 6 && r->spr_val) { | |||
| 3426 | drm_WARN_ON(&dev_priv->drm, wm_lp != 1)({ int __ret = !!((wm_lp != 1)); if (__ret) printf("%s %s: " "%s" , dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "wm_lp != 1" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 3427 | results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN(1 << 31) | r->spr_val; | |||
| 3428 | } else | |||
| 3429 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | |||
| 3430 | } | |||
| 3431 | ||||
| 3432 | /* LP0 register values */ | |||
| 3433 | for_each_intel_crtc(&dev_priv->drm, intel_crtc)for (intel_crtc = ({ const __typeof( ((__typeof(*intel_crtc) * )0)->base.head ) *__mptr = ((&(&dev_priv->drm)-> mode_config.crtc_list)->next); (__typeof(*intel_crtc) *)( ( char *)__mptr - __builtin_offsetof(__typeof(*intel_crtc), base .head) );}); &intel_crtc->base.head != (&(&dev_priv ->drm)->mode_config.crtc_list); intel_crtc = ({ const __typeof ( ((__typeof(*intel_crtc) *)0)->base.head ) *__mptr = (intel_crtc ->base.head.next); (__typeof(*intel_crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*intel_crtc), base.head) );})) { | |||
| 3434 | enum pipe pipe = intel_crtc->pipe; | |||
| 3435 | const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk; | |||
| 3436 | const struct intel_wm_level *r = &pipe_wm->wm[0]; | |||
| 3437 | ||||
| 3438 | if (drm_WARN_ON(&dev_priv->drm, !r->enable)({ int __ret = !!((!r->enable)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->dev), "" , "drm_WARN_ON(" "!r->enable" ")"); __builtin_expect(!!(__ret ), 0); })) | |||
| 3439 | continue; | |||
| 3440 | ||||
| 3441 | results->wm_pipe[pipe] = | |||
| 3442 | (r->pri_val << WM0_PIPE_PLANE_SHIFT16) | | |||
| 3443 | (r->spr_val << WM0_PIPE_SPRITE_SHIFT8) | | |||
| 3444 | r->cur_val; | |||
| 3445 | } | |||
| 3446 | } | |||
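/*
 * Editor's sketch, not part of the driver: how a single WM_LPx value is
 * packed from a merged level, mirroring the shifts used above (latency at
 * bit 24, primary at bit 8, cursor in the low bits, enable in bit 31).
 * Field widths are not range-checked; the helper name is hypothetical.
 */
static inline unsigned int example_pack_wm_lp(unsigned int latency,
					      unsigned int pri_val,
					      unsigned int cur_val,
					      int enable)
{
	unsigned int val = (latency << 24) | (pri_val << 8) | cur_val;

	if (enable)
		val |= 1u << 31;

	return val;
}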
| 3447 | ||||
| 3448 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in | |||
| 3449 | * case both are at the same level. Prefer r1 in case they're the same. */ | |||
| 3450 | static struct intel_pipe_wm * | |||
| 3451 | ilk_find_best_result(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3452 | struct intel_pipe_wm *r1, | |||
| 3453 | struct intel_pipe_wm *r2) | |||
| 3454 | { | |||
| 3455 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 3456 | int level1 = 0, level2 = 0; | |||
| 3457 | ||||
| 3458 | for (level = 1; level <= max_level; level++) { | |||
| 3459 | if (r1->wm[level].enable) | |||
| 3460 | level1 = level; | |||
| 3461 | if (r2->wm[level].enable) | |||
| 3462 | level2 = level; | |||
| 3463 | } | |||
| 3464 | ||||
| 3465 | if (level1 == level2) { | |||
| 3466 | if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) | |||
| 3467 | return r2; | |||
| 3468 | else | |||
| 3469 | return r1; | |||
| 3470 | } else if (level1 > level2) { | |||
| 3471 | return r1; | |||
| 3472 | } else { | |||
| 3473 | return r2; | |||
| 3474 | } | |||
| 3475 | } | |||
| 3476 | ||||
| 3477 | /* dirty bits used to track which watermarks need changes */ | |||
| 3478 | #define WM_DIRTY_PIPE(pipe)(1 << (pipe)) (1 << (pipe)) | |||
| 3479 | #define WM_DIRTY_LP(wm_lp)(1 << (15 + (wm_lp))) (1 << (15 + (wm_lp))) | |||
| 3480 | #define WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3)))) (WM_DIRTY_LP(1)(1 << (15 + (1))) | WM_DIRTY_LP(2)(1 << (15 + (2))) | WM_DIRTY_LP(3)(1 << (15 + (3)))) | |||
| 3481 | #define WM_DIRTY_FBC(1 << 24) (1 << 24) | |||
| 3482 | #define WM_DIRTY_DDB(1 << 25) (1 << 25) | |||
| 3483 | ||||
| 3484 | static unsigned int ilk_compute_wm_dirty(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3485 | const struct ilk_wm_values *old, | |||
| 3486 | const struct ilk_wm_values *new) | |||
| 3487 | { | |||
| 3488 | unsigned int dirty = 0; | |||
| 3489 | enum pipe pipe; | |||
| 3490 | int wm_lp; | |||
| 3491 | ||||
| 3492 | for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!( (&(dev_priv)->__info)->pipe_mask & (1UL << (pipe)))) {} else { | |||
| 3493 | if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { | |||
| 3494 | dirty |= WM_DIRTY_PIPE(pipe)(1 << (pipe)); | |||
| 3495 | /* Must disable LP1+ watermarks too */ | |||
| 3496 | dirty |= WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3)))); | |||
| 3497 | } | |||
| 3498 | } | |||
| 3499 | ||||
| 3500 | if (old->enable_fbc_wm != new->enable_fbc_wm) { | |||
| 3501 | dirty |= WM_DIRTY_FBC(1 << 24); | |||
| 3502 | /* Must disable LP1+ watermarks too */ | |||
| 3503 | dirty |= WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3)))); | |||
| 3504 | } | |||
| 3505 | ||||
| 3506 | if (old->partitioning != new->partitioning) { | |||
| 3507 | dirty |= WM_DIRTY_DDB(1 << 25); | |||
| 3508 | /* Must disable LP1+ watermarks too */ | |||
| 3509 | dirty |= WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3)))); | |||
| 3510 | } | |||
| 3511 | ||||
| 3512 | /* LP1+ watermarks already deemed dirty, no need to continue */ | |||
| 3513 | if (dirty & WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3))))) | |||
| 3514 | return dirty; | |||
| 3515 | ||||
| 3516 | /* Find the lowest numbered LP1+ watermark in need of an update... */ | |||
| 3517 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | |||
| 3518 | if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || | |||
| 3519 | old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) | |||
| 3520 | break; | |||
| 3521 | } | |||
| 3522 | ||||
| 3523 | /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ | |||
| 3524 | for (; wm_lp <= 3; wm_lp++) | |||
| 3525 | dirty |= WM_DIRTY_LP(wm_lp)(1 << (15 + (wm_lp))); | |||
| 3526 | ||||
| 3527 | return dirty; | |||
| 3528 | } | |||
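/*
 * Editor's sketch, not part of the driver: the tail of the function above in
 * isolation - find the lowest-numbered LP register whose value changed and
 * mark it together with every higher-numbered LP register as dirty, using
 * the same bit layout as WM_DIRTY_LP().  The sprite registers are handled
 * the same way above; names here are hypothetical.
 */
static unsigned int example_mark_lp_dirty(const unsigned int old_lp[3],
					  const unsigned int new_lp[3])
{
	unsigned int dirty = 0;
	int wm_lp;

	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old_lp[wm_lp - 1] != new_lp[wm_lp - 1])
			break;
	}

	for (; wm_lp <= 3; wm_lp++)
		dirty |= 1u << (15 + wm_lp);

	return dirty;
}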
| 3529 | ||||
| 3530 | static bool_Bool _ilk_disable_lp_wm(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3531 | unsigned int dirty) | |||
| 3532 | { | |||
| 3533 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | |||
| 3534 | bool_Bool changed = false0; | |||
| 3535 | ||||
| 3536 | if (dirty & WM_DIRTY_LP(3)(1 << (15 + (3))) && previous->wm_lp[2] & WM1_LP_SR_EN(1 << 31)) { | |||
| 3537 | previous->wm_lp[2] &= ~WM1_LP_SR_EN(1 << 31); | |||
| 3538 | I915_WRITE(WM3_LP_ILK, previous->wm_lp[2])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45110) })), (previous->wm_lp[2])); | |||
| 3539 | changed = true1; | |||
| 3540 | } | |||
| 3541 | if (dirty & WM_DIRTY_LP(2)(1 << (15 + (2))) && previous->wm_lp[1] & WM1_LP_SR_EN(1 << 31)) { | |||
| 3542 | previous->wm_lp[1] &= ~WM1_LP_SR_EN(1 << 31); | |||
| 3543 | I915_WRITE(WM2_LP_ILK, previous->wm_lp[1])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x4510c) })), (previous->wm_lp[1])); | |||
| 3544 | changed = true1; | |||
| 3545 | } | |||
| 3546 | if (dirty & WM_DIRTY_LP(1)(1 << (15 + (1))) && previous->wm_lp[0] & WM1_LP_SR_EN(1 << 31)) { | |||
| 3547 | previous->wm_lp[0] &= ~WM1_LP_SR_EN(1 << 31); | |||
| 3548 | I915_WRITE(WM1_LP_ILK, previous->wm_lp[0])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45108) })), (previous->wm_lp[0])); | |||
| 3549 | changed = true1; | |||
| 3550 | } | |||
| 3551 | ||||
| 3552 | /* | |||
| 3553 | * Don't touch WM1S_LP_EN here. | |||
| 3554 | * Doing so could cause underruns. | |||
| 3555 | */ | |||
| 3556 | ||||
| 3557 | return changed; | |||
| 3558 | } | |||
| 3559 | ||||
| 3560 | /* | |||
| 3561 | * The spec says we shouldn't write when we don't need to, because every write | |||
| 3562 | * causes WMs to be re-evaluated, expending some power. | |||
| 3563 | */ | |||
| 3564 | static void ilk_write_wm_values(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3565 | struct ilk_wm_values *results) | |||
| 3566 | { | |||
| 3567 | struct ilk_wm_values *previous = &dev_priv->wm.hw; | |||
| 3568 | unsigned int dirty; | |||
| 3569 | u32 val; | |||
| 3570 | ||||
| 3571 | dirty = ilk_compute_wm_dirty(dev_priv, previous, results); | |||
| 3572 | if (!dirty) | |||
| 3573 | return; | |||
| 3574 | ||||
| 3575 | _ilk_disable_lp_wm(dev_priv, dirty); | |||
| 3576 | ||||
| 3577 | if (dirty & WM_DIRTY_PIPE(PIPE_A)(1 << (PIPE_A))) | |||
| 3578 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45100) })), (results->wm_pipe[0])); | |||
| 3579 | if (dirty & WM_DIRTY_PIPE(PIPE_B)(1 << (PIPE_B))) | |||
| 3580 | I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45104) })), (results->wm_pipe[1])); | |||
| 3581 | if (dirty & WM_DIRTY_PIPE(PIPE_C)(1 << (PIPE_C))) | |||
| 3582 | I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45200) })), (results->wm_pipe[2])); | |||
| 3583 | ||||
| 3584 | if (dirty & WM_DIRTY_DDB(1 << 25)) { | |||
| 3585 | if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL) || IS_BROADWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROADWELL)) { | |||
| 3586 | val = I915_READ(WM_MISC)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45260) }))); | |||
| 3587 | if (results->partitioning == INTEL_DDB_PART_1_2) | |||
| 3588 | val &= ~WM_MISC_DATA_PARTITION_5_6(1 << 0); | |||
| 3589 | else | |||
| 3590 | val |= WM_MISC_DATA_PARTITION_5_6(1 << 0); | |||
| 3591 | I915_WRITE(WM_MISC, val)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45260) })), (val)); | |||
| 3592 | } else { | |||
| 3593 | val = I915_READ(DISP_ARB_CTL2)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45004) }))); | |||
| 3594 | if (results->partitioning == INTEL_DDB_PART_1_2) | |||
| 3595 | val &= ~DISP_DATA_PARTITION_5_6(1 << 6); | |||
| 3596 | else | |||
| 3597 | val |= DISP_DATA_PARTITION_5_6(1 << 6); | |||
| 3598 | I915_WRITE(DISP_ARB_CTL2, val)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45004) })), (val)); | |||
| 3599 | } | |||
| 3600 | } | |||
| 3601 | ||||
| 3602 | if (dirty & WM_DIRTY_FBC(1 << 24)) { | |||
| 3603 | val = I915_READ(DISP_ARB_CTL)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45000) }))); | |||
| 3604 | if (results->enable_fbc_wm) | |||
| 3605 | val &= ~DISP_FBC_WM_DIS(1 << 15); | |||
| 3606 | else | |||
| 3607 | val |= DISP_FBC_WM_DIS(1 << 15); | |||
| 3608 | I915_WRITE(DISP_ARB_CTL, val)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45000) })), (val)); | |||
| 3609 | } | |||
| 3610 | ||||
| 3611 | if (dirty & WM_DIRTY_LP(1)(1 << (15 + (1))) && | |||
| 3612 | previous->wm_lp_spr[0] != results->wm_lp_spr[0]) | |||
| 3613 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45120) })), (results->wm_lp_spr[0])); | |||
| 3614 | ||||
| 3615 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 7) { | |||
| 3616 | if (dirty & WM_DIRTY_LP(2)(1 << (15 + (2))) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) | |||
| 3617 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45124) })), (results->wm_lp_spr[1])); | |||
| 3618 | if (dirty & WM_DIRTY_LP(3)(1 << (15 + (3))) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) | |||
| 3619 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45128) })), (results->wm_lp_spr[2])); | |||
| 3620 | } | |||
| 3621 | ||||
| 3622 | if (dirty & WM_DIRTY_LP(1)(1 << (15 + (1))) && previous->wm_lp[0] != results->wm_lp[0]) | |||
| 3623 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45108) })), (results->wm_lp[0])); | |||
| 3624 | if (dirty & WM_DIRTY_LP(2)(1 << (15 + (2))) && previous->wm_lp[1] != results->wm_lp[1]) | |||
| 3625 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x4510c) })), (results->wm_lp[1])); | |||
| 3626 | if (dirty & WM_DIRTY_LP(3)(1 << (15 + (3))) && previous->wm_lp[2] != results->wm_lp[2]) | |||
| 3627 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2])intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45110) })), (results->wm_lp[2])); | |||
| 3628 | ||||
| 3629 | dev_priv->wm.hw = *results; | |||
| 3630 | } | |||
| 3631 | ||||
| 3632 | bool_Bool ilk_disable_lp_wm(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3633 | { | |||
| 3634 | return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL((1 << (15 + (1))) | (1 << (15 + (2))) | (1 << (15 + (3))))); | |||
| 3635 | } | |||
| 3636 | ||||
| 3637 | u8 intel_enabled_dbuf_slices_mask(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3638 | { | |||
| 3639 | int i; | |||
| 3640 | int max_slices = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->num_supported_dbuf_slices; | |||
| 3641 | u8 enabled_slices_mask = 0; | |||
| 3642 | ||||
| 3643 | for (i = 0; i < max_slices; i++) { | |||
| 3644 | if (I915_READ(DBUF_CTL_S(i))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((0x45008) + (i) * ((0x44FE8) - (0x45008)))) }))) & DBUF_POWER_STATE(1 << 30)) | |||
| 3645 | enabled_slices_mask |= BIT(i)(1UL << (i)); | |||
| 3646 | } | |||
| 3647 | ||||
| 3648 | return enabled_slices_mask; | |||
| 3649 | } | |||
| 3650 | ||||
| 3651 | /* | |||
| 3652 | * FIXME: We still don't have the proper code to detect if we need to apply the WA, | |||
| 3653 | * so assume we'll always need it in order to avoid underruns. | |||
| 3654 | */ | |||
| 3655 | static bool_Bool skl_needs_memory_bw_wa(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3656 | { | |||
| 3657 | return IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) && !((&(dev_priv)->__info)->is_lp)) || IS_BROXTON(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROXTON); | |||
| 3658 | } | |||
| 3659 | ||||
| 3660 | static bool_Bool | |||
| 3661 | intel_has_sagv(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3662 | { | |||
| 3663 | return (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) && !((&(dev_priv)->__info)->is_lp)) || INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 10) && | |||
| 3664 | dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; | |||
| 3665 | } | |||
| 3666 | ||||
| 3667 | static void | |||
| 3668 | skl_setup_sagv_block_time(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3669 | { | |||
| 3670 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) { | |||
| 3671 | u32 val = 0; | |||
| 3672 | int ret; | |||
| 3673 | ||||
| 3674 | ret = sandybridge_pcode_read(dev_priv, | |||
| 3675 | GEN12_PCODE_READ_SAGV_BLOCK_TIME_US0x23, | |||
| 3676 | &val, NULL((void *)0)); | |||
| 3677 | if (!ret) { | |||
| 3678 | dev_priv->sagv_block_time_us = val; | |||
| 3679 | return; | |||
| 3680 | } | |||
| 3681 | ||||
| 3682 | drm_dbg(&dev_priv->drm, "Couldn't read SAGV block time!\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_DRIVER, "Couldn't read SAGV block time!\n" ); | |||
| 3683 | } else if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11))) { | |||
| 3684 | dev_priv->sagv_block_time_us = 10; | |||
| 3685 | return; | |||
| 3686 | } else if (IS_GEN(dev_priv, 10)(0 + (&(dev_priv)->__info)->gen == (10))) { | |||
| 3687 | dev_priv->sagv_block_time_us = 20; | |||
| 3688 | return; | |||
| 3689 | } else if (IS_GEN(dev_priv, 9)(0 + (&(dev_priv)->__info)->gen == (9))) { | |||
| 3690 | dev_priv->sagv_block_time_us = 30; | |||
| 3691 | return; | |||
| 3692 | } else { | |||
| 3693 | MISSING_CASE(INTEL_GEN(dev_priv))({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "((&(dev_priv)->__info)->gen)", (long)(((&(dev_priv )->__info)->gen))); __builtin_expect(!!(__ret), 0); }); | |||
| 3694 | } | |||
| 3695 | ||||
| 3696 | /* Default to an unusable block time */ | |||
| 3697 | dev_priv->sagv_block_time_us = -1; | |||
| 3698 | } | |||
| 3699 | ||||
| 3700 | /* | |||
| 3701 | * SAGV dynamically adjusts the system agent voltage and clock frequencies | |||
| 3702 | * depending on power and performance requirements. The display engine access | |||
| 3703 | * to system memory is blocked during the adjustment time. Because of the | |||
| 3704 | * blocking time, having this enabled can cause full system hangs and/or pipe | |||
| 3705 | * underruns if we don't meet all of the following requirements: | |||
| 3706 | * | |||
| 3707 | * - <= 1 pipe enabled | |||
| 3708 | * - All planes can enable watermarks for latencies >= SAGV engine block time | |||
| 3709 | * - We're not using an interlaced display configuration | |||
| 3710 | */ | |||
| 3711 | int | |||
| 3712 | intel_enable_sagv(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3713 | { | |||
| 3714 | int ret; | |||
| 3715 | ||||
| 3716 | if (!intel_has_sagv(dev_priv)) | |||
| 3717 | return 0; | |||
| 3718 | ||||
| 3719 | if (dev_priv->sagv_status == I915_SAGV_ENABLED) | |||
| 3720 | return 0; | |||
| 3721 | ||||
| 3722 | drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling SAGV\n" ); | |||
| 3723 | ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,sandybridge_pcode_write_timeout(dev_priv, 0x21, 0x3, 500, 0) | |||
| 3724 | GEN9_SAGV_ENABLE)sandybridge_pcode_write_timeout(dev_priv, 0x21, 0x3, 500, 0); | |||
| 3725 | ||||
| 3726 | /* We don't need to wait for SAGV when enabling */ | |||
| 3727 | ||||
| 3728 | /* | |||
| 3729 | * Some skl systems, pre-release machines in particular, | |||
| 3730 | * don't actually have SAGV. | |||
| 3731 | */ | |||
| 3732 | if (IS_SKYLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_SKYLAKE) && ret == -ENXIO6) { | |||
| 3733 | drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_DRIVER, "No SAGV found on system, ignoring\n" ); | |||
| 3734 | dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; | |||
| 3735 | return 0; | |||
| 3736 | } else if (ret < 0) { | |||
| 3737 | drm_err(&dev_priv->drm, "Failed to enable SAGV\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to enable SAGV\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); | |||
| 3738 | return ret; | |||
| 3739 | } | |||
| 3740 | ||||
| 3741 | dev_priv->sagv_status = I915_SAGV_ENABLED; | |||
| 3742 | return 0; | |||
| 3743 | } | |||
| 3744 | ||||
| 3745 | int | |||
| 3746 | intel_disable_sagv(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 3747 | { | |||
| 3748 | int ret; | |||
| 3749 | ||||
| 3750 | if (!intel_has_sagv(dev_priv)) | |||
| 3751 | return 0; | |||
| 3752 | ||||
| 3753 | if (dev_priv->sagv_status == I915_SAGV_DISABLED) | |||
| 3754 | return 0; | |||
| 3755 | ||||
| 3756 | drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabling SAGV\n" ); | |||
| 3757 | /* bspec says to keep retrying for at least 1 ms */ | |||
| 3758 | ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL0x21, | |||
| 3759 | GEN9_SAGV_DISABLE0x0, | |||
| 3760 | GEN9_SAGV_IS_DISABLED0x1, GEN9_SAGV_IS_DISABLED0x1, | |||
| 3761 | 1); | |||
| 3762 | /* | |||
| 3763 | * Some skl systems, pre-release machines in particular, | |||
| 3764 | * don't actually have SAGV. | |||
| 3765 | */ | |||
| 3766 | if (IS_SKYLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_SKYLAKE) && ret == -ENXIO6) { | |||
| 3767 | drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_DRIVER, "No SAGV found on system, ignoring\n" ); | |||
| 3768 | dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; | |||
| 3769 | return 0; | |||
| 3770 | } else if (ret < 0) { | |||
| 3771 | drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to disable SAGV (%d)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , ret); | |||
| 3772 | return ret; | |||
| 3773 | } | |||
| 3774 | ||||
| 3775 | dev_priv->sagv_status = I915_SAGV_DISABLED; | |||
| 3776 | return 0; | |||
| 3777 | } | |||
| 3778 | ||||
| 3779 | void intel_sagv_pre_plane_update(struct intel_atomic_state *state) | |||
| 3780 | { | |||
| 3781 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 3782 | const struct intel_bw_state *new_bw_state; | |||
| 3783 | const struct intel_bw_state *old_bw_state; | |||
| 3784 | u32 new_mask = 0; | |||
| 3785 | ||||
| 3786 | /* | |||
| 3787 | * Just return if we can't control SAGV or don't have it. | |||
| 3788 | * This is different from the situation where we have SAGV but can't | |||
| 3789 | * afford it due to a DBuf limitation - if SAGV is completely | |||
| 3790 | * disabled in the BIOS, we are not even allowed to send a PCode request, | |||
| 3791 | * as it will throw an error. So we have to check it here. | |||
| 3792 | */ | |||
| 3793 | if (!intel_has_sagv(dev_priv)) | |||
| 3794 | return; | |||
| 3795 | ||||
| 3796 | new_bw_state = intel_atomic_get_new_bw_state(state); | |||
| 3797 | if (!new_bw_state) | |||
| 3798 | return; | |||
| 3799 | ||||
| 3800 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) { | |||
| 3801 | intel_disable_sagv(dev_priv); | |||
| 3802 | return; | |||
| 3803 | } | |||
| 3804 | ||||
| 3805 | old_bw_state = intel_atomic_get_old_bw_state(state); | |||
| 3806 | /* | |||
| 3807 | * Nothing to mask | |||
| 3808 | */ | |||
| 3809 | if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask) | |||
| 3810 | return; | |||
| 3811 | ||||
| 3812 | new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; | |||
| 3813 | ||||
| 3814 | /* | |||
| 3815 | * If the new mask is zero there is nothing to mask here; | |||
| 3816 | * we can only unmask, which is handled in the unmask (post plane update) step. | |||
| 3817 | */ | |||
| 3818 | if (!new_mask) | |||
| 3819 | return; | |||
| 3820 | ||||
| 3821 | /* | |||
| 3822 | * Restrict required qgv points before updating the configuration. | |||
| 3823 | * According to BSpec we can't mask and unmask qgv points at the same | |||
| 3824 | * time. Also masking should be done before updating the configuration | |||
| 3825 | * and unmasking afterwards. | |||
| 3826 | */ | |||
| 3827 | icl_pcode_restrict_qgv_points(dev_priv, new_mask); | |||
| 3828 | } | |||
| 3829 | ||||
| 3830 | void intel_sagv_post_plane_update(struct intel_atomic_state *state) | |||
| 3831 | { | |||
| 3832 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 3833 | const struct intel_bw_state *new_bw_state; | |||
| 3834 | const struct intel_bw_state *old_bw_state; | |||
| 3835 | u32 new_mask = 0; | |||
| 3836 | ||||
| 3837 | /* | |||
| 3838 | * Just return if we can't control SAGV or don't have it. | |||
| 3839 | * This is different from the situation where we have SAGV but can't | |||
| 3840 | * afford it due to a DBuf limitation - if SAGV is completely | |||
| 3841 | * disabled in the BIOS, we are not even allowed to send a PCode request, | |||
| 3842 | * as it will throw an error. So we have to check it here. | |||
| 3843 | */ | |||
| 3844 | if (!intel_has_sagv(dev_priv)) | |||
| 3845 | return; | |||
| 3846 | ||||
| 3847 | new_bw_state = intel_atomic_get_new_bw_state(state); | |||
| 3848 | if (!new_bw_state) | |||
| 3849 | return; | |||
| 3850 | ||||
| 3851 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) { | |||
| 3852 | intel_enable_sagv(dev_priv); | |||
| 3853 | return; | |||
| 3854 | } | |||
| 3855 | ||||
| 3856 | old_bw_state = intel_atomic_get_old_bw_state(state); | |||
| 3857 | /* | |||
| 3858 | * Nothing to unmask | |||
| 3859 | */ | |||
| 3860 | if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask) | |||
| 3861 | return; | |||
| 3862 | ||||
| 3863 | new_mask = new_bw_state->qgv_points_mask; | |||
| 3864 | ||||
| 3865 | /* | |||
| 3866 | * Allow required qgv points after updating the configuration. | |||
| 3867 | * According to BSpec we can't mask and unmask qgv points at the same | |||
| 3868 | * time. Also masking should be done before updating the configuration | |||
| 3869 | * and unmasking afterwards. | |||
| 3870 | */ | |||
| 3871 | icl_pcode_restrict_qgv_points(dev_priv, new_mask); | |||
| 3872 | } | |||
| 3873 | ||||
| 3874 | static bool_Bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) | |||
| 3875 | { | |||
| 3876 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 3877 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 3878 | struct intel_plane *plane; | |||
| 3879 | const struct intel_plane_state *plane_state; | |||
| 3880 | int level, latency; | |||
| 3881 | ||||
| 3882 | if (!intel_has_sagv(dev_priv)) | |||
| 3883 | return false0; | |||
| 3884 | ||||
| 3885 | if (!crtc_state->hw.active) | |||
| 3886 | return true1; | |||
| 3887 | ||||
| 3888 | if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE(1<<4)) | |||
| 3889 | return false0; | |||
| 3890 | ||||
| 3891 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)for ((plane) = ({ const __typeof( ((__typeof(*(plane)) *)0)-> base.head ) *__mptr = ((&(((crtc_state)->uapi.state-> dev))->mode_config.plane_list)->next); (__typeof(*(plane )) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)) , base.head) );}); &(plane)->base.head != (&(((crtc_state )->uapi.state->dev))->mode_config.plane_list); (plane ) = ({ const __typeof( ((__typeof(*(plane)) *)0)->base.head ) *__mptr = ((plane)->base.head.next); (__typeof(*(plane) ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)), base.head) );})) if (!((((crtc_state)->uapi.plane_mask)) & drm_plane_mask(&(plane)->base))) {} else if (!((plane_state = ({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (__drm_atomic_get_current_plane_state((crtc_state )->uapi.state, &plane->base)); (struct intel_plane_state *)( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );})))) {} else { | |||
| 3892 | const struct skl_plane_wm *wm = | |||
| 3893 | &crtc_state->wm.skl.optimal.planes[plane->id]; | |||
| 3894 | ||||
| 3895 | /* Skip this plane if it's not enabled */ | |||
| 3896 | if (!wm->wm[0].plane_en) | |||
| 3897 | continue; | |||
| 3898 | ||||
| 3899 | /* Find the highest enabled wm level for this plane */ | |||
| 3900 | for (level = ilk_wm_max_level(dev_priv); | |||
| 3901 | !wm->wm[level].plane_en; --level) | |||
| 3902 | { } | |||
| 3903 | ||||
| 3904 | latency = dev_priv->wm.skl_latency[level]; | |||
| 3905 | ||||
| 3906 | if (skl_needs_memory_bw_wa(dev_priv) && | |||
| 3907 | plane_state->uapi.fb->modifier == | |||
| 3908 | I915_FORMAT_MOD_X_TILED((((__u64)0x01) << 56) | ((1) & 0x00ffffffffffffffULL ))) | |||
| 3909 | latency += 15; | |||
| 3910 | ||||
| 3911 | /* | |||
| 3912 | * If any of the planes on this pipe don't enable wm levels that | |||
| 3913 | * incur memory latencies higher than sagv_block_time_us we | |||
| 3914 | * can't enable SAGV. | |||
| 3915 | */ | |||
| 3916 | if (latency < dev_priv->sagv_block_time_us) | |||
| 3917 | return false0; | |||
| 3918 | } | |||
| 3919 | ||||
| 3920 | return true1; | |||
| 3921 | } | |||
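/*
 * Editor's sketch, not part of the driver: the per-plane check above in
 * isolation - walk down from the deepest watermark level to the highest one
 * that is actually enabled and require its latency to cover the SAGV block
 * time.  The array layout and names are hypothetical; level 0 is assumed to
 * be enabled so the loop terminates.
 */
static int example_plane_tolerates_sagv(const int *level_enabled,
					const unsigned int *latency_us,
					int max_level,
					unsigned int sagv_block_time_us)
{
	int level;

	for (level = max_level; !level_enabled[level]; level--)
		;

	return latency_us[level] >= sagv_block_time_us;
}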
| 3922 | ||||
| 3923 | static bool_Bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) | |||
| 3924 | { | |||
| 3925 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 3926 | enum plane_id plane_id; | |||
| 3927 | ||||
| 3928 | if (!crtc_state->hw.active) | |||
| 3929 | return true1; | |||
| 3930 | ||||
| 3931 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 3932 | const struct skl_ddb_entry *plane_alloc = | |||
| 3933 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 3934 | const struct skl_plane_wm *wm = | |||
| 3935 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 3936 | ||||
| 3937 | if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc) | |||
| 3938 | return false0; | |||
| 3939 | } | |||
| 3940 | ||||
| 3941 | return true1; | |||
| 3942 | } | |||
| 3943 | ||||
| 3944 | static bool_Bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) | |||
| 3945 | { | |||
| 3946 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 3947 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 3948 | ||||
| 3949 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) | |||
| 3950 | return tgl_crtc_can_enable_sagv(crtc_state); | |||
| 3951 | else | |||
| 3952 | return skl_crtc_can_enable_sagv(crtc_state); | |||
| 3953 | } | |||
| 3954 | ||||
| 3955 | bool_Bool intel_can_enable_sagv(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 3956 | const struct intel_bw_state *bw_state) | |||
| 3957 | { | |||
| 3958 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) < 11 && | |||
| 3959 | bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)(((bw_state->active_pipes) != 0) && (((bw_state-> active_pipes) - 1) & (bw_state->active_pipes)) == 0)) | |||
| 3960 | return false0; | |||
| 3961 | ||||
| 3962 | return bw_state->pipe_sagv_reject == 0; | |||
| 3963 | } | |||
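/*
 * Editor's note, not part of the driver: on pre-ICL hardware the check above
 * uses the power-of-two test to mean "at most one pipe active" - a non-zero
 * mask with more than one bit set is not a power of two.  A minimal
 * restatement with a hypothetical name:
 */
static inline int example_at_most_one_pipe(unsigned int active_pipes)
{
	/* e.g. 0x1 and 0x4 pass (single pipe), 0x5 fails (two pipes) */
	return active_pipes == 0 || (active_pipes & (active_pipes - 1)) == 0;
}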
| 3964 | ||||
| 3965 | static int intel_compute_sagv_mask(struct intel_atomic_state *state) | |||
| 3966 | { | |||
| 3967 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 3968 | int ret; | |||
| 3969 | struct intel_crtc *crtc; | |||
| 3970 | struct intel_crtc_state *new_crtc_state; | |||
| 3971 | struct intel_bw_state *new_bw_state = NULL((void *)0); | |||
| 3972 | const struct intel_bw_state *old_bw_state = NULL((void *)0); | |||
| 3973 | int i; | |||
| 3974 | ||||
| 3975 | for_each_new_intel_crtc_in_state(state, crtc,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else | |||
| 3976 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 3977 | new_bw_state = intel_atomic_get_bw_state(state); | |||
| 3978 | if (IS_ERR(new_bw_state)) | |||
| 3979 | return PTR_ERR(new_bw_state); | |||
| 3980 | ||||
| 3981 | old_bw_state = intel_atomic_get_old_bw_state(state); | |||
| 3982 | ||||
| 3983 | if (intel_crtc_can_enable_sagv(new_crtc_state)) | |||
| 3984 | new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe)(1UL << (crtc->pipe)); | |||
| 3985 | else | |||
| 3986 | new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe)(1UL << (crtc->pipe)); | |||
| 3987 | } | |||
| 3988 | ||||
| 3989 | if (!new_bw_state) | |||
| 3990 | return 0; | |||
| 3991 | ||||
| 3992 | new_bw_state->active_pipes = | |||
| 3993 | intel_calc_active_pipes(state, old_bw_state->active_pipes); | |||
| 3994 | ||||
| 3995 | if (new_bw_state->active_pipes != old_bw_state->active_pipes) { | |||
| 3996 | ret = intel_atomic_lock_global_state(&new_bw_state->base); | |||
| 3997 | if (ret) | |||
| 3998 | return ret; | |||
| 3999 | } | |||
| 4000 | ||||
| 4001 | for_each_new_intel_crtc_in_state(state, crtc,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else | |||
| 4002 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 4003 | struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; | |||
| 4004 | ||||
| 4005 | /* | |||
| 4006 | * We store use_sagv_wm in the crtc state rather than relying on | |||
| 4007 | * that bw state since we have no convenient way to get at the | |||
| 4008 | * latter from the plane commit hooks (especially in the legacy | |||
| 4009 | * cursor case) | |||
| 4010 | */ | |||
| 4011 | pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12 && | |||
| 4012 | intel_can_enable_sagv(dev_priv, new_bw_state); | |||
| 4013 | } | |||
| 4014 | ||||
| 4015 | if (intel_can_enable_sagv(dev_priv, new_bw_state) != | |||
| 4016 | intel_can_enable_sagv(dev_priv, old_bw_state)) { | |||
| 4017 | ret = intel_atomic_serialize_global_state(&new_bw_state->base); | |||
| 4018 | if (ret) | |||
| 4019 | return ret; | |||
| 4020 | } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { | |||
| 4021 | ret = intel_atomic_lock_global_state(&new_bw_state->base); | |||
| 4022 | if (ret) | |||
| 4023 | return ret; | |||
| 4024 | } | |||
| 4025 | ||||
| 4026 | return 0; | |||
| 4027 | } | |||
| 4028 | ||||
| 4029 | /* | |||
| 4030 | * Calculate initial DBuf slice offset, based on slice size | |||
| 4031 | * and mask (i.e. if slice size is 1024 and the second slice is enabled, | |||
| 4032 | * the offset would be 1024) | |||
| 4033 | */ | |||
| 4034 | static unsigned int | |||
| 4035 | icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask, | |||
| 4036 | u32 slice_size, | |||
| 4037 | u32 ddb_size) | |||
| 4038 | { | |||
| 4039 | unsigned int offset = 0; | |||
| 4040 | ||||
| 4041 | if (!dbuf_slice_mask) | |||
| 4042 | return 0; | |||
| 4043 | ||||
| 4044 | offset = (ffs(dbuf_slice_mask) - 1) * slice_size; | |||
| 4045 | ||||
| 4046 | WARN_ON(offset >= ddb_size)({ int __ret = !!((offset >= ddb_size)); if (__ret) printf ("%s", "WARN_ON(" "offset >= ddb_size" ")"); __builtin_expect (!!(__ret), 0); }); | |||
| 4047 | return offset; | |||
| 4048 | } | |||
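/*
 * Editor's sketch, not part of the driver: the offset computation above with
 * a worked example.  With a slice size of 1024, a mask selecting only the
 * second slice (0x2) yields offset 1024, while any mask including the first
 * slice (0x1, 0x3, ...) yields offset 0.  This version uses the compiler
 * builtin rather than the kernel's ffs(); names are hypothetical.
 */
static inline unsigned int example_first_slice_offset(unsigned int slice_mask,
						      unsigned int slice_size)
{
	if (!slice_mask)
		return 0;

	return (__builtin_ffs(slice_mask) - 1) * slice_size;
}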
| 4049 | ||||
| 4050 | u16 intel_get_ddb_size(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 4051 | { | |||
| 4052 | u16 ddb_size = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->ddb_size; | |||
| 4053 | drm_WARN_ON(&dev_priv->drm, ddb_size == 0)({ int __ret = !!((ddb_size == 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->dev), "" , "drm_WARN_ON(" "ddb_size == 0" ")"); __builtin_expect(!!(__ret ), 0); }); | |||
| 4054 | ||||
| 4055 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) < 11) | |||
| 4056 | return ddb_size - 4; /* 4 blocks for bypass path allocation */ | |||
| 4057 | ||||
| 4058 | return ddb_size; | |||
| 4059 | } | |||
| 4060 | ||||
| 4061 | u32 skl_ddb_dbuf_slice_mask(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 4062 | const struct skl_ddb_entry *entry) | |||
| 4063 | { | |||
| 4064 | u32 slice_mask = 0; | |||
| 4065 | u16 ddb_size = intel_get_ddb_size(dev_priv); | |||
| 4066 | u16 num_supported_slices = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->num_supported_dbuf_slices; | |||
| 4067 | u16 slice_size = ddb_size / num_supported_slices; | |||
| 4068 | u16 start_slice; | |||
| 4069 | u16 end_slice; | |||
| 4070 | ||||
| 4071 | if (!skl_ddb_entry_size(entry)) | |||
| 4072 | return 0; | |||
| 4073 | ||||
| 4074 | start_slice = entry->start / slice_size; | |||
| 4075 | end_slice = (entry->end - 1) / slice_size; | |||
| 4076 | ||||
| 4077 | /* | |||
| 4078 | * In the worst case a per-plane DDB entry can span multiple slices, | |||
| 4079 | * but a single entry is always contiguous. | |||
| 4080 | */ | |||
| 4081 | while (start_slice <= end_slice) { | |||
| 4082 | slice_mask |= BIT(start_slice)(1UL << (start_slice)); | |||
| 4083 | start_slice++; | |||
| 4084 | } | |||
| 4085 | ||||
| 4086 | return slice_mask; | |||
| 4087 | } | |||
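/*
 * Editor's sketch, not part of the driver: turning a [start, end) DDB range
 * into a slice bitmask as above.  With 1024-block slices, an entry covering
 * blocks 512..1536 touches slices 0 and 1 and yields mask 0x3.  Names are
 * hypothetical and no bounds checking is done.
 */
static unsigned int example_ddb_range_to_slice_mask(unsigned int start,
						    unsigned int end,
						    unsigned int slice_size)
{
	unsigned int mask = 0;
	unsigned int slice;

	if (end <= start)
		return 0;

	for (slice = start / slice_size; slice <= (end - 1) / slice_size; slice++)
		mask |= 1u << slice;

	return mask;
}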
| 4088 | ||||
| 4089 | static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, | |||
| 4090 | u8 active_pipes); | |||
| 4091 | ||||
| 4092 | static int | |||
| 4093 | skl_ddb_get_pipe_allocation_limits(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 4094 | const struct intel_crtc_state *crtc_state, | |||
| 4095 | const u64 total_data_rate, | |||
| 4096 | struct skl_ddb_entry *alloc, /* out */ | |||
| 4097 | int *num_active /* out */) | |||
| 4098 | { | |||
| 4099 | struct drm_atomic_state *state = crtc_state->uapi.state; | |||
| 4100 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state)({ const __typeof( ((struct intel_atomic_state *)0)->base ) *__mptr = (state); (struct intel_atomic_state *)( (char *)__mptr - __builtin_offsetof(struct intel_atomic_state, base) );}); | |||
| 4101 | struct drm_crtc *for_crtc = crtc_state->uapi.crtc; | |||
| 4102 | const struct intel_crtc *crtc; | |||
| 4103 | u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0; | |||
| 4104 | enum pipe for_pipe = to_intel_crtc(for_crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (for_crtc); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc, base) );})->pipe; | |||
| 4105 | struct intel_dbuf_state *new_dbuf_state = | |||
| 4106 | intel_atomic_get_new_dbuf_state(intel_state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(intel_state, &to_i915(intel_state->base.dev)->dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state, base) );}); | |||
| 4107 | const struct intel_dbuf_state *old_dbuf_state = | |||
| 4108 | intel_atomic_get_old_dbuf_state(intel_state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(intel_state, &to_i915(intel_state->base.dev)->dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state, base) );}); | |||
| 4109 | u8 active_pipes = new_dbuf_state->active_pipes; | |||
| 4110 | u16 ddb_size; | |||
| 4111 | u32 ddb_range_size; | |||
| 4112 | u32 i; | |||
| 4113 | u32 dbuf_slice_mask; | |||
| 4114 | u32 offset; | |||
| 4115 | u32 slice_size; | |||
| 4116 | u32 total_slice_mask; | |||
| 4117 | u32 start, end; | |||
| 4118 | int ret; | |||
| 4119 | ||||
| 4120 | *num_active = hweight8(active_pipes); | |||
| 4121 | ||||
| 4122 | if (!crtc_state->hw.active) { | |||
| 4123 | alloc->start = 0; | |||
| 4124 | alloc->end = 0; | |||
| 4125 | return 0; | |||
| 4126 | } | |||
| 4127 | ||||
| 4128 | ddb_size = intel_get_ddb_size(dev_priv); | |||
| 4129 | ||||
| 4130 | slice_size = ddb_size / INTEL_INFO(dev_priv)(&(dev_priv)->__info)->num_supported_dbuf_slices; | |||
| 4131 | ||||
| 4132 | /* | |||
| 4133 | * If the state doesn't change the active CRTCs or there is no | |||
| 4134 | * modeset request, then there's no need to recalculate; | |||
| 4135 | * the existing pipe allocation limits should remain unchanged. | |||
| 4136 | * Note that we're safe from racing commits since any racing commit | |||
| 4137 | * that changes the active CRTC list or does a modeset would need to | |||
| 4138 | * grab _all_ crtc locks, including the one we currently hold. | |||
| 4139 | */ | |||
| 4140 | if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes && | |||
| 4141 | !dev_priv->wm.distrust_bios_wm) { | |||
| 4142 | /* | |||
| 4143 | * alloc may be cleared by clear_intel_crtc_state, | |||
| 4144 | * copy from old state to be sure | |||
| 4145 | * | |||
| 4146 | * FIXME get rid of this mess | |||
| 4147 | */ | |||
| 4148 | *alloc = to_intel_crtc_state(for_crtc->state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (for_crtc->state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );})->wm.skl.ddb; | |||
| 4149 | return 0; | |||
| 4150 | } | |||
| 4151 | ||||
| 4152 | /* | |||
| 4153 | * Get allowed DBuf slices for correspondent pipe and platform. | |||
| 4154 | */ | |||
| 4155 | dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes); | |||
| 4156 | ||||
| 4157 | /* | |||
| 4158 | * Figure out at which DBuf slice we start, i.e. if we start at DBuf S2 | |||
| 4159 | * and the slice size is 1024, the offset would be 1024. | |||
| 4160 | */ | |||
| 4161 | offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask, | |||
| 4162 | slice_size, ddb_size); | |||
| 4163 | ||||
| 4164 | /* | |||
| 4165 | * Figure out the total size of the allowed DBuf slices, which is basically | |||
| 4166 | * the number of allowed slices for that pipe multiplied by the slice size. | |||
| 4167 | * Inside of this range, DDB entries are still allocated in proportion | |||
| 4168 | * to the display width. | |||
| 4169 | */ | |||
| 4170 | ddb_range_size = hweight8(dbuf_slice_mask) * slice_size; | |||
| 4171 | ||||
| 4172 | /* | |||
| 4173 | * Watermark/ddb requirement highly depends upon width of the | |||
| 4174 | * framebuffer, So instead of allocating DDB equally among pipes | |||
| 4175 | * distribute DDB based on resolution/width of the display. | |||
| 4176 | */ | |||
| 4177 | total_slice_mask = dbuf_slice_mask; | |||
| 4178 | for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i)for ((i) = 0; (i) < (intel_state)->base.dev->mode_config .num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = ((intel_state)->base.crtcs[i].ptr ); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc, base) );}), (crtc_state) = ({ const __typeof ( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((intel_state )->base.crtcs[i].new_state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 4179 | const struct drm_display_mode *adjusted_mode = | |||
| 4180 | &crtc_state->hw.adjusted_mode; | |||
| 4181 | enum pipe pipe = crtc->pipe; | |||
| 4182 | int hdisplay, vdisplay; | |||
| 4183 | u32 pipe_dbuf_slice_mask; | |||
| 4184 | ||||
| 4185 | if (!crtc_state->hw.active) | |||
| 4186 | continue; | |||
| 4187 | ||||
| 4188 | pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, | |||
| 4189 | active_pipes); | |||
| 4190 | ||||
| 4191 | /* | |||
| 4192 | * According to BSpec a pipe can share one DBuf slice with other | |||
| 4193 | * pipes, or a pipe can use multiple DBuf slices; in both cases we | |||
| 4194 | * account for other pipes only if they have exactly the same mask. | |||
| 4195 | * However, we still need to track how many slices should be enabled | |||
| 4196 | * in total. | |||
| 4197 | */ | |||
| 4198 | total_slice_mask |= pipe_dbuf_slice_mask; | |||
| 4199 | ||||
| 4200 | /* | |||
| 4201 | * Do not account for pipes using other slice sets. | |||
| 4202 | * Luckily, as of the current BSpec, slice sets do not partially | |||
| 4203 | * intersect (pipes share either the same single slice or the same | |||
| 4204 | * slice set, i.e. no partial intersection), so it is enough to check | |||
| 4205 | * for equality for now. | |||
| 4206 | */ | |||
| 4207 | if (dbuf_slice_mask != pipe_dbuf_slice_mask) | |||
| 4208 | continue; | |||
| 4209 | ||||
| 4210 | drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); | |||
| 4211 | ||||
| 4212 | total_width_in_range += hdisplay; | |||
| 4213 | ||||
| 4214 | if (pipe < for_pipe) | |||
| 4215 | width_before_pipe_in_range += hdisplay; | |||
| 4216 | else if (pipe == for_pipe) | |||
| 4217 | pipe_width = hdisplay; | |||
| 4218 | } | |||
| 4219 | ||||
| 4220 | /* | |||
| 4221 | * FIXME: For now we always enable slice S1 as per | |||
| 4222 | * the Bspec display initialization sequence. | |||
| 4223 | */ | |||
| 4224 | new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1)(1UL << (DBUF_S1)); | |||
| 4225 | ||||
| 4226 | if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) { | |||
| 4227 | ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); | |||
| 4228 | if (ret) | |||
| 4229 | return ret; | |||
| 4230 | } | |||
| 4231 | ||||
| 4232 | start = ddb_range_size * width_before_pipe_in_range / total_width_in_range; | |||
| 4233 | end = ddb_range_size * | |||
| 4234 | (width_before_pipe_in_range + pipe_width) / total_width_in_range; | |||
| 4235 | ||||
| 4236 | alloc->start = offset + start; | |||
| 4237 | alloc->end = offset + end; | |||
| 4238 | ||||
| 4239 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n" , for_crtc->base.id, for_crtc->name, dbuf_slice_mask, alloc ->start, alloc->end, active_pipes) | |||
| 4240 | "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n" , for_crtc->base.id, for_crtc->name, dbuf_slice_mask, alloc ->start, alloc->end, active_pipes) | |||
| 4241 | for_crtc->base.id, for_crtc->name,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n" , for_crtc->base.id, for_crtc->name, dbuf_slice_mask, alloc ->start, alloc->end, active_pipes) | |||
| 4242 | dbuf_slice_mask, alloc->start, alloc->end, active_pipes)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n" , for_crtc->base.id, for_crtc->name, dbuf_slice_mask, alloc ->start, alloc->end, active_pipes); | |||
| 4243 | ||||
| 4244 | return 0; | |||
| 4245 | } | |||
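/*
 * Editor's note, not part of the driver: the analyzer report this listing
 * comes from flags the "/ total_width_in_range" division above (source line
 * 4232) - on the reported path no pipe with a matching DBuf slice mask
 * contributes any width, so the divisor stays 0.  Whether that path is
 * reachable in practice is not established here; the sketch below only
 * illustrates how such a proportional split could be guarded.  All names
 * are hypothetical.
 */
static void example_split_ddb_range(unsigned int range_size,
				    unsigned int width_before,
				    unsigned int width,
				    unsigned int total_width,
				    unsigned int *start, unsigned int *end)
{
	if (total_width == 0) {
		/* nothing to share against - hand out an empty allocation */
		*start = 0;
		*end = 0;
		return;
	}

	*start = range_size * width_before / total_width;
	*end = range_size * (width_before + width) / total_width;
}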
| 4246 | ||||
| 4247 | static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, | |||
| 4248 | int width, const struct drm_format_info *format, | |||
| 4249 | u64 modifier, unsigned int rotation, | |||
| 4250 | u32 plane_pixel_rate, struct skl_wm_params *wp, | |||
| 4251 | int color_plane); | |||
| 4252 | static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, | |||
| 4253 | int level, | |||
| 4254 | unsigned int latency, | |||
| 4255 | const struct skl_wm_params *wp, | |||
| 4256 | const struct skl_wm_level *result_prev, | |||
| 4257 | struct skl_wm_level *result /* out */); | |||
| 4258 | ||||
| 4259 | static unsigned int | |||
| 4260 | skl_cursor_allocation(const struct intel_crtc_state *crtc_state, | |||
| 4261 | int num_active) | |||
| 4262 | { | |||
| 4263 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 4264 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 4265 | struct skl_wm_level wm = {}; | |||
| 4266 | int ret, min_ddb_alloc = 0; | |||
| 4267 | struct skl_wm_params wp; | |||
| 4268 | ||||
| 4269 | ret = skl_compute_wm_params(crtc_state, 256, | |||
| 4270 | drm_format_info(DRM_FORMAT_ARGB8888((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('2') << 16) | ((__u32)('4') << 24))), | |||
| 4271 | DRM_FORMAT_MOD_LINEAR((((__u64)0) << 56) | ((0) & 0x00ffffffffffffffULL) ), | |||
| 4272 | DRM_MODE_ROTATE_0(1<<0), | |||
| 4273 | crtc_state->pixel_rate, &wp, 0); | |||
| 4274 | drm_WARN_ON(&dev_priv->drm, ret)({ int __ret = !!((ret)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "ret" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 4275 | ||||
| 4276 | for (level = 0; level <= max_level; level++) { | |||
| 4277 | unsigned int latency = dev_priv->wm.skl_latency[level]; | |||
| 4278 | ||||
| 4279 | skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm); | |||
| 4280 | if (wm.min_ddb_alloc == U16_MAX0xffff) | |||
| 4281 | break; | |||
| 4282 | ||||
| 4283 | min_ddb_alloc = wm.min_ddb_alloc; | |||
| 4284 | } | |||
| 4285 | ||||
| 4286 | return max(num_active == 1 ? 32 : 8, min_ddb_alloc)(((num_active == 1 ? 32 : 8)>(min_ddb_alloc))?(num_active == 1 ? 32 : 8):(min_ddb_alloc)); | |||
| 4287 | } | |||
| 4288 | ||||
| 4289 | static void skl_ddb_entry_init_from_hw(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 4290 | struct skl_ddb_entry *entry, u32 reg) | |||
| 4291 | { | |||
| 4292 | ||||
| 4293 | entry->start = reg & DDB_ENTRY_MASK0x7FF; | |||
| 4294 | entry->end = (reg >> DDB_ENTRY_END_SHIFT16) & DDB_ENTRY_MASK0x7FF; | |||
| 4295 | ||||
| 4296 | if (entry->end) | |||
| 4297 | entry->end += 1; | |||
| 4298 | } | |||
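/*
 * Editor's sketch, not part of the driver: the register decode above in
 * isolation - an 11-bit start field in the low bits, an 11-bit end field at
 * bit 16, with a non-zero end converted from inclusive to exclusive.  Names
 * are hypothetical.
 */
static inline void example_decode_ddb_entry(unsigned int reg,
					    unsigned int *start,
					    unsigned int *end)
{
	*start = reg & 0x7ff;
	*end = (reg >> 16) & 0x7ff;

	if (*end)
		*end += 1;
}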
| 4299 | ||||
| 4300 | static void | |||
| 4301 | skl_ddb_get_hw_plane_state(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 4302 | const enum pipe pipe, | |||
| 4303 | const enum plane_id plane_id, | |||
| 4304 | struct skl_ddb_entry *ddb_y, | |||
| 4305 | struct skl_ddb_entry *ddb_uv) | |||
| 4306 | { | |||
| 4307 | u32 val, val2; | |||
| 4308 | u32 fourcc = 0; | |||
| 4309 | ||||
| 4310 | /* Cursor doesn't support NV12/planar, so no extra calculation needed */ | |||
| 4311 | if (plane_id == PLANE_CURSOR) { | |||
| 4312 | val = I915_READ(CUR_BUF_CFG(pipe))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((0x7017c) + (pipe) * ((0x7117c) - (0x7017c)))) }) )); | |||
| 4313 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | |||
| 4314 | return; | |||
| 4315 | } | |||
| 4316 | ||||
| 4317 | val = I915_READ(PLANE_CTL(pipe, plane_id))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x70180) + (pipe) * ((0x71180) - (0x70180)))) + (plane_id) * ((((0x70280) + (pipe) * ((0x71280) - (0x70280)) )) - (((0x70180) + (pipe) * ((0x71180) - (0x70180))))))) }))); | |||
| 4318 | ||||
| 4319 | /* No DDB allocated for disabled planes */ | |||
| 4320 | if (val & PLANE_CTL_ENABLE(1 << 31)) | |||
| 4321 | fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK(0xf << 24), | |||
| 4322 | val & PLANE_CTL_ORDER_RGBX(1 << 20), | |||
| 4323 | val & PLANE_CTL_ALPHA_MASK(0x3 << 4)); | |||
| 4324 | ||||
| 4325 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) { | |||
| 4326 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x7027c) + (pipe) * ((0x7127c) - (0x7027c)))) + (plane_id) * ((((0x7037c) + (pipe) * ((0x7137c) - (0x7037c)) )) - (((0x7027c) + (pipe) * ((0x7127c) - (0x7027c))))))) }))); | |||
| 4327 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | |||
| 4328 | } else { | |||
| 4329 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x7027c) + (pipe) * ((0x7127c) - (0x7027c)))) + (plane_id) * ((((0x7037c) + (pipe) * ((0x7137c) - (0x7037c)) )) - (((0x7027c) + (pipe) * ((0x7127c) - (0x7027c))))))) }))); | |||
| 4330 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x70278) + (pipe) * ((0x71278) - (0x70278)))) + (plane_id) * ((((0x70378) + (pipe) * ((0x71378) - (0x70378)) )) - (((0x70278) + (pipe) * ((0x71278) - (0x70278))))))) }))); | |||
| 4331 | ||||
| 4332 | if (fourcc && | |||
| 4333 | drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) | |||
| 4334 | swap(val, val2)do { __typeof(val) __tmp = (val); (val) = (val2); (val2) = __tmp ; } while(0); | |||
| 4335 | ||||
| 4336 | skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); | |||
| 4337 | skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2); | |||
| 4338 | } | |||
| 4339 | } | |||
| 4340 | ||||
| 4341 | void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, | |||
| 4342 | struct skl_ddb_entry *ddb_y, | |||
| 4343 | struct skl_ddb_entry *ddb_uv) | |||
| 4344 | { | |||
| 4345 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 4346 | enum intel_display_power_domain power_domain; | |||
| 4347 | enum pipe pipe = crtc->pipe; | |||
| 4348 | intel_wakeref_t wakeref; | |||
| 4349 | enum plane_id plane_id; | |||
| 4350 | ||||
| 4351 | power_domain = POWER_DOMAIN_PIPE(pipe)((pipe) + POWER_DOMAIN_PIPE_A); | |||
| 4352 | wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); | |||
| 4353 | if (!wakeref) | |||
| 4354 | return; | |||
| 4355 | ||||
| 4356 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else | |||
| 4357 | skl_ddb_get_hw_plane_state(dev_priv, pipe, | |||
| 4358 | plane_id, | |||
| 4359 | &ddb_y[plane_id], | |||
| 4360 | &ddb_uv[plane_id]); | |||
| 4361 | ||||
| 4362 | intel_display_power_put(dev_priv, power_domain, wakeref); | |||
| 4363 | } | |||
| 4364 | ||||
| 4365 | /* | |||
| 4366 | * Determines the downscale amount of a plane for the purposes of watermark calculations. | |||
| 4367 | * The bspec defines downscale amount as: | |||
| 4368 | * | |||
| 4369 | * """ | |||
| 4370 | * Horizontal down scale amount = maximum[1, Horizontal source size / | |||
| 4371 | * Horizontal destination size] | |||
| 4372 | * Vertical down scale amount = maximum[1, Vertical source size / | |||
| 4373 | * Vertical destination size] | |||
| 4374 | * Total down scale amount = Horizontal down scale amount * | |||
| 4375 | * Vertical down scale amount | |||
| 4376 | * """ | |||
| 4377 | * | |||
| 4378 | * Return value is provided in 16.16 fixed point form to retain fractional part. | |||
| 4379 | * Caller should take care of dividing & rounding off the value. | |||
| 4380 | */ | |||
| 4381 | static uint_fixed_16_16_t | |||
| 4382 | skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, | |||
| 4383 | const struct intel_plane_state *plane_state) | |||
| 4384 | { | |||
| 4385 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 4386 | u32 src_w, src_h, dst_w, dst_h; | |||
| 4387 | uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; | |||
| 4388 | uint_fixed_16_16_t downscale_h, downscale_w; | |||
| 4389 | ||||
| 4390 | if (drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state ))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 4391 | !intel_wm_plane_visible(crtc_state, plane_state))({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state ))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); })) | |||
| 4392 | return u32_to_fixed16(0); | |||
| 4393 | ||||
| 4394 | /* | |||
| 4395 | * Src coordinates are already rotated by 270 degrees for | |||
| 4396 | * the 90/270 degree plane rotation cases (to match the | |||
| 4397 | * GTT mapping), hence no need to account for rotation here. | |||
| 4398 | * | |||
| 4399 | * n.b., src is 16.16 fixed point, dst is whole integer. | |||
| 4400 | */ | |||
| 4401 | src_w = drm_rect_width(&plane_state->uapi.src) >> 16; | |||
| 4402 | src_h = drm_rect_height(&plane_state->uapi.src) >> 16; | |||
| 4403 | dst_w = drm_rect_width(&plane_state->uapi.dst); | |||
| 4404 | dst_h = drm_rect_height(&plane_state->uapi.dst); | |||
| 4405 | ||||
| 4406 | fp_w_ratio = div_fixed16(src_w, dst_w); | |||
| 4407 | fp_h_ratio = div_fixed16(src_h, dst_h); | |||
| 4408 | downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); | |||
| 4409 | downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); | |||
| 4410 | ||||
| 4411 | return mul_fixed16(downscale_w, downscale_h); | |||
| 4412 | } | |||
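| | ||||
| | /* | |||
| | * Editor's illustration (not part of the driver): the downscale math | |||
| | * described above, reduced to plain 16.16 integer arithmetic. The | |||
| | * helper name example_downscale_16_16() is hypothetical. E.g. a | |||
| | * 3840x2160 source scanned out to a 1920x1080 window yields | |||
| | * 2.0 x 2.0 = 4.0 (0x40000), while any upscale clamps to 1.0 as the | |||
| | * bspec quote above requires. | |||
| | */ | |||
| | static inline u32 example_downscale_16_16(u32 src_w, u32 dst_w, | |||
| | u32 src_h, u32 dst_h) | |||
| | { | |||
| | u64 w_ratio, h_ratio; | |||
| | ||||
| | if (!dst_w || !dst_h) | |||
| | return 1 << 16; /* avoid dividing by zero; treat as 1.0 */ | |||
| | ||||
| | w_ratio = ((u64)src_w << 16) / dst_w; /* 16.16 */ | |||
| | h_ratio = ((u64)src_h << 16) / dst_h; /* 16.16 */ | |||
| | ||||
| | if (w_ratio < (1 << 16)) | |||
| | w_ratio = 1 << 16; /* never report upscaling */ | |||
| | if (h_ratio < (1 << 16)) | |||
| | h_ratio = 1 << 16; | |||
| | ||||
| | /* 16.16 * 16.16 is 32.32; shift back down to 16.16 */ | |||
| | return (u32)((w_ratio * h_ratio) >> 16); | |||
| | } | |||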
| 4413 | ||||
| 4414 | struct dbuf_slice_conf_entry { | |||
| 4415 | u8 active_pipes; | |||
| 4416 | u8 dbuf_mask[I915_MAX_PIPES]; | |||
| 4417 | }; | |||
| 4418 | ||||
| 4419 | /* | |||
| 4420 | * Table taken from Bspec 12716 | |||
| 4421 | * Pipes do have some preferred DBuf slice affinity, | |||
| 4422 | * plus there are some hardcoded requirements on how | |||
| 4423 | * those should be distributed for multipipe scenarios. | |||
| 4424 | * With more DBuf slices the algorithm can get even more messy | |||
| 4425 | * and less readable, so we decided to use a table almost | |||
| 4426 | * as-is from the BSpec itself - that way it is at least easier | |||
| 4427 | * to compare, change and check. | |||
| 4428 | */ | |||
| 4429 | static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = | |||
| 4430 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ | |||
| 4431 | { | |||
| 4432 | { | |||
| 4433 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)), | |||
| 4434 | .dbuf_mask = { | |||
| 4435 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4436 | }, | |||
| 4437 | }, | |||
| 4438 | { | |||
| 4439 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)), | |||
| 4440 | .dbuf_mask = { | |||
| 4441 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4442 | }, | |||
| 4443 | }, | |||
| 4444 | { | |||
| 4445 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)), | |||
| 4446 | .dbuf_mask = { | |||
| 4447 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4448 | [PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4449 | }, | |||
| 4450 | }, | |||
| 4451 | { | |||
| 4452 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4453 | .dbuf_mask = { | |||
| 4454 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4455 | }, | |||
| 4456 | }, | |||
| 4457 | { | |||
| 4458 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4459 | .dbuf_mask = { | |||
| 4460 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4461 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4462 | }, | |||
| 4463 | }, | |||
| 4464 | { | |||
| 4465 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4466 | .dbuf_mask = { | |||
| 4467 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4468 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4469 | }, | |||
| 4470 | }, | |||
| 4471 | { | |||
| 4472 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4473 | .dbuf_mask = { | |||
| 4474 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4475 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4476 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4477 | }, | |||
| 4478 | }, | |||
| 4479 | {} | |||
| 4480 | }; | |||
| 4481 | ||||
| 4482 | /* | |||
| 4483 | * Table taken from Bspec 49255 | |||
| 4484 | * Pipes do have some preferred DBuf slice affinity, | |||
| 4485 | * plus there are some hardcoded requirements on how | |||
| 4486 | * those should be distributed for multipipe scenarios. | |||
| 4487 | * With more DBuf slices the algorithm can get even more messy | |||
| 4488 | * and less readable, so we decided to use a table almost | |||
| 4489 | * as-is from the BSpec itself - that way it is at least easier | |||
| 4490 | * to compare, change and check. | |||
| 4491 | */ | |||
| 4492 | static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = | |||
| 4493 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ | |||
| 4494 | { | |||
| 4495 | { | |||
| 4496 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)), | |||
| 4497 | .dbuf_mask = { | |||
| 4498 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4499 | }, | |||
| 4500 | }, | |||
| 4501 | { | |||
| 4502 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)), | |||
| 4503 | .dbuf_mask = { | |||
| 4504 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4505 | }, | |||
| 4506 | }, | |||
| 4507 | { | |||
| 4508 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)), | |||
| 4509 | .dbuf_mask = { | |||
| 4510 | [PIPE_A] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4511 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4512 | }, | |||
| 4513 | }, | |||
| 4514 | { | |||
| 4515 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4516 | .dbuf_mask = { | |||
| 4517 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4518 | }, | |||
| 4519 | }, | |||
| 4520 | { | |||
| 4521 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4522 | .dbuf_mask = { | |||
| 4523 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4524 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4525 | }, | |||
| 4526 | }, | |||
| 4527 | { | |||
| 4528 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4529 | .dbuf_mask = { | |||
| 4530 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4531 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4532 | }, | |||
| 4533 | }, | |||
| 4534 | { | |||
| 4535 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), | |||
| 4536 | .dbuf_mask = { | |||
| 4537 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4538 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4539 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4540 | }, | |||
| 4541 | }, | |||
| 4542 | { | |||
| 4543 | .active_pipes = BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4544 | .dbuf_mask = { | |||
| 4545 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4546 | }, | |||
| 4547 | }, | |||
| 4548 | { | |||
| 4549 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4550 | .dbuf_mask = { | |||
| 4551 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4552 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4553 | }, | |||
| 4554 | }, | |||
| 4555 | { | |||
| 4556 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4557 | .dbuf_mask = { | |||
| 4558 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4559 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4560 | }, | |||
| 4561 | }, | |||
| 4562 | { | |||
| 4563 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4564 | .dbuf_mask = { | |||
| 4565 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4566 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4567 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4568 | }, | |||
| 4569 | }, | |||
| 4570 | { | |||
| 4571 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4572 | .dbuf_mask = { | |||
| 4573 | [PIPE_C] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4574 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4575 | }, | |||
| 4576 | }, | |||
| 4577 | { | |||
| 4578 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4579 | .dbuf_mask = { | |||
| 4580 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4581 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4582 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4583 | }, | |||
| 4584 | }, | |||
| 4585 | { | |||
| 4586 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4587 | .dbuf_mask = { | |||
| 4588 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4589 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4590 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4591 | }, | |||
| 4592 | }, | |||
| 4593 | { | |||
| 4594 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), | |||
| 4595 | .dbuf_mask = { | |||
| 4596 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4597 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), | |||
| 4598 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4599 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), | |||
| 4600 | }, | |||
| 4601 | }, | |||
| 4602 | {} | |||
| 4603 | }; | |||
| 4604 | ||||
| 4605 | static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, | |||
| 4606 | const struct dbuf_slice_conf_entry *dbuf_slices) | |||
| 4607 | { | |||
| 4608 | int i; | |||
| 4609 | ||||
| | /* the tables above end with an empty (active_pipes == 0) sentinel */ | |||
| 4610 | for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { | |||
| 4611 | if (dbuf_slices[i].active_pipes == active_pipes) | |||
| 4612 | return dbuf_slices[i].dbuf_mask[pipe]; | |||
| 4613 | } | |||
| 4614 | return 0; | |||
| 4615 | } | |||
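| | ||||
| | /* | |||
| | * Editor's illustration (not part of the driver): how the table lookup | |||
| | * above is used. For example, with pipes A and B active on TGL, the | |||
| | * tgl_allowed_dbufs entry for BIT(PIPE_A) | BIT(PIPE_B) assigns | |||
| | * DBUF_S2 to pipe A and DBUF_S1 to pipe B, so | |||
| | * | |||
| | * compute_dbuf_slices(PIPE_A, BIT(PIPE_A) | BIT(PIPE_B), | |||
| | * tgl_allowed_dbufs) returns BIT(DBUF_S2) | |||
| | * compute_dbuf_slices(PIPE_B, BIT(PIPE_A) | BIT(PIPE_B), | |||
| | * tgl_allowed_dbufs) returns BIT(DBUF_S1) | |||
| | * | |||
| | * A pipe combination missing from the table falls through to the | |||
| | * "return 0" above, i.e. no DBuf slice is claimed for that pipe. | |||
| | */ | |||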
| 4616 | ||||
| 4617 | /* | |||
| 4618 | * This function finds an entry with same enabled pipe configuration and | |||
| 4619 | * returns correspondent DBuf slice mask as stated in BSpec for particular | |||
| 4620 | * platform. | |||
| 4621 | */ | |||
| 4622 | static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) | |||
| 4623 | { | |||
| 4624 | /* | |||
| 4625 | * FIXME: For ICL this is still a bit unclear as prev BSpec revision | |||
| 4626 | * required calculating "pipe ratio" in order to determine | |||
| 4627 | * if one or two slices can be used for single pipe configurations | |||
| 4628 | * as an additional constraint to the existing table. | |||
| 4629 | * However, based on recent info, it should not be "pipe ratio" | |||
| 4630 | * but rather the ratio between pixel_rate and cdclk with additional | |||
| 4631 | * constants, so for now we are using only the table until this is | |||
| 4632 | * clarified. This is also why a crtc_state param may need to come | |||
| 4633 | * back here - we will need it once those additional constraints | |||
| 4634 | * pop up. | |||
| 4635 | */ | |||
| 4636 | return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); | |||
| 4637 | } | |||
| 4638 | ||||
| 4639 | static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) | |||
| 4640 | { | |||
| 4641 | return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); | |||
| 4642 | } | |||
| 4643 | ||||
| 4644 | static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, | |||
| 4645 | u8 active_pipes) | |||
| 4646 | { | |||
| 4647 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 4648 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 4649 | enum pipe pipe = crtc->pipe; | |||
| 4650 | ||||
| 4651 | if (IS_GEN(dev_priv, 12)(0 + (&(dev_priv)->__info)->gen == (12))) | |||
| 4652 | return tgl_compute_dbuf_slices(pipe, active_pipes); | |||
| 4653 | else if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11))) | |||
| 4654 | return icl_compute_dbuf_slices(pipe, active_pipes); | |||
| 4655 | /* | |||
| 4656 | * For anything else just return one slice for now. | |||
| 4657 | * Should be extended for other platforms. | |||
| 4658 | */ | |||
| 4659 | return active_pipes & BIT(pipe)(1UL << (pipe)) ? BIT(DBUF_S1)(1UL << (DBUF_S1)) : 0; | |||
| 4660 | } | |||
| 4661 | ||||
| 4662 | static u64 | |||
| 4663 | skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, | |||
| 4664 | const struct intel_plane_state *plane_state, | |||
| 4665 | int color_plane) | |||
| 4666 | { | |||
| 4667 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane)({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = (plane_state->uapi.plane); (struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base) );}); | |||
| 4668 | const struct drm_framebuffer *fb = plane_state->hw.fb; | |||
| 4669 | u32 data_rate; | |||
| 4670 | u32 width = 0, height = 0; | |||
| 4671 | uint_fixed_16_16_t down_scale_amount; | |||
| 4672 | u64 rate; | |||
| 4673 | ||||
| 4674 | if (!plane_state->uapi.visible) | |||
| 4675 | return 0; | |||
| 4676 | ||||
| 4677 | if (plane->id == PLANE_CURSOR) | |||
| 4678 | return 0; | |||
| 4679 | ||||
| 4680 | if (color_plane == 1 && | |||
| 4681 | !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) | |||
| 4682 | return 0; | |||
| 4683 | ||||
| 4684 | /* | |||
| 4685 | * Src coordinates are already rotated by 270 degrees for | |||
| 4686 | * the 90/270 degree plane rotation cases (to match the | |||
| 4687 | * GTT mapping), hence no need to account for rotation here. | |||
| 4688 | */ | |||
| 4689 | width = drm_rect_width(&plane_state->uapi.src) >> 16; | |||
| 4690 | height = drm_rect_height(&plane_state->uapi.src) >> 16; | |||
| 4691 | ||||
| 4692 | /* UV plane does 1/2 pixel sub-sampling */ | |||
| 4693 | if (color_plane == 1) { | |||
| 4694 | width /= 2; | |||
| 4695 | height /= 2; | |||
| 4696 | } | |||
| 4697 | ||||
| 4698 | data_rate = width * height; | |||
| 4699 | ||||
| 4700 | down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state); | |||
| 4701 | ||||
| 4702 | rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); | |||
| 4703 | ||||
| 4704 | rate *= fb->format->cpp[color_plane]; | |||
| 4705 | return rate; | |||
| 4706 | } | |||
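| | ||||
| | /* | |||
| | * Worked example (editor's illustration, not part of the driver): for | |||
| | * a hypothetical full-screen 1920x1080 NV12 plane with no downscaling, | |||
| | * the function above gives | |||
| | * | |||
| | * color_plane 0 (Y): 1920 * 1080 * 1 cpp = 2073600 | |||
| | * color_plane 1 (UV): 960 * 540 * 2 cpp = 1036800 | |||
| | * | |||
| | * i.e. the UV plane contributes half of the Y rate, which is what the | |||
| | * 1/2 sub-sampling combined with 2 bytes per UV sample works out to. | |||
| | */ | |||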
| 4707 | ||||
| 4708 | static u64 | |||
| 4709 | skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, | |||
| 4710 | u64 *plane_data_rate, | |||
| 4711 | u64 *uv_plane_data_rate) | |||
| 4712 | { | |||
| 4713 | struct intel_plane *plane; | |||
| 4714 | const struct intel_plane_state *plane_state; | |||
| 4715 | u64 total_data_rate = 0; | |||
| 4716 | ||||
| 4717 | /* Calculate and cache data rate for each plane */ | |||
| 4718 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)for ((plane) = ({ const __typeof( ((__typeof(*(plane)) *)0)-> base.head ) *__mptr = ((&(((crtc_state)->uapi.state-> dev))->mode_config.plane_list)->next); (__typeof(*(plane )) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)) , base.head) );}); &(plane)->base.head != (&(((crtc_state )->uapi.state->dev))->mode_config.plane_list); (plane ) = ({ const __typeof( ((__typeof(*(plane)) *)0)->base.head ) *__mptr = ((plane)->base.head.next); (__typeof(*(plane) ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)), base.head) );})) if (!((((crtc_state)->uapi.plane_mask)) & drm_plane_mask(&(plane)->base))) {} else if (!((plane_state = ({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (__drm_atomic_get_current_plane_state((crtc_state )->uapi.state, &plane->base)); (struct intel_plane_state *)( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );})))) {} else { | |||
| 4719 | enum plane_id plane_id = plane->id; | |||
| 4720 | u64 rate; | |||
| 4721 | ||||
| 4722 | /* packed/y */ | |||
| 4723 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); | |||
| 4724 | plane_data_rate[plane_id] = rate; | |||
| 4725 | total_data_rate += rate; | |||
| 4726 | ||||
| 4727 | /* uv-plane */ | |||
| 4728 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); | |||
| 4729 | uv_plane_data_rate[plane_id] = rate; | |||
| 4730 | total_data_rate += rate; | |||
| 4731 | } | |||
| 4732 | ||||
| 4733 | return total_data_rate; | |||
| 4734 | } | |||
| 4735 | ||||
| 4736 | static u64 | |||
| 4737 | icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, | |||
| 4738 | u64 *plane_data_rate) | |||
| 4739 | { | |||
| 4740 | struct intel_plane *plane; | |||
| 4741 | const struct intel_plane_state *plane_state; | |||
| 4742 | u64 total_data_rate = 0; | |||
| 4743 | ||||
| 4744 | /* Calculate and cache data rate for each plane */ | |||
| 4745 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)for ((plane) = ({ const __typeof( ((__typeof(*(plane)) *)0)-> base.head ) *__mptr = ((&(((crtc_state)->uapi.state-> dev))->mode_config.plane_list)->next); (__typeof(*(plane )) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)) , base.head) );}); &(plane)->base.head != (&(((crtc_state )->uapi.state->dev))->mode_config.plane_list); (plane ) = ({ const __typeof( ((__typeof(*(plane)) *)0)->base.head ) *__mptr = ((plane)->base.head.next); (__typeof(*(plane) ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*(plane)), base.head) );})) if (!((((crtc_state)->uapi.plane_mask)) & drm_plane_mask(&(plane)->base))) {} else if (!((plane_state = ({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (__drm_atomic_get_current_plane_state((crtc_state )->uapi.state, &plane->base)); (struct intel_plane_state *)( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );})))) {} else { | |||
| 4746 | enum plane_id plane_id = plane->id; | |||
| 4747 | u64 rate; | |||
| 4748 | ||||
| 4749 | if (!plane_state->planar_linked_plane) { | |||
| 4750 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); | |||
| 4751 | plane_data_rate[plane_id] = rate; | |||
| 4752 | total_data_rate += rate; | |||
| 4753 | } else { | |||
| 4754 | enum plane_id y_plane_id; | |||
| 4755 | ||||
| 4756 | /* | |||
| 4757 | * The slave plane might not be iterated by | |||
| 4758 | * intel_atomic_crtc_state_for_each_plane_state(), | |||
| 4759 | * and it needs the master plane state, which may be | |||
| 4760 | * NULL if we try to fetch it via get_new_plane_state(), so we | |||
| 4761 | * always calculate from the master. | |||
| 4762 | */ | |||
| 4763 | if (plane_state->planar_slave) | |||
| 4764 | continue; | |||
| 4765 | ||||
| 4766 | /* Y plane rate is calculated on the slave */ | |||
| 4767 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); | |||
| 4768 | y_plane_id = plane_state->planar_linked_plane->id; | |||
| 4769 | plane_data_rate[y_plane_id] = rate; | |||
| 4770 | total_data_rate += rate; | |||
| 4771 | ||||
| 4772 | rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); | |||
| 4773 | plane_data_rate[plane_id] = rate; | |||
| 4774 | total_data_rate += rate; | |||
| 4775 | } | |||
| 4776 | } | |||
| 4777 | ||||
| 4778 | return total_data_rate; | |||
| 4779 | } | |||
| 4780 | ||||
| 4781 | static const struct skl_wm_level * | |||
| 4782 | skl_plane_wm_level(const struct intel_crtc_state *crtc_state, | |||
| 4783 | enum plane_id plane_id, | |||
| 4784 | int level) | |||
| 4785 | { | |||
| 4786 | const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; | |||
| 4787 | const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; | |||
| 4788 | ||||
| 4789 | if (level == 0 && pipe_wm->use_sagv_wm) | |||
| 4790 | return &wm->sagv_wm0; | |||
| 4791 | ||||
| 4792 | return &wm->wm[level]; | |||
| 4793 | } | |||
| 4794 | ||||
| 4795 | static int | |||
| 4796 | skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) | |||
| 4797 | { | |||
| 4798 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 4799 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 4800 | struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; | |||
| 4801 | u16 alloc_size, start = 0; | |||
| 4802 | u16 total[I915_MAX_PLANES] = {}; | |||
| 4803 | u16 uv_total[I915_MAX_PLANES] = {}; | |||
| 4804 | u64 total_data_rate; | |||
| 4805 | enum plane_id plane_id; | |||
| 4806 | int num_active; | |||
| 4807 | u64 plane_data_rate[I915_MAX_PLANES] = {}; | |||
| 4808 | u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; | |||
| 4809 | u32 blocks; | |||
| 4810 | int level; | |||
| 4811 | int ret; | |||
| 4812 | ||||
| 4813 | /* Clear the partitioning for disabled planes. */ | |||
| 4814 | memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y))__builtin_memset((crtc_state->wm.skl.plane_ddb_y), (0), (sizeof (crtc_state->wm.skl.plane_ddb_y))); | |||
| 4815 | memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv))__builtin_memset((crtc_state->wm.skl.plane_ddb_uv), (0), ( sizeof(crtc_state->wm.skl.plane_ddb_uv))); | |||
| 4816 | ||||
| 4817 | if (!crtc_state->hw.active) { | |||
| 4818 | struct intel_atomic_state *state = | |||
| 4819 | to_intel_atomic_state(crtc_state->uapi.state)({ const __typeof( ((struct intel_atomic_state *)0)->base ) *__mptr = (crtc_state->uapi.state); (struct intel_atomic_state *)( (char *)__mptr - __builtin_offsetof(struct intel_atomic_state , base) );}); | |||
| 4820 | struct intel_dbuf_state *new_dbuf_state = | |||
| 4821 | intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); | |||
| 4822 | const struct intel_dbuf_state *old_dbuf_state = | |||
| 4823 | intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); | |||
| 4824 | ||||
| 4825 | /* | |||
| 4826 | * FIXME hack to make sure we compute this sensibly when | |||
| 4827 | * turning off all the pipes. Otherwise we leave it at | |||
| 4828 | * whatever we had previously, and then runtime PM will | |||
| 4829 | * mess it up by turning off all but S1. Remove this | |||
| 4830 | * once the dbuf state computation flow becomes sane. | |||
| 4831 | */ | |||
| 4832 | if (new_dbuf_state->active_pipes == 0) { | |||
| 4833 | new_dbuf_state->enabled_slices = BIT(DBUF_S1)(1UL << (DBUF_S1)); | |||
| 4834 | ||||
| 4835 | if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) { | |||
| 4836 | ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); | |||
| 4837 | if (ret) | |||
| 4838 | return ret; | |||
| 4839 | } | |||
| 4840 | } | |||
| 4841 | ||||
| 4842 | alloc->start = alloc->end = 0; | |||
| 4843 | return 0; | |||
| 4844 | } | |||
| 4845 | ||||
| 4846 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) | |||
| 4847 | total_data_rate = | |||
| 4848 | icl_get_total_relative_data_rate(crtc_state, | |||
| 4849 | plane_data_rate); | |||
| 4850 | else | |||
| 4851 | total_data_rate = | |||
| 4852 | skl_get_total_relative_data_rate(crtc_state, | |||
| 4853 | plane_data_rate, | |||
| 4854 | uv_plane_data_rate); | |||
| 4855 | ||||
| 4856 | ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, | |||
| 4857 | total_data_rate, | |||
| 4858 | alloc, &num_active); | |||
| 4859 | if (ret) | |||
| 4860 | return ret; | |||
| 4861 | ||||
| 4862 | alloc_size = skl_ddb_entry_size(alloc); | |||
| 4863 | if (alloc_size == 0) | |||
| 4864 | return 0; | |||
| 4865 | ||||
| 4866 | /* Allocate fixed number of blocks for cursor. */ | |||
| 4867 | total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); | |||
| 4868 | alloc_size -= total[PLANE_CURSOR]; | |||
| 4869 | crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = | |||
| 4870 | alloc->end - total[PLANE_CURSOR]; | |||
| 4871 | crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; | |||
| 4872 | ||||
| 4873 | if (total_data_rate == 0) | |||
| 4874 | return 0; | |||
| 4875 | ||||
| 4876 | /* | |||
| 4877 | * Find the highest watermark level for which we can satisfy the block | |||
| 4878 | * requirement of active planes. | |||
| 4879 | */ | |||
| 4880 | for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { | |||
| 4881 | blocks = 0; | |||
| 4882 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 4883 | const struct skl_plane_wm *wm = | |||
| 4884 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 4885 | ||||
| 4886 | if (plane_id == PLANE_CURSOR) { | |||
| 4887 | if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { | |||
| 4888 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((wm->wm[level].min_ddb_alloc != 0xffff)) ; if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "wm->wm[level].min_ddb_alloc != 0xffff" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 4889 | wm->wm[level].min_ddb_alloc != U16_MAX)({ int __ret = !!((wm->wm[level].min_ddb_alloc != 0xffff)) ; if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "wm->wm[level].min_ddb_alloc != 0xffff" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 4890 | blocks = U32_MAX0xffffffffU; | |||
| 4891 | break; | |||
| 4892 | } | |||
| 4893 | continue; | |||
| 4894 | } | |||
| 4895 | ||||
| 4896 | blocks += wm->wm[level].min_ddb_alloc; | |||
| 4897 | blocks += wm->uv_wm[level].min_ddb_alloc; | |||
| 4898 | } | |||
| 4899 | ||||
| 4900 | if (blocks <= alloc_size) { | |||
| 4901 | alloc_size -= blocks; | |||
| 4902 | break; | |||
| 4903 | } | |||
| 4904 | } | |||
| 4905 | ||||
| 4906 | if (level < 0) { | |||
| 4907 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Requested display configuration exceeds system DDB limitations" ) | |||
| 4908 | "Requested display configuration exceeds system DDB limitations")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Requested display configuration exceeds system DDB limitations" ); | |||
| 4909 | drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "minimum required %d/%d\n" , blocks, alloc_size) | |||
| 4910 | blocks, alloc_size)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "minimum required %d/%d\n" , blocks, alloc_size); | |||
| 4911 | return -EINVAL22; | |||
| 4912 | } | |||
| 4913 | ||||
| 4914 | /* | |||
| 4915 | * Grant each plane the blocks it requires at the highest achievable | |||
| 4916 | * watermark level, plus an extra share of the leftover blocks | |||
| 4917 | * proportional to its relative data rate. | |||
| 4918 | */ | |||
| 4919 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 4920 | const struct skl_plane_wm *wm = | |||
| 4921 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 4922 | u64 rate; | |||
| 4923 | u16 extra; | |||
| 4924 | ||||
| 4925 | if (plane_id == PLANE_CURSOR) | |||
| 4926 | continue; | |||
| 4927 | ||||
| 4928 | /* | |||
| 4929 | * We've accounted for all active planes; remaining planes are | |||
| 4930 | * all disabled. | |||
| 4931 | */ | |||
| 4932 | if (total_data_rate == 0) | |||
| 4933 | break; | |||
| 4934 | ||||
| 4935 | rate = plane_data_rate[plane_id]; | |||
| 4936 | extra = min_t(u16, alloc_size,({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }) | |||
| 4937 | DIV64_U64_ROUND_UP(alloc_size * rate,({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }) | |||
| 4938 | total_data_rate))({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 4939 | total[plane_id] = wm->wm[level].min_ddb_alloc + extra; | |||
| 4940 | alloc_size -= extra; | |||
| 4941 | total_data_rate -= rate; | |||
| 4942 | ||||
| 4943 | if (total_data_rate == 0) | |||
| 4944 | break; | |||
| 4945 | ||||
| 4946 | rate = uv_plane_data_rate[plane_id]; | |||
| 4947 | extra = min_t(u16, alloc_size,({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }) | |||
| 4948 | DIV64_U64_ROUND_UP(alloc_size * rate,({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }) | |||
| 4949 | total_data_rate))({ u16 __min_a = (alloc_size); u16 __min_b = (({ uint64_t _t = (total_data_rate); div64_u64((alloc_size * rate) + _t - 1, _t ); })); __min_a < __min_b ? __min_a : __min_b; }); | |||
| 4950 | uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra; | |||
| 4951 | alloc_size -= extra; | |||
| 4952 | total_data_rate -= rate; | |||
| 4953 | } | |||
| 4954 | drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0)({ int __ret = !!((alloc_size != 0 || total_data_rate != 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv ->drm))->dev), "", "drm_WARN_ON(" "alloc_size != 0 || total_data_rate != 0" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 4955 | ||||
| 4956 | /* Set the actual DDB start/end points for each plane */ | |||
| 4957 | start = alloc->start; | |||
| 4958 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 4959 | struct skl_ddb_entry *plane_alloc = | |||
| 4960 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 4961 | struct skl_ddb_entry *uv_plane_alloc = | |||
| 4962 | &crtc_state->wm.skl.plane_ddb_uv[plane_id]; | |||
| 4963 | ||||
| 4964 | if (plane_id == PLANE_CURSOR) | |||
| 4965 | continue; | |||
| 4966 | ||||
| 4967 | /* Gen11+ uses a separate plane for UV watermarks */ | |||
| 4968 | drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((((&(dev_priv)->__info)->gen) >= 11 && uv_total[plane_id])); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->dev), "" , "drm_WARN_ON(" "((&(dev_priv)->__info)->gen) >= 11 && uv_total[plane_id]" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 4969 | INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id])({ int __ret = !!((((&(dev_priv)->__info)->gen) >= 11 && uv_total[plane_id])); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->dev), "" , "drm_WARN_ON(" "((&(dev_priv)->__info)->gen) >= 11 && uv_total[plane_id]" ")"); __builtin_expect(!!(__ret), 0); }); | |||
| 4970 | ||||
| 4971 | /* Leave disabled planes at (0,0) */ | |||
| 4972 | if (total[plane_id]) { | |||
| 4973 | plane_alloc->start = start; | |||
| 4974 | start += total[plane_id]; | |||
| 4975 | plane_alloc->end = start; | |||
| 4976 | } | |||
| 4977 | ||||
| 4978 | if (uv_total[plane_id]) { | |||
| 4979 | uv_plane_alloc->start = start; | |||
| 4980 | start += uv_total[plane_id]; | |||
| 4981 | uv_plane_alloc->end = start; | |||
| 4982 | } | |||
| 4983 | } | |||
| 4984 | ||||
| 4985 | /* | |||
| 4986 | * When we calculated watermark values we didn't know how high | |||
| 4987 | * of a level we'd actually be able to hit, so we just marked | |||
| 4988 | * all levels as "enabled." Go back now and disable the ones | |||
| 4989 | * that aren't actually possible. | |||
| 4990 | */ | |||
| 4991 | for (level++; level <= ilk_wm_max_level(dev_priv); level++) { | |||
| 4992 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 4993 | struct skl_plane_wm *wm = | |||
| 4994 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 4995 | ||||
| 4996 | /* | |||
| 4997 | * We only disable the watermarks for each plane if | |||
| 4998 | * they exceed the ddb allocation of said plane. This | |||
| 4999 | * is done so that we don't end up touching cursor | |||
| 5000 | * watermarks needlessly when some other plane reduces | |||
| 5001 | * our max possible watermark level. | |||
| 5002 | * | |||
| 5003 | * Bspec has this to say about the PLANE_WM enable bit: | |||
| 5004 | * "All the watermarks at this level for all enabled | |||
| 5005 | * planes must be enabled before the level will be used." | |||
| 5006 | * So this is actually safe to do. | |||
| 5007 | */ | |||
| 5008 | if (wm->wm[level].min_ddb_alloc > total[plane_id] || | |||
| 5009 | wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id]) | |||
| 5010 | memset(&wm->wm[level], 0, sizeof(wm->wm[level]))__builtin_memset((&wm->wm[level]), (0), (sizeof(wm-> wm[level]))); | |||
| 5011 | ||||
| 5012 | /* | |||
| 5013 | * Wa_1408961008:icl, ehl | |||
| 5014 | * Underruns with WM1+ disabled | |||
| 5015 | */ | |||
| 5016 | if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11)) && | |||
| 5017 | level == 1 && wm->wm[0].plane_en) { | |||
| 5018 | wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; | |||
| 5019 | wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; | |||
| 5020 | wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; | |||
| 5021 | } | |||
| 5022 | } | |||
| 5023 | } | |||
| 5024 | ||||
| 5025 | /* | |||
| 5026 | * Go back and disable the transition watermark if it turns out we | |||
| 5027 | * don't have enough DDB blocks for it. | |||
| 5028 | */ | |||
| 5029 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 5030 | struct skl_plane_wm *wm = | |||
| 5031 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 5032 | ||||
| 5033 | if (wm->trans_wm.plane_res_b >= total[plane_id]) | |||
| 5034 | memset(&wm->trans_wm, 0, sizeof(wm->trans_wm))__builtin_memset((&wm->trans_wm), (0), (sizeof(wm-> trans_wm))); | |||
| 5035 | } | |||
| 5036 | ||||
| 5037 | return 0; | |||
| 5038 | } | |||
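| | ||||
| | /* | |||
| | * Editor's illustration (not part of the driver): the proportional | |||
| | * "extra blocks" split used above, reduced to plain integers. The | |||
| | * helper name example_share_of_leftover() is hypothetical. Each plane | |||
| | * gets ceil(alloc_size * rate / total_rate) of the leftover blocks, | |||
| | * clamped to what is left in the pool, and both the pool and the | |||
| | * remaining total rate shrink as the loop above advances. | |||
| | */ | |||
| | static inline u16 example_share_of_leftover(u16 alloc_size, | |||
| | u64 rate, u64 total_rate) | |||
| | { | |||
| | u64 extra; | |||
| | ||||
| | if (total_rate == 0) | |||
| | return 0; | |||
| | ||||
| | /* mirrors DIV64_U64_ROUND_UP(alloc_size * rate, total_rate) */ | |||
| | extra = (alloc_size * rate + total_rate - 1) / total_rate; | |||
| | ||||
| | /* never hand out more than what is left in the pool */ | |||
| | return extra > alloc_size ? alloc_size : (u16)extra; | |||
| | } | |||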
| 5039 | ||||
| 5040 | /* | |||
| 5041 | * The max latency should be 257 (max the punit can code is 255 and we add 2us | |||
| 5042 | * for the read latency) and cpp should always be <= 8, so that | |||
| 5043 | * should allow pixel_rate up to ~2 GHz which seems sufficient since max | |||
| 5044 | * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. | |||
| 5045 | */ | |||
| 5046 | static uint_fixed_16_16_t | |||
| 5047 | skl_wm_method1(const struct drm_i915_privateinteldrm_softc *dev_priv, u32 pixel_rate, | |||
| 5048 | u8 cpp, u32 latency, u32 dbuf_block_size) | |||
| 5049 | { | |||
| 5050 | u32 wm_intermediate_val; | |||
| 5051 | uint_fixed_16_16_t ret; | |||
| 5052 | ||||
| 5053 | if (latency == 0) | |||
| 5054 | return FP_16_16_MAX((uint_fixed_16_16_t){ .val = 0xffffffffU }); | |||
| 5055 | ||||
| 5056 | wm_intermediate_val = latency * pixel_rate * cpp; | |||
| 5057 | ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); | |||
| 5058 | ||||
| 5059 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 10 || IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) | |||
| 5060 | ret = add_fixed16_u32(ret, 1); | |||
| 5061 | ||||
| 5062 | return ret; | |||
| 5063 | } | |||
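| | ||||
| | /* | |||
| | * Editor's note (illustrative arithmetic, not part of the driver): | |||
| | * checking the headroom claim in the comment above. With latency | |||
| | * <= 257 us, cpp <= 8 and pixel_rate in kHz, the intermediate | |||
| | * latency * pixel_rate * cpp stays below 2^32 for pixel rates up to | |||
| | * about 2^32 / (257 * 8) ~= 2089000 kHz, i.e. roughly 2 GHz - | |||
| | * comfortably above the ~1350 MHz 2xcdclk ceiling mentioned above. | |||
| | */ | |||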
| 5064 | ||||
| 5065 | static uint_fixed_16_16_t | |||
| 5066 | skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, | |||
| 5067 | uint_fixed_16_16_t plane_blocks_per_line) | |||
| 5068 | { | |||
| 5069 | u32 wm_intermediate_val; | |||
| 5070 | uint_fixed_16_16_t ret; | |||
| 5071 | ||||
| 5072 | if (latency == 0) | |||
| 5073 | return FP_16_16_MAX((uint_fixed_16_16_t){ .val = 0xffffffffU }); | |||
| 5074 | ||||
| 5075 | wm_intermediate_val = latency * pixel_rate; | |||
| 5076 | wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,(((wm_intermediate_val) + ((pipe_htotal * 1000) - 1)) / (pipe_htotal * 1000)) | |||
| 5077 | pipe_htotal * 1000)(((wm_intermediate_val) + ((pipe_htotal * 1000) - 1)) / (pipe_htotal * 1000)); | |||
| 5078 | ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); | |||
| 5079 | return ret; | |||
| 5080 | } | |||
| 5081 | ||||
| 5082 | static uint_fixed_16_16_t | |||
| 5083 | intel_get_linetime_us(const struct intel_crtc_state *crtc_state) | |||
| 5084 | { | |||
| 5085 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5086 | u32 pixel_rate; | |||
| 5087 | u32 crtc_htotal; | |||
| 5088 | uint_fixed_16_16_t linetime_us; | |||
| 5089 | ||||
| 5090 | if (!crtc_state->hw.active) | |||
| 5091 | return u32_to_fixed16(0); | |||
| 5092 | ||||
| 5093 | pixel_rate = crtc_state->pixel_rate; | |||
| 5094 | ||||
| 5095 | if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0)({ int __ret = !!((pixel_rate == 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->dev), "" , "drm_WARN_ON(" "pixel_rate == 0" ")"); __builtin_expect(!!( __ret), 0); })) | |||
| 5096 | return u32_to_fixed16(0); | |||
| 5097 | ||||
| 5098 | crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal; | |||
| 5099 | linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); | |||
| 5100 | ||||
| 5101 | return linetime_us; | |||
| 5102 | } | |||
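| | ||||
| | /* | |||
| | * Worked example (editor's illustration, not part of the driver): with | |||
| | * pixel_rate in kHz the division above yields the line time in | |||
| | * microseconds. For a hypothetical 1080p60 timing (crtc_htotal = 2200, | |||
| | * pixel_rate = 148500), linetime_us = 2200 * 1000 / 148500 ~= 14.8 us, | |||
| | * returned in 16.16 fixed point. | |||
| | */ | |||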
| 5103 | ||||
| 5104 | static u32 | |||
| 5105 | skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state, | |||
| 5106 | const struct intel_plane_state *plane_state) | |||
| 5107 | { | |||
| 5108 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5109 | u64 adjusted_pixel_rate; | |||
| 5110 | uint_fixed_16_16_t downscale_amount; | |||
| 5111 | ||||
| 5112 | /* Shouldn't reach here on disabled planes... */ | |||
| 5113 | if (drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state ))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); }) | |||
| 5114 | !intel_wm_plane_visible(crtc_state, plane_state))({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state ))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& dev_priv->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); })) | |||
| 5115 | return 0; | |||
| 5116 | ||||
| 5117 | /* | |||
| 5118 | * Adjusted plane pixel rate is just the pipe's adjusted pixel rate | |||
| 5119 | * with additional adjustments for plane-specific scaling. | |||
| 5120 | */ | |||
| 5121 | adjusted_pixel_rate = crtc_state->pixel_rate; | |||
| 5122 | downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state); | |||
| 5123 | ||||
| 5124 | return mul_round_up_u32_fixed16(adjusted_pixel_rate, | |||
| 5125 | downscale_amount); | |||
| 5126 | } | |||
| 5127 | ||||
| 5128 | static int | |||
| 5129 | skl_compute_wm_params(const struct intel_crtc_state *crtc_state, | |||
| 5130 | int width, const struct drm_format_info *format, | |||
| 5131 | u64 modifier, unsigned int rotation, | |||
| 5132 | u32 plane_pixel_rate, struct skl_wm_params *wp, | |||
| 5133 | int color_plane) | |||
| 5134 | { | |||
| 5135 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); | |||
| 5136 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 5137 | u32 interm_pbpl; | |||
| 5138 | ||||
| 5139 | /* only planar formats have two planes */ | |||
| 5140 | if (color_plane == 1 && | |||
| 5141 | !intel_format_info_is_yuv_semiplanar(format, modifier)) { | |||
| 5142 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Non-planar formats have a single plane\n" ) | |||
| 5143 | "Non-planar formats have a single plane\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Non-planar formats have a single plane\n" ); | |||
| 5144 | return -EINVAL22; | |||
| 5145 | } | |||
| 5146 | ||||
| 5147 | wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED((((__u64)0x01) << 56) | ((2) & 0x00ffffffffffffffULL )) || | |||
| 5148 | modifier == I915_FORMAT_MOD_Yf_TILED((((__u64)0x01) << 56) | ((3) & 0x00ffffffffffffffULL )) || | |||
| 5149 | modifier == I915_FORMAT_MOD_Y_TILED_CCS((((__u64)0x01) << 56) | ((4) & 0x00ffffffffffffffULL )) || | |||
| 5150 | modifier == I915_FORMAT_MOD_Yf_TILED_CCS((((__u64)0x01) << 56) | ((5) & 0x00ffffffffffffffULL )); | |||
| 5151 | wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED((((__u64)0x01) << 56) | ((1) & 0x00ffffffffffffffULL )); | |||
| 5152 | wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS((((__u64)0x01) << 56) | ((4) & 0x00ffffffffffffffULL )) || | |||
| 5153 | modifier == I915_FORMAT_MOD_Yf_TILED_CCS((((__u64)0x01) << 56) | ((5) & 0x00ffffffffffffffULL )); | |||
| 5154 | wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); | |||
| 5155 | ||||
| 5156 | wp->width = width; | |||
| 5157 | if (color_plane == 1 && wp->is_planar) | |||
| 5158 | wp->width /= 2; | |||
| 5159 | ||||
| 5160 | wp->cpp = format->cpp[color_plane]; | |||
| 5161 | wp->plane_pixel_rate = plane_pixel_rate; | |||
| 5162 | ||||
| 5163 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11 && | |||
| 5164 | modifier == I915_FORMAT_MOD_Yf_TILED((((__u64)0x01) << 56) | ((3) & 0x00ffffffffffffffULL )) && wp->cpp == 1) | |||
| 5165 | wp->dbuf_block_size = 256; | |||
| 5166 | else | |||
| 5167 | wp->dbuf_block_size = 512; | |||
| 5168 | ||||
| 5169 | if (drm_rotation_90_or_270(rotation)) { | |||
| 5170 | switch (wp->cpp) { | |||
| 5171 | case 1: | |||
| 5172 | wp->y_min_scanlines = 16; | |||
| 5173 | break; | |||
| 5174 | case 2: | |||
| 5175 | wp->y_min_scanlines = 8; | |||
| 5176 | break; | |||
| 5177 | case 4: | |||
| 5178 | wp->y_min_scanlines = 4; | |||
| 5179 | break; | |||
| 5180 | default: | |||
| 5181 | MISSING_CASE(wp->cpp)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "wp->cpp", (long)(wp->cpp)); __builtin_expect(!!(__ret ), 0); }); | |||
| 5182 | return -EINVAL22; | |||
| 5183 | } | |||
| 5184 | } else { | |||
| 5185 | wp->y_min_scanlines = 4; | |||
| 5186 | } | |||
| 5187 | ||||
| 5188 | if (skl_needs_memory_bw_wa(dev_priv)) | |||
| 5189 | wp->y_min_scanlines *= 2; | |||
| 5190 | ||||
| 5191 | wp->plane_bytes_per_line = wp->width * wp->cpp; | |||
| 5192 | if (wp->y_tiled) { | |||
| 5193 | interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size)) | |||
| 5194 | wp->y_min_scanlines,(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size)) | |||
| 5195 | wp->dbuf_block_size)(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size)); | |||
| 5196 | ||||
| 5197 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 10 || IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) | |||
| 5198 | interm_pbpl++; | |||
| 5199 | ||||
| 5200 | wp->plane_blocks_per_line = div_fixed16(interm_pbpl, | |||
| 5201 | wp->y_min_scanlines); | |||
| 5202 | } else { | |||
| 5203 | interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,(((wp->plane_bytes_per_line) + ((wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size)) | |||
| 5204 | wp->dbuf_block_size)(((wp->plane_bytes_per_line) + ((wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size)); | |||
| 5205 | ||||
| 5206 | if (!wp->x_tiled || | |||
| 5207 | INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 10 || IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) | |||
| 5208 | interm_pbpl++; | |||
| 5209 | ||||
| 5210 | wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); | |||
| 5211 | } | |||
| 5212 | ||||
| 5213 | wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, | |||
| 5214 | wp->plane_blocks_per_line); | |||
| 5215 | ||||
| 5216 | wp->linetime_us = fixed16_to_u32_round_up( | |||
| 5217 | intel_get_linetime_us(crtc_state)); | |||
| 5218 | ||||
| 5219 | return 0; | |||
| 5220 | } | |||
| 5221 | ||||
| 5222 | static int | |||
| 5223 | skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, | |||
| 5224 | const struct intel_plane_state *plane_state, | |||
| 5225 | struct skl_wm_params *wp, int color_plane) | |||
| 5226 | { | |||
| 5227 | const struct drm_framebuffer *fb = plane_state->hw.fb; | |||
| 5228 | int width; | |||
| 5229 | ||||
| 5230 | /* | |||
| 5231 | * Src coordinates are already rotated by 270 degrees for | |||
| 5232 | * the 90/270 degree plane rotation cases (to match the | |||
| 5233 | * GTT mapping), hence no need to account for rotation here. | |||
| 5234 | */ | |||
| 5235 | width = drm_rect_width(&plane_state->uapi.src) >> 16; | |||
| 5236 | ||||
| 5237 | return skl_compute_wm_params(crtc_state, width, | |||
| 5238 | fb->format, fb->modifier, | |||
| 5239 | plane_state->hw.rotation, | |||
| 5240 | skl_adjusted_plane_pixel_rate(crtc_state, plane_state), | |||
| 5241 | wp, color_plane); | |||
| 5242 | } | |||
| 5243 | ||||
| 5244 | static bool_Bool skl_wm_has_lines(struct drm_i915_privateinteldrm_softc *dev_priv, int level) | |||
| 5245 | { | |||
| 5246 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 10 || IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) | |||
| 5247 | return true1; | |||
| 5248 | ||||
| 5249 | /* The number of lines is ignored for the level 0 watermark. */ | |||
| 5250 | return level > 0; | |||
| 5251 | } | |||
| 5252 | ||||
| 5253 | static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, | |||
| 5254 | int level, | |||
| 5255 | unsigned int latency, | |||
| 5256 | const struct skl_wm_params *wp, | |||
| 5257 | const struct skl_wm_level *result_prev, | |||
| 5258 | struct skl_wm_level *result /* out */) | |||
| 5259 | { | |||
| 5260 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5261 | uint_fixed_16_16_t method1, method2; | |||
| 5262 | uint_fixed_16_16_t selected_result; | |||
| 5263 | u32 res_blocks, res_lines, min_ddb_alloc = 0; | |||
| 5264 | ||||
| 5265 | if (latency == 0) { | |||
| 5266 | /* reject it */ | |||
| 5267 | result->min_ddb_alloc = U16_MAX0xffff; | |||
| 5268 | return; | |||
| 5269 | } | |||
| 5270 | ||||
| 5271 | /* | |||
| 5272 | * WaIncreaseLatencyIPCEnabled: kbl,cfl | |||
| 5273 | * Display WA #1141: kbl,cfl | |||
| 5274 | */ | |||
| 5275 | if ((IS_KABYLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_KABYLAKE) || | |||
| 5276 | IS_COFFEELAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) || | |||
| 5277 | IS_COMETLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_COMETLAKE)) && | |||
| 5278 | dev_priv->ipc_enabled) | |||
| 5279 | latency += 4; | |||
| 5280 | ||||
| 5281 | if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled) | |||
| 5282 | latency += 15; | |||
| 5283 | ||||
| 5284 | method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, | |||
| 5285 | wp->cpp, latency, wp->dbuf_block_size); | |||
| 5286 | method2 = skl_wm_method2(wp->plane_pixel_rate, | |||
| 5287 | crtc_state->hw.adjusted_mode.crtc_htotal, | |||
| 5288 | latency, | |||
| 5289 | wp->plane_blocks_per_line); | |||
| 5290 | ||||
| 5291 | if (wp->y_tiled) { | |||
| 5292 | selected_result = max_fixed16(method2, wp->y_tile_minimum); | |||
| 5293 | } else { | |||
| 5294 | if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal / | |||
| 5295 | wp->dbuf_block_size < 1) && | |||
| 5296 | (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { | |||
| 5297 | selected_result = method2; | |||
| 5298 | } else if (latency >= wp->linetime_us) { | |||
| 5299 | if (IS_GEN(dev_priv, 9)(0 + (&(dev_priv)->__info)->gen == (9)) && | |||
| 5300 | !IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) | |||
| 5301 | selected_result = min_fixed16(method1, method2); | |||
| 5302 | else | |||
| 5303 | selected_result = method2; | |||
| 5304 | } else { | |||
| 5305 | selected_result = method1; | |||
| 5306 | } | |||
| 5307 | } | |||
| 5308 | ||||
| 5309 | res_blocks = fixed16_to_u32_round_up(selected_result) + 1; | |||
| 5310 | res_lines = div_round_up_fixed16(selected_result, | |||
| 5311 | wp->plane_blocks_per_line); | |||
| 5312 | ||||
| 5313 | if (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) && !((&(dev_priv)->__info)->is_lp)) || IS_BROXTON(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROXTON)) { | |||
| 5314 | /* Display WA #1125: skl,bxt,kbl */ | |||
| 5315 | if (level == 0 && wp->rc_surface) | |||
| 5316 | res_blocks += | |||
| 5317 | fixed16_to_u32_round_up(wp->y_tile_minimum); | |||
| 5318 | ||||
| 5319 | /* Display WA #1126: skl,bxt,kbl */ | |||
| 5320 | if (level >= 1 && level <= 7) { | |||
| 5321 | if (wp->y_tiled) { | |||
| 5322 | res_blocks += | |||
| 5323 | fixed16_to_u32_round_up(wp->y_tile_minimum); | |||
| 5324 | res_lines += wp->y_min_scanlines; | |||
| 5325 | } else { | |||
| 5326 | res_blocks++; | |||
| 5327 | } | |||
| 5328 | ||||
| 5329 | /* | |||
| 5330 | * Make sure result blocks for higher latency levels are | |||
| 5331 | * at least as high as the level below the current level. | |||
| 5332 | * Assumption in DDB algorithm optimization for special | |||
| 5333 | * cases. Also covers Display WA #1125 for RC. | |||
| 5334 | */ | |||
| 5335 | if (result_prev->plane_res_b > res_blocks) | |||
| 5336 | res_blocks = result_prev->plane_res_b; | |||
| 5337 | } | |||
| 5338 | } | |||
| 5339 | ||||
| 5340 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) { | |||
| 5341 | if (wp->y_tiled) { | |||
| 5342 | int extra_lines; | |||
| 5343 | ||||
| 5344 | if (res_lines % wp->y_min_scanlines == 0) | |||
| 5345 | extra_lines = wp->y_min_scanlines; | |||
| 5346 | else | |||
| 5347 | extra_lines = wp->y_min_scanlines * 2 - | |||
| 5348 | res_lines % wp->y_min_scanlines; | |||
| 5349 | ||||
| 5350 | min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines, | |||
| 5351 | wp->plane_blocks_per_line); | |||
| 5352 | } else { | |||
| 5353 | min_ddb_alloc = res_blocks + | |||
| 5354 | DIV_ROUND_UP(res_blocks, 10)(((res_blocks) + ((10) - 1)) / (10)); | |||
| 5355 | } | |||
| 5356 | } | |||
| 5357 | ||||
| 5358 | if (!skl_wm_has_lines(dev_priv, level)) | |||
| 5359 | res_lines = 0; | |||
| 5360 | ||||
| 5361 | if (res_lines > 31) { | |||
| 5362 | /* reject it */ | |||
| 5363 | result->min_ddb_alloc = U16_MAX0xffff; | |||
| 5364 | return; | |||
| 5365 | } | |||
| 5366 | ||||
| 5367 | /* | |||
| 5368 | * If res_lines is valid, assume we can use this watermark level | |||
| 5369 | * for now. We'll come back and disable it after we calculate the | |||
| 5370 | * DDB allocation if it turns out we don't actually have enough | |||
| 5371 | * blocks to satisfy it. | |||
| 5372 | */ | |||
| 5373 | result->plane_res_b = res_blocks; | |||
| 5374 | result->plane_res_l = res_lines; | |||
| 5375 | /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ | |||
| 5376 | result->min_ddb_alloc = max(min_ddb_alloc, res_blocks)(((min_ddb_alloc)>(res_blocks))?(min_ddb_alloc):(res_blocks )) + 1; | |||
| 5377 | result->plane_en = true1; | |||
| 5378 | } | |||
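| | ||||
| | /* | |||
| | * Worked example (editor's illustration, not part of the driver) for | |||
| | * the gen11+ rounding above: a Y-tiled plane with res_lines = 10, | |||
| | * y_min_scanlines = 4 and plane_blocks_per_line = 2.0 gets | |||
| | * extra_lines = 4 * 2 - (10 % 4) = 6, so min_ddb_alloc = | |||
| | * (10 + 6) * 2 = 32 blocks: res_lines is rounded up to the next tile | |||
| | * row boundary plus one extra tile row's worth of blocks. | |||
| | */ | |||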
| 5379 | ||||
| 5380 | static void | |||
| 5381 | skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, | |||
| 5382 | const struct skl_wm_params *wm_params, | |||
| 5383 | struct skl_wm_level *levels) | |||
| 5384 | { | |||
| 5385 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5386 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 5387 | struct skl_wm_level *result_prev = &levels[0]; | |||
| 5388 | ||||
| 5389 | for (level = 0; level <= max_level; level++) { | |||
| 5390 | struct skl_wm_level *result = &levels[level]; | |||
| 5391 | unsigned int latency = dev_priv->wm.skl_latency[level]; | |||
| 5392 | ||||
| 5393 | skl_compute_plane_wm(crtc_state, level, latency, | |||
| 5394 | wm_params, result_prev, result); | |||
| 5395 | ||||
| 5396 | result_prev = result; | |||
| 5397 | } | |||
| 5398 | } | |||
| 5399 | ||||
| 5400 | static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, | |||
| 5401 | const struct skl_wm_params *wm_params, | |||
| 5402 | struct skl_plane_wm *plane_wm) | |||
| 5403 | { | |||
| 5404 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5405 | struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0; | |||
| 5406 | struct skl_wm_level *levels = plane_wm->wm; | |||
| 5407 | unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; | |||
| 5408 | ||||
| 5409 | skl_compute_plane_wm(crtc_state, 0, latency, | |||
| 5410 | wm_params, &levels[0], | |||
| 5411 | sagv_wm); | |||
| 5412 | } | |||
| 5413 | ||||
| 5414 | static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, | |||
| 5415 | const struct skl_wm_params *wp, | |||
| 5416 | struct skl_plane_wm *wm) | |||
| 5417 | { | |||
| 5418 | struct drm_device *dev = crtc_state->uapi.crtc->dev; | |||
| 5419 | const struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(dev); | |||
| 5420 | u16 trans_min, trans_amount, trans_y_tile_min; | |||
| 5421 | u16 wm0_sel_res_b, trans_offset_b, res_blocks; | |||
| 5422 | ||||
| 5423 | /* Transition WMs don't make any sense if IPC is disabled */ | |||
| 5424 | if (!dev_priv->ipc_enabled) | |||
| 5425 | return; | |||
| 5426 | ||||
| 5427 | /* | |||
| 5428 | * WaDisableTWM:skl,kbl,cfl,bxt | |||
| 5429 | * Transition WMs are not recommended by the HW team for GEN9 | |||
| 5430 | */ | |||
| 5431 | if (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) && !((&(dev_priv)->__info)->is_lp)) || IS_BROXTON(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROXTON)) | |||
| 5432 | return; | |||
| 5433 | ||||
| 5434 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) | |||
| 5435 | trans_min = 4; | |||
| 5436 | else | |||
| 5437 | trans_min = 14; | |||
| 5438 | ||||
| 5439 | /* Display WA #1140: glk,cnl */ | |||
| 5440 | if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) | |||
| 5441 | trans_amount = 0; | |||
| 5442 | else | |||
| 5443 | trans_amount = 10; /* This is configurable amount */ | |||
| 5444 | ||||
| 5445 | trans_offset_b = trans_min + trans_amount; | |||
| 5446 | ||||
| 5447 | /* | |||
| 5448 | * The spec asks for Selected Result Blocks for wm0 (the real value), | |||
| 5449 | * not Result Blocks (the integer value). Pay attention to the capital | |||
| 5450 | * letters. The value wm_l0->plane_res_b is actually Result Blocks, but | |||
| 5451 | * since Result Blocks is the ceiling of Selected Result Blocks plus 1, | |||
| 5452 | * and since we later will have to get the ceiling of the sum in the | |||
| 5453 | * transition watermarks calculation, we can just pretend Selected | |||
| 5454 | * Result Blocks is Result Blocks minus 1 and it should work for the | |||
| 5455 | * current platforms. | |||
| 5456 | */ | |||
| 5457 | wm0_sel_res_b = wm->wm[0].plane_res_b - 1; | |||
| 5458 | ||||
| 5459 | if (wp->y_tiled) { | |||
| 5460 | trans_y_tile_min = | |||
| 5461 | (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); | |||
| 5462 | res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + | |||
| 5463 | trans_offset_b; | |||
| 5464 | } else { | |||
| 5465 | res_blocks = wm0_sel_res_b + trans_offset_b; | |||
| 5466 | } | |||
| 5467 | ||||
| 5468 | /* | |||
| 5469 | * Just assume we can enable the transition watermark. After | |||
| 5470 | * computing the DDB we'll come back and disable it if that | |||
| 5471 | * assumption turns out to be false. | |||
| 5472 | */ | |||
| 5473 | wm->trans_wm.plane_res_b = res_blocks + 1; | |||
| 5474 | wm->trans_wm.plane_en = true1; | |||
| 5475 | } | |||
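/*
 * Editor's sketch (not part of intel_pm.c): the transition-WM arithmetic
 * above with made-up numbers for a gen11+, non-Y-tiled plane:
 *   trans_min = 4, trans_amount = 10 -> trans_offset_b = 14
 *   wm0 result blocks = 40           -> "selected" result blocks = 39
 *   res_blocks = 39 + 14 = 53        -> programmed value = 53 + 1 = 54
 * The helper below just restates that arithmetic with local names.
 */
#include <stdint.h>

static uint16_t example_trans_wm_blocks(uint16_t wm0_res_b, uint16_t trans_min,
					uint16_t trans_amount, int y_tiled,
					uint16_t two_y_tile_min)
{
	uint16_t trans_offset_b = trans_min + trans_amount;
	uint16_t wm0_sel_res_b = wm0_res_b - 1; /* "Selected Result Blocks", see comment above */
	uint16_t res_blocks;

	if (y_tiled)
		res_blocks = (wm0_sel_res_b > two_y_tile_min ?
			      wm0_sel_res_b : two_y_tile_min) + trans_offset_b;
	else
		res_blocks = wm0_sel_res_b + trans_offset_b;

	return res_blocks + 1; /* ceiling of the "real" (fractional) sum */
}

/* example_trans_wm_blocks(40, 4, 10, 0, 0) == 54 */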
| 5476 | ||||
| 5477 | static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, | |||
| 5478 | const struct intel_plane_state *plane_state, | |||
| 5479 | enum plane_id plane_id, int color_plane) | |||
| 5480 | { | |||
| 5481 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); | |||
| 5482 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 5483 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 5484 | struct skl_wm_params wm_params; | |||
| 5485 | int ret; | |||
| 5486 | ||||
| 5487 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, | |||
| 5488 | &wm_params, color_plane); | |||
| 5489 | if (ret) | |||
| 5490 | return ret; | |||
| 5491 | ||||
| 5492 | skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); | |||
| 5493 | ||||
| 5494 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) | |||
| 5495 | tgl_compute_sagv_wm(crtc_state, &wm_params, wm); | |||
| 5496 | ||||
| 5497 | skl_compute_transition_wm(crtc_state, &wm_params, wm); | |||
| 5498 | ||||
| 5499 | return 0; | |||
| 5500 | } | |||
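/*
 * Editor's sketch (not part of intel_pm.c): the order of work in
 * skl_build_plane_wm_single() above. The skl_wm_params are derived once
 * from the plane state and then shared by every later step.
 */
enum example_wm_build_stage {
	EXAMPLE_STAGE_PARAMS,	/* skl_compute_plane_wm_params() */
	EXAMPLE_STAGE_LEVELS,	/* skl_compute_wm_levels(): levels 0..max */
	EXAMPLE_STAGE_SAGV,	/* tgl_compute_sagv_wm(): gen12+ only */
	EXAMPLE_STAGE_TRANS,	/* skl_compute_transition_wm() */
};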
| 5501 | ||||
| 5502 | static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, | |||
| 5503 | const struct intel_plane_state *plane_state, | |||
| 5504 | enum plane_id plane_id) | |||
| 5505 | { | |||
| 5506 | struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 5507 | struct skl_wm_params wm_params; | |||
| 5508 | int ret; | |||
| 5509 | ||||
| 5510 | wm->is_planar = true1; | |||
| 5511 | ||||
| 5512 | /* uv plane watermarks must also be validated for NV12/Planar */ | |||
| 5513 | ret = skl_compute_plane_wm_params(crtc_state, plane_state, | |||
| 5514 | &wm_params, 1); | |||
| 5515 | if (ret) | |||
| 5516 | return ret; | |||
| 5517 | ||||
| 5518 | skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); | |||
| 5519 | ||||
| 5520 | return 0; | |||
| 5521 | } | |||
| 5522 | ||||
| 5523 | static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, | |||
| 5524 | const struct intel_plane_state *plane_state) | |||
| 5525 | { | |||
| 5526 | struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); | |||
| 5527 | const struct drm_framebuffer *fb = plane_state->hw.fb; | |||
| 5528 | enum plane_id plane_id = plane->id; | |||
| 5529 | int ret; | |||
| 5530 | ||||
| 5531 | if (!intel_wm_plane_visible(crtc_state, plane_state)) | |||
| 5532 | return 0; | |||
| 5533 | ||||
| 5534 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | |||
| 5535 | plane_id, 0); | |||
| 5536 | if (ret) | |||
| 5537 | return ret; | |||
| 5538 | ||||
| 5539 | if (fb->format->is_yuv && fb->format->num_planes > 1) { | |||
| 5540 | ret = skl_build_plane_wm_uv(crtc_state, plane_state, | |||
| 5541 | plane_id); | |||
| 5542 | if (ret) | |||
| 5543 | return ret; | |||
| 5544 | } | |||
| 5545 | ||||
| 5546 | return 0; | |||
| 5547 | } | |||
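/*
 * Editor's sketch (not part of intel_pm.c): when the extra UV pass above
 * runs. Two-plane YUV formats such as NV12 (Y plane plus an interleaved UV
 * plane) take both passes; single-plane packed YUV such as YUYV and all RGB
 * formats only take the first. Plane counts follow the DRM fourcc
 * definitions; the struct below is a local stand-in for drm_format_info.
 */
#include <stdbool.h>

struct example_format {
	bool is_yuv;
	unsigned int num_planes;
};

static bool example_needs_uv_wm(const struct example_format *fmt)
{
	return fmt->is_yuv && fmt->num_planes > 1;
}

/* { .is_yuv = true,  .num_planes = 2 }  NV12     -> true  */
/* { .is_yuv = true,  .num_planes = 1 }  YUYV     -> false */
/* { .is_yuv = false, .num_planes = 1 }  XRGB8888 -> false */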
| 5548 | ||||
| 5549 | static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, | |||
| 5550 | const struct intel_plane_state *plane_state) | |||
| 5551 | { | |||
| 5552 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5553 | enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id; | |||
| 5554 | int ret; | |||
| 5555 | ||||
| 5556 | /* Watermarks calculated in master */ | |||
| 5557 | if (plane_state->planar_slave) | |||
| 5558 | return 0; | |||
| 5559 | ||||
| 5560 | if (plane_state->planar_linked_plane) { | |||
| 5561 | const struct drm_framebuffer *fb = plane_state->hw.fb; | |||
| 5562 | enum plane_id y_plane_id = plane_state->planar_linked_plane->id; | |||
| 5563 | ||||
| 5564 | drm_WARN_ON(&dev_priv->drm, | |||
| 5565 | !intel_wm_plane_visible(crtc_state, plane_state)); | |||
| 5566 | drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv || | |||
| 5567 | fb->format->num_planes == 1); | |||
| 5568 | ||||
| 5569 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | |||
| 5570 | y_plane_id, 0); | |||
| 5571 | if (ret) | |||
| 5572 | return ret; | |||
| 5573 | ||||
| 5574 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | |||
| 5575 | plane_id, 1); | |||
| 5576 | if (ret) | |||
| 5577 | return ret; | |||
| 5578 | } else if (intel_wm_plane_visible(crtc_state, plane_state)) { | |||
| 5579 | ret = skl_build_plane_wm_single(crtc_state, plane_state, | |||
| 5580 | plane_id, 0); | |||
| 5581 | if (ret) | |||
| 5582 | return ret; | |||
| 5583 | } | |||
| 5584 | ||||
| 5585 | return 0; | |||
| 5586 | } | |||
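/*
 * Editor's sketch (not part of intel_pm.c): how many watermark passes the
 * icl path above ends up running for a plane. On gen11+ a planar (NV12)
 * framebuffer is scanned out by two linked hardware planes: the slave does
 * nothing here, and the master builds watermarks for the linked Y plane
 * (color plane 0) and for itself (color plane 1, the UV data). Names below
 * are local to this example.
 */
#include <stdbool.h>

enum example_planar_role {
	EXAMPLE_NOT_LINKED,
	EXAMPLE_PLANAR_MASTER,
	EXAMPLE_PLANAR_SLAVE,
};

static int example_icl_wm_passes(enum example_planar_role role, bool visible)
{
	switch (role) {
	case EXAMPLE_PLANAR_SLAVE:
		return 0;		/* watermarks calculated in the master */
	case EXAMPLE_PLANAR_MASTER:
		return 2;		/* linked Y plane (color plane 0) + UV (color plane 1) */
	default:
		return visible ? 1 : 0;	/* ordinary non-planar path */
	}
}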
| 5587 | ||||
| 5588 | static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) | |||
| 5589 | { | |||
| 5590 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc_state->uapi.crtc->dev); | |||
| 5591 | struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; | |||
| 5592 | struct intel_plane *plane; | |||
| 5593 | const struct intel_plane_state *plane_state; | |||
| 5594 | int ret; | |||
| 5595 | ||||
| 5596 | /* | |||
| 5597 | * We'll only calculate watermarks for planes that are actually | |||
| 5598 | * enabled, so make sure all other planes are set as disabled. | |||
| 5599 | */ | |||
| 5600 | memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); | |||
| 5601 | ||||
| 5602 | intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, | |||
| 5603 | crtc_state) { | |||
| 5604 | ||||
| 5605 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) | |||
| 5606 | ret = icl_build_plane_wm(crtc_state, plane_state); | |||
| 5607 | else | |||
| 5608 | ret = skl_build_plane_wm(crtc_state, plane_state); | |||
| 5609 | if (ret) | |||
| 5610 | return ret; | |||
| 5611 | } | |||
| 5612 | ||||
| 5613 | return 0; | |||
| 5614 | } | |||
| 5615 | ||||
| 5616 | static void skl_ddb_entry_write(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 5617 | i915_reg_t reg, | |||
| 5618 | const struct skl_ddb_entry *entry) | |||
| 5619 | { | |||
| 5620 | if (entry->end) | |||
| 5621 | intel_de_write_fw(dev_priv, reg, | |||
| 5622 | (entry->end - 1) << 16 | entry->start); | |||
| 5623 | else | |||
| 5624 | intel_de_write_fw(dev_priv, reg, 0); | |||
| 5625 | } | |||
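/*
 * Editor's sketch (not part of intel_pm.c): the DDB-entry packing used
 * above. The half-open allocation [start, end) is programmed as an
 * inclusive range: the upper 16 bits hold the last block (end - 1), the
 * lower 16 bits hold the first block, and an empty entry (end == 0) is
 * written as all zeroes.
 */
#include <stdint.h>

static uint32_t example_pack_ddb_entry(uint16_t start, uint16_t end)
{
	if (!end)
		return 0;
	return (uint32_t)(end - 1) << 16 | start;
}

/* e.g. blocks [160, 320) -> (319 << 16) | 160 == 0x013f00a0 */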
| 5626 | ||||
| 5627 | static void skl_write_wm_level(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 5628 | i915_reg_t reg, | |||
| 5629 | const struct skl_wm_level *level) | |||
| 5630 | { | |||
| 5631 | u32 val = 0; | |||
| 5632 | ||||
| 5633 | if (level->plane_en) | |||
| 5634 | val |= PLANE_WM_EN; | |||
| 5635 | if (level->ignore_lines) | |||
| 5636 | val |= PLANE_WM_IGNORE_LINES; | |||
| 5637 | val |= level->plane_res_b; | |||
| 5638 | val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; | |||
| 5639 | ||||
| 5640 | intel_de_write_fw(dev_priv, reg, val); | |||
| 5641 | } | |||
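/*
 * Editor's sketch (not part of intel_pm.c): the PLANE_WM register layout
 * written above -- enable in bit 31, "ignore lines" in bit 30, the line
 * count starting at bit 14 (PLANE_WM_LINES_SHIFT) and the block count in
 * the low bits. Names below are local to this example.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t example_pack_wm_level(bool en, bool ignore_lines,
				      uint16_t blocks, uint8_t lines)
{
	uint32_t val = 0;

	if (en)
		val |= 1u << 31;
	if (ignore_lines)
		val |= 1u << 30;
	val |= blocks;
	val |= (uint32_t)lines << 14;

	return val;
}

/* e.g. enabled, 37 blocks, 2 lines -> 0x80000000 | (2 << 14) | 37 == 0x80008025 */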
| 5642 | ||||
| 5643 | void skl_write_plane_wm(struct intel_plane *plane, | |||
| 5644 | const struct intel_crtc_state *crtc_state) | |||
| 5645 | { | |||
| 5646 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(plane->base.dev); | |||
| 5647 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 5648 | enum plane_id plane_id = plane->id; | |||
| 5649 | enum pipe pipe = plane->pipe; | |||
| 5650 | const struct skl_plane_wm *wm = | |||
| 5651 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 5652 | const struct skl_ddb_entry *ddb_y = | |||
| 5653 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 5654 | const struct skl_ddb_entry *ddb_uv = | |||
| 5655 | &crtc_state->wm.skl.plane_ddb_uv[plane_id]; | |||
| 5656 | ||||
| 5657 | for (level = 0; level <= max_level; level++) { | |||
| 5658 | const struct skl_wm_level *wm_level; | |||
| 5659 | ||||
| 5660 | wm_level = skl_plane_wm_level(crtc_state, plane_id, level); | |||
| 5661 | ||||
| 5662 | skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), | |||
| 5663 | wm_level); | |||
| 5664 | } | |||
| 5665 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), | |||
| 5666 | &wm->trans_wm); | |||
| 5667 | ||||
| 5668 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11) { | |||
| 5669 | skl_ddb_entry_write(dev_priv, | |||
| 5670 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); | |||
| 5671 | return; | |||
| 5672 | } | |||
| 5673 | ||||
| 5674 | if (wm->is_planar) | |||
| 5675 | swap(ddb_y, ddb_uv); | |||
| 5676 | ||||
| 5677 | skl_ddb_entry_write(dev_priv, | |||
| 5678 | PLANE_BUF_CFG(pipe, plane_id), ddb_y); | |||
| 5679 | skl_ddb_entry_write(dev_priv, | |||
| 5680 | PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv); | |||
| 5681 | } | |||
| 5682 | ||||
| 5683 | void skl_write_cursor_wm(struct intel_plane *plane, | |||
| 5684 | const struct intel_crtc_state *crtc_state) | |||
| 5685 | { | |||
| 5686 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(plane->base.dev); | |||
| 5687 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 5688 | enum plane_id plane_id = plane->id; | |||
| 5689 | enum pipe pipe = plane->pipe; | |||
| 5690 | const struct skl_plane_wm *wm = | |||
| 5691 | &crtc_state->wm.skl.optimal.planes[plane_id]; | |||
| 5692 | const struct skl_ddb_entry *ddb = | |||
| 5693 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 5694 | ||||
| 5695 | for (level = 0; level <= max_level; level++) { | |||
| 5696 | const struct skl_wm_level *wm_level; | |||
| 5697 | ||||
| 5698 | wm_level = skl_plane_wm_level(crtc_state, plane_id, level); | |||
| 5699 | ||||
| 5700 | skl_write_wm_level(dev_priv, CUR_WM(pipe, level), | |||
| 5701 | wm_level); | |||
| 5702 | } | |||
| 5703 | skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); | |||
| 5704 | ||||
| 5705 | skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); | |||
| 5706 | } | |||
| 5707 | ||||
| 5708 | bool_Bool skl_wm_level_equals(const struct skl_wm_level *l1, | |||
| 5709 | const struct skl_wm_level *l2) | |||
| 5710 | { | |||
| 5711 | return l1->plane_en == l2->plane_en && | |||
| 5712 | l1->ignore_lines == l2->ignore_lines && | |||
| 5713 | l1->plane_res_l == l2->plane_res_l && | |||
| 5714 | l1->plane_res_b == l2->plane_res_b; | |||
| 5715 | } | |||
| 5716 | ||||
| 5717 | static bool_Bool skl_plane_wm_equals(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 5718 | const struct skl_plane_wm *wm1, | |||
| 5719 | const struct skl_plane_wm *wm2) | |||
| 5720 | { | |||
| 5721 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 5722 | ||||
| 5723 | for (level = 0; level <= max_level; level++) { | |||
| 5724 | /* | |||
| 5725 | * We don't check uv_wm as the hardware doesn't actually | |||
| 5726 | * use it. It only gets used for calculating the required | |||
| 5727 | * ddb allocation. | |||
| 5728 | */ | |||
| 5729 | if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) | |||
| 5730 | return false0; | |||
| 5731 | } | |||
| 5732 | ||||
| 5733 | return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); | |||
| 5734 | } | |||
| 5735 | ||||
| 5736 | static bool_Bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, | |||
| 5737 | const struct skl_ddb_entry *b) | |||
| 5738 | { | |||
| 5739 | return a->start < b->end && b->start < a->end; | |||
| 5740 | } | |||
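/*
 * Editor's sketch (not part of intel_pm.c): the overlap test above treats
 * DDB entries as half-open ranges [start, end), so two entries that merely
 * touch do not count as overlapping. Names below are local to this example.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_range { uint16_t start, end; };

static bool example_ranges_overlap(struct example_range a, struct example_range b)
{
	return a.start < b.end && b.start < a.end;
}

/* [0, 160) vs [160, 320) -> false (they only touch at block 160) */
/* [0, 200) vs [160, 320) -> true  (blocks 160..199 are shared)   */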
| 5741 | ||||
| 5742 | bool_Bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, | |||
| 5743 | const struct skl_ddb_entry *entries, | |||
| 5744 | int num_entries, int ignore_idx) | |||
| 5745 | { | |||
| 5746 | int i; | |||
| 5747 | ||||
| 5748 | for (i = 0; i < num_entries; i++) { | |||
| 5749 | if (i != ignore_idx && | |||
| 5750 | skl_ddb_entries_overlap(ddb, &entries[i])) | |||
| 5751 | return true1; | |||
| 5752 | } | |||
| 5753 | ||||
| 5754 | return false0; | |||
| 5755 | } | |||
| 5756 | ||||
| 5757 | static int | |||
| 5758 | skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, | |||
| 5759 | struct intel_crtc_state *new_crtc_state) | |||
| 5760 | { | |||
| 5761 | struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); | |||
| 5762 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); | |||
| 5763 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 5764 | struct intel_plane *plane; | |||
| 5765 | ||||
| 5766 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | |||
| 5767 | struct intel_plane_state *plane_state; | |||
| 5768 | enum plane_id plane_id = plane->id; | |||
| 5769 | ||||
| 5770 | if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], | |||
| 5771 | &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) && | |||
| 5772 | skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id], | |||
| 5773 | &new_crtc_state->wm.skl.plane_ddb_uv[plane_id])) | |||
| 5774 | continue; | |||
| 5775 | ||||
| 5776 | plane_state = intel_atomic_get_plane_state(state, plane); | |||
| 5777 | if (IS_ERR(plane_state)) | |||
| 5778 | return PTR_ERR(plane_state); | |||
| 5779 | ||||
| 5780 | new_crtc_state->update_planes |= BIT(plane_id); | |||
| 5781 | } | |||
| 5782 | ||||
| 5783 | return 0; | |||
| 5784 | } | |||
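/*
 * Editor's sketch (not part of intel_pm.c): the per-plane test used above.
 * A plane is only pulled into the atomic state (and flagged in
 * update_planes) when either of its DDB allocations actually moved.
 * Names below are local to this example.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_ddb_entry { uint16_t start, end; };

static bool example_ddb_equal(struct example_ddb_entry a, struct example_ddb_entry b)
{
	return a.start == b.start && a.end == b.end;
}

static bool example_plane_needs_update(struct example_ddb_entry old_y,
				       struct example_ddb_entry new_y,
				       struct example_ddb_entry old_uv,
				       struct example_ddb_entry new_uv)
{
	return !example_ddb_equal(old_y, new_y) || !example_ddb_equal(old_uv, new_uv);
}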
| 5785 | ||||
| 5786 | static int | |||
| 5787 | skl_compute_ddb(struct intel_atomic_state *state) | |||
| 5788 | { | |||
| 5789 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 5790 | const struct intel_dbuf_state *old_dbuf_state; | |||
| 5791 | const struct intel_dbuf_state *new_dbuf_state; | |||
| 5792 | const struct intel_crtc_state *old_crtc_state; | |||
| 5793 | struct intel_crtc_state *new_crtc_state; | |||
| 5794 | struct intel_crtc *crtc; | |||
| 5795 | int ret, i; | |||
| 5796 | ||||
| 5797 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, | |||
| 5798 | new_crtc_state, i) { | |||
| 5799 | ret = skl_allocate_pipe_ddb(new_crtc_state); | |||
| 5800 | if (ret) | |||
| 5801 | return ret; | |||
| 5802 | ||||
| 5803 | ret = skl_ddb_add_affected_planes(old_crtc_state, | |||
| 5804 | new_crtc_state); | |||
| 5805 | if (ret) | |||
| 5806 | return ret; | |||
| 5807 | } | |||
| 5808 | ||||
| 5809 | old_dbuf_state = intel_atomic_get_old_dbuf_state(state); | |||
| 5810 | new_dbuf_state = intel_atomic_get_new_dbuf_state(state); | |||
| 5811 | ||||
| 5812 | if (new_dbuf_state && | |||
| 5813 | new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices) | |||
| 5814 | drm_dbg_kms(&dev_priv->drm, | |||
| 5815 | "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n", | |||
| 5816 | old_dbuf_state->enabled_slices, | |||
| 5817 | new_dbuf_state->enabled_slices, | |||
| 5818 | INTEL_INFO(dev_priv)->num_supported_dbuf_slices); | |||
| 5819 | ||||
| 5820 | return 0; | |||
| 5821 | } | |||
| 5822 | ||||
| 5823 | static char enast(bool_Bool enable) | |||
| 5824 | { | |||
| 5825 | return enable ? '*' : ' '; | |||
| 5826 | } | |||
| 5827 | ||||
| 5828 | static void | |||
| 5829 | skl_print_wm_changes(struct intel_atomic_state *state) | |||
| 5830 | { | |||
| 5831 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 5832 | const struct intel_crtc_state *old_crtc_state; | |||
| 5833 | const struct intel_crtc_state *new_crtc_state; | |||
| 5834 | struct intel_plane *plane; | |||
| 5835 | struct intel_crtc *crtc; | |||
| 5836 | int i; | |||
| 5837 | ||||
| 5838 | if (!drm_debug_enabled(DRM_UT_KMS)) | |||
| 5839 | return; | |||
| 5840 | ||||
| 5841 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, | |||
| 5842 | new_crtc_state, i) { | |||
| 5843 | const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; | |||
| 5844 | ||||
| 5845 | old_pipe_wm = &old_crtc_state->wm.skl.optimal; | |||
| 5846 | new_pipe_wm = &new_crtc_state->wm.skl.optimal; | |||
| 5847 | ||||
| 5848 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | |||
| 5849 | enum plane_id plane_id = plane->id; | |||
| 5850 | const struct skl_ddb_entry *old, *new; | |||
| 5851 | ||||
| 5852 | old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 5853 | new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id]; | |||
| 5854 | ||||
| 5855 | if (skl_ddb_entry_equal(old, new)) | |||
| 5856 | continue; | |||
| 5857 | ||||
| 5858 | drm_dbg_kms(&dev_priv->drm, | |||
| 5859 | "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", | |||
| 5860 | plane->base.base.id, plane->base.name, | |||
| 5861 | old->start, old->end, new->start, new->end, | |||
| 5862 | skl_ddb_entry_size(old), skl_ddb_entry_size(new)); | |||
| 5863 | } | |||
| 5864 | ||||
| 5865 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { | |||
| 5866 | enum plane_id plane_id = plane->id; | |||
| 5867 | const struct skl_plane_wm *old_wm, *new_wm; | |||
| 5868 | ||||
| 5869 | old_wm = &old_pipe_wm->planes[plane_id]; | |||
| 5870 | new_wm = &new_pipe_wm->planes[plane_id]; | |||
| 5871 | ||||
| 5872 | if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) | |||
| 5873 | continue; | |||
| 5874 | ||||
| 5875 | drm_dbg_kms(&dev_priv->drm, | |||
| 5876 | "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm" | |||
| 5877 | " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n", | |||
| 5878 | plane->base.base.id, plane->base.name, | |||
| 5879 | enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), | |||
| 5880 | enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), | |||
| 5881 | enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), | |||
| 5882 | enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), | |||
| 5883 | enast(old_wm->trans_wm.plane_en), | |||
| 5884 | enast(old_wm->sagv_wm0.plane_en), | |||
| 5885 | enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), | |||
| 5886 | enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), | |||
| 5887 | enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), | |||
| 5888 | enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), | |||
| 5889 | enast(new_wm->trans_wm.plane_en), | |||
| 5890 | enast(new_wm->sagv_wm0.plane_en)); | |||
| 5891 | ||||
| 5892 | drm_dbg_kms(&dev_priv->drm, | |||
| 5893 | "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" | |||
| 5894 | " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", | |||
| 5895 | plane->base.base.id, plane->base.name, | |||
| 5896 | enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, | |||
| 5897 | enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, | |||
| 5898 | enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, | |||
| 5899 | enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, | |||
| 5900 | enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, | |||
| 5901 | enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, | |||
| 5902 | enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5903 | enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5904 | enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5905 | enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5906 | ||||
| 5907 | enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5908 | enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5909 | enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5910 | enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5911 | enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5912 | enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5913 | enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5914 | enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5915 | enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l) | |||
| 5916 | enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].plane_res_l, enast(old_wm ->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, enast (old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l , enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l , enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l , enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l , enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l , enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l , enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm .plane_res_l, enast(old_wm->sagv_wm0.ignore_lines), old_wm ->sagv_wm0.plane_res_l, enast(new_wm->wm[0].ignore_lines ), new_wm->wm[0].plane_res_l, enast(new_wm->wm[1].ignore_lines ), new_wm->wm[1].plane_res_l, enast(new_wm->wm[2].ignore_lines ), new_wm->wm[2].plane_res_l, enast(new_wm->wm[3].ignore_lines ), new_wm->wm[3].plane_res_l, enast(new_wm->wm[4].ignore_lines ), new_wm->wm[4].plane_res_l, enast(new_wm->wm[5].ignore_lines ), new_wm->wm[5].plane_res_l, enast(new_wm->wm[6].ignore_lines ), new_wm->wm[6].plane_res_l, enast(new_wm->wm[7].ignore_lines ), new_wm->wm[7].plane_res_l, enast(new_wm->trans_wm.ignore_lines ), new_wm->trans_wm.plane_res_l, enast(new_wm->sagv_wm0 .ignore_lines), new_wm->sagv_wm0.plane_res_l); | |||
| 5917 | ||||
| 5918 | drm_dbg_kms(&dev_priv->drm, | |||
| 5919 | "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" | |||
| 5920 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", | |||
| 5921 | plane->base.base.id, plane->base.name, | |||
| 5922 | old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, | |||
| 5923 | old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, | |||
| 5924 | old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, | |||
| 5925 | old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, | |||
| 5926 | old_wm->trans_wm.plane_res_b, | |||
| 5927 | old_wm->sagv_wm0.plane_res_b, | |||
| 5928 | new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, | |||
| 5929 | new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, | |||
| 5930 | new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, | |||
| 5931 | new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, | |||
| 5932 | new_wm->trans_wm.plane_res_b, | |||
| 5933 | new_wm->sagv_wm0.plane_res_b); | |||
| 5934 | ||||
| 5935 | drm_dbg_kms(&dev_priv->drm, | |||
| 5936 | "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" | |||
| 5937 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", | |||
| 5938 | plane->base.base.id, plane->base.name, | |||
| 5939 | old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, | |||
| 5940 | old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, | |||
| 5941 | old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, | |||
| 5942 | old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, | |||
| 5943 | old_wm->trans_wm.min_ddb_alloc, | |||
| 5944 | old_wm->sagv_wm0.min_ddb_alloc, | |||
| 5945 | new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, | |||
| 5946 | new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, | |||
| 5947 | new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, | |||
| 5948 | new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, | |||
| 5949 | new_wm->trans_wm.min_ddb_alloc, | |||
| 5950 | new_wm->sagv_wm0.min_ddb_alloc); | |||
| 5951 | } | |||
| 5952 | } | |||
| 5953 | } | |||
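For readers decoding the watermark debug output produced above: the local enast() helper (defined earlier in intel_pm.c) maps the ignore_lines flag to a one-character marker, so each "%c%3d" pair in the format strings prints an optional '*' in front of the line count. The following standalone C snippet is only an illustration of that formatting idea, not driver code; the enast definition here mirrors what the driver appears to use.

#include <stdio.h>

/* Same idea as the driver's local enast() helper: flag ignored levels with '*'. */
#define enast(x) ((x) ? '*' : ' ')

int main(void)
{
	int ignore_lines = 1, plane_res_l = 37;

	/* "%c%3d" renders as "* 37" when the level is ignored, "  37" otherwise. */
	printf("lines %c%3d\n", enast(ignore_lines), plane_res_l);
	return 0;
}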
| 5954 | ||||
| 5955 | static int intel_add_affected_pipes(struct intel_atomic_state *state, | |||
| 5956 | u8 pipe_mask) | |||
| 5957 | { | |||
| 5958 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 5959 | struct intel_crtc *crtc; | |||
| 5960 | ||||
| 5961 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 5962 | struct intel_crtc_state *crtc_state; | |||
| 5963 | ||||
| 5964 | if ((pipe_mask & BIT(crtc->pipe)(1UL << (crtc->pipe))) == 0) | |||
| 5965 | continue; | |||
| 5966 | ||||
| 5967 | crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); | |||
| 5968 | if (IS_ERR(crtc_state)) | |||
| 5969 | return PTR_ERR(crtc_state); | |||
| 5970 | } | |||
| 5971 | ||||
| 5972 | return 0; | |||
| 5973 | } | |||
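To make the pipe_mask handling above concrete: the mask is simply a bitmask of pipe numbers, and the loop pulls in a CRTC state only when its pipe's bit is set. The sketch below is not driver code; PIPE_A/PIPE_B/PIPE_C stand in for the usual i915 pipe enumerators, BIT() is redefined locally, and add_pipes() is a hypothetical stand-in for intel_add_affected_pipes().

#include <stdio.h>

/* Stand-ins for the driver's enum pipe and BIT(); illustration only. */
enum pipe { PIPE_A, PIPE_B, PIPE_C };
#define BIT(n) (1UL << (n))

/* Mirrors the selection test used in intel_add_affected_pipes(). */
static void add_pipes(unsigned long pipe_mask)
{
	for (enum pipe p = PIPE_A; p <= PIPE_C; p++) {
		if ((pipe_mask & BIT(p)) == 0)
			continue;	/* pipe not requested, skip it */
		printf("would pull CRTC for pipe %d into the state\n", (int)p);
	}
}

int main(void)
{
	add_pipes(BIT(PIPE_A) | BIT(PIPE_C));	/* only pipes A and C */
	add_pipes(~0UL);			/* ~0: every pipe, as in the distrust_bios_wm path */
	return 0;
}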
| 5974 | ||||
| 5975 | static int | |||
| 5976 | skl_ddb_add_affected_pipes(struct intel_atomic_state *state) | |||
| 5977 | { | |||
| 5978 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(state->base.dev); | |||
| 5979 | struct intel_crtc_state *crtc_state; | |||
| 5980 | struct intel_crtc *crtc; | |||
| 5981 | int i, ret; | |||
| 5982 | ||||
| 5983 | if (dev_priv->wm.distrust_bios_wm) { | |||
| 5984 | /* | |||
| 5985 | * skl_ddb_get_pipe_allocation_limits() currently requires | |||
| 5986 | * all active pipes to be included in the state so that | |||
| 5987 | * it can redistribute the dbuf among them, and it really | |||
| 5988 | * wants to recompute things when distrust_bios_wm is set | |||
| 5989 | * so we add all the pipes to the state. | |||
| 5990 | */ | |||
| 5991 | ret = intel_add_affected_pipes(state, ~0); | |||
| 5992 | if (ret) | |||
| 5993 | return ret; | |||
| 5994 | } | |||
| 5995 | ||||
| 5996 | for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 5997 | struct intel_dbuf_state *new_dbuf_state; | |||
| 5998 | const struct intel_dbuf_state *old_dbuf_state; | |||
| 5999 | ||||
| 6000 | new_dbuf_state = intel_atomic_get_dbuf_state(state); | |||
| 6001 | if (IS_ERR(new_dbuf_state)) | |||
| 6002 | return PTR_ERR(new_dbuf_state); | |||
| 6003 | ||||
| 6004 | old_dbuf_state = intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); | |||
| 6005 | ||||
| 6006 | new_dbuf_state->active_pipes = | |||
| 6007 | intel_calc_active_pipes(state, old_dbuf_state->active_pipes); | |||
| 6008 | ||||
| 6009 | if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes) | |||
| 6010 | break; | |||
| 6011 | ||||
| 6012 | ret = intel_atomic_lock_global_state(&new_dbuf_state->base); | |||
| 6013 | if (ret) | |||
| 6014 | return ret; | |||
| 6015 | ||||
| 6016 | /* | |||
| 6017 | * skl_ddb_get_pipe_allocation_limits() currently requires | |||
| 6018 | * all active pipes to be included in the state so that | |||
| 6019 | * it can redistribute the dbuf among them. | |||
| 6020 | */ | |||
| 6021 | ret = intel_add_affected_pipes(state, | |||
| 6022 | new_dbuf_state->active_pipes); | |||
| 6023 | if (ret) | |||
| 6024 | return ret; | |||
| 6025 | ||||
| 6026 | break; | |||
| 6027 | } | |||
| 6028 | ||||
| 6029 | return 0; | |||
| 6030 | } | |||
| 6031 | ||||
| 6032 | /* | |||
| 6033 | * To make sure the cursor watermark registers are always consistent | |||
| 6034 | * with our computed state the following scenario needs special | |||
| 6035 | * treatment: | |||
| 6036 | * | |||
| 6037 | * 1. enable cursor | |||
| 6038 | * 2. move cursor entirely offscreen | |||
| 6039 | * 3. disable cursor | |||
| 6040 | * | |||
| 6041 | * Step 2. does call .disable_plane() but does not zero the watermarks | |||
| 6042 | * (since we consider an offscreen cursor still active for the purposes | |||
| 6043 | * of watermarks). Step 3. would not normally call .disable_plane() | |||
| 6044 | * because the actual plane visibility isn't changing, and we don't | |||
| 6045 | * deallocate the cursor ddb until the pipe gets disabled. So we must | |||
| 6046 | * force step 3. to call .disable_plane() to update the watermark | |||
| 6047 | * registers properly. | |||
| 6048 | * | |||
| 6049 | * Other planes do not suffer from this issue as their watermarks are | |||
| 6050 | * calculated based on the actual plane visibility. The only time this | |||
| 6051 | * can trigger for the other planes is during the initial readout as the | |||
| 6052 | * default value of the watermark registers is not zero. | |||
| 6053 | */ | |||
| 6054 | static int skl_wm_add_affected_planes(struct intel_atomic_state *state, | |||
| 6055 | struct intel_crtc *crtc) | |||
| 6056 | { | |||
| 6057 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 6058 | const struct intel_crtc_state *old_crtc_state = | |||
| 6059 | intel_atomic_get_old_crtc_state(state, crtc); | |||
| 6060 | struct intel_crtc_state *new_crtc_state = | |||
| 6061 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 6062 | struct intel_plane *plane; | |||
| 6063 | ||||
| 6064 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&dev_priv->drm)->mode_config .plane_list); plane = ({ const __typeof( ((__typeof(*plane) * )0)->base.head ) *__mptr = (plane->base.head.next); (__typeof (*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane ), base.head) );})) if (!((plane)->pipe == (crtc)->pipe )) {} else { | |||
| 6065 | struct intel_plane_state *plane_state; | |||
| 6066 | enum plane_id plane_id = plane->id; | |||
| 6067 | ||||
| 6068 | /* | |||
| 6069 | * Force a full wm update for every plane on modeset. | |||
| 6070 | * Required because the reset value of the wm registers | |||
| 6071 | * is non-zero, whereas we want all disabled planes to | |||
| 6072 | * have zero watermarks. So if we turn off the relevant | |||
| 6073 | * power well the hardware state will go out of sync | |||
| 6074 | * with the software state. | |||
| 6075 | */ | |||
| 6076 | if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && | |||
| 6077 | skl_plane_wm_equals(dev_priv, | |||
| 6078 | &old_crtc_state->wm.skl.optimal.planes[plane_id], | |||
| 6079 | &new_crtc_state->wm.skl.optimal.planes[plane_id])) | |||
| 6080 | continue; | |||
| 6081 | ||||
| 6082 | plane_state = intel_atomic_get_plane_state(state, plane); | |||
| 6083 | if (IS_ERR(plane_state)) | |||
| 6084 | return PTR_ERR(plane_state); | |||
| 6085 | ||||
| 6086 | new_crtc_state->update_planes |= BIT(plane_id)(1UL << (plane_id)); | |||
| 6087 | } | |||
| 6088 | ||||
| 6089 | return 0; | |||
| 6090 | } | |||
| 6091 | ||||
| 6092 | static int | |||
| 6093 | skl_compute_wm(struct intel_atomic_state *state) | |||
| 6094 | { | |||
| 6095 | struct intel_crtc *crtc; | |||
| 6096 | struct intel_crtc_state *new_crtc_state; | |||
| 6097 | struct intel_crtc_state *old_crtc_state; | |||
| 6098 | int ret, i; | |||
| 6099 | ||||
| 6100 | ret = skl_ddb_add_affected_pipes(state); | |||
| 6101 | if (ret) | |||
| 6102 | return ret; | |||
| 6103 | ||||
| 6104 | /* | |||
| 6105 | * Calculate WM's for all pipes that are part of this transaction. | |||
| 6106 | * Note that skl_ddb_add_affected_pipes may have added more CRTC's that | |||
| 6107 | * weren't otherwise being modified if pipe allocations had to change. | |||
| 6108 | */ | |||
| 6109 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else | |||
| 6110 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 6111 | ret = skl_build_pipe_wm(new_crtc_state); | |||
| 6112 | if (ret) | |||
| 6113 | return ret; | |||
| 6114 | } | |||
| 6115 | ||||
| 6116 | ret = skl_compute_ddb(state); | |||
| 6117 | if (ret) | |||
| 6118 | return ret; | |||
| 6119 | ||||
| 6120 | ret = intel_compute_sagv_mask(state); | |||
| 6121 | if (ret) | |||
| 6122 | return ret; | |||
| 6123 | ||||
| 6124 | /* | |||
| 6125 | * skl_compute_ddb() will have adjusted the final watermarks | |||
| 6126 | * based on how much ddb is available. Now we can actually | |||
| 6127 | * check if the final watermarks changed. | |||
| 6128 | */ | |||
| 6129 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else | |||
| 6130 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else { | |||
| 6131 | ret = skl_wm_add_affected_planes(state, crtc); | |||
| 6132 | if (ret) | |||
| 6133 | return ret; | |||
| 6134 | } | |||
| 6135 | ||||
| 6136 | skl_print_wm_changes(state); | |||
| 6137 | ||||
| 6138 | return 0; | |||
| 6139 | } | |||
| 6140 | ||||
| 6141 | static void ilk_compute_wm_config(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 6142 | struct intel_wm_config *config) | |||
| 6143 | { | |||
| 6144 | struct intel_crtc *crtc; | |||
| 6145 | ||||
| 6146 | /* Compute the currently _active_ config */ | |||
| 6147 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6148 | const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; | |||
| 6149 | ||||
| 6150 | if (!wm->pipe_enabled) | |||
| 6151 | continue; | |||
| 6152 | ||||
| 6153 | config->sprites_enabled |= wm->sprites_enabled; | |||
| 6154 | config->sprites_scaled |= wm->sprites_scaled; | |||
| 6155 | config->num_pipes_active++; | |||
| 6156 | } | |||
| 6157 | } | |||
| 6158 | ||||
| 6159 | static void ilk_program_watermarks(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6160 | { | |||
| 6161 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; | |||
| 6162 | struct ilk_wm_maximums max; | |||
| 6163 | struct intel_wm_config config = {}; | |||
| 6164 | struct ilk_wm_values results = {}; | |||
| 6165 | enum intel_ddb_partitioning partitioning; | |||
| 6166 | ||||
| 6167 | ilk_compute_wm_config(dev_priv, &config); | |||
| 6168 | ||||
| 6169 | ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); | |||
| 6170 | ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); | |||
| 6171 | ||||
| 6172 | /* 5/6 split only in single pipe config on IVB+ */ | |||
| 6173 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 7 && | |||
| 6174 | config.num_pipes_active == 1 && config.sprites_enabled) { | |||
| 6175 | ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); | |||
| 6176 | ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); | |||
| 6177 | ||||
| 6178 | best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); | |||
| 6179 | } else { | |||
| 6180 | best_lp_wm = &lp_wm_1_2; | |||
| 6181 | } | |||
| 6182 | ||||
| 6183 | partitioning = (best_lp_wm == &lp_wm_1_2) ? | |||
| 6184 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; | |||
| 6185 | ||||
| 6186 | ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); | |||
| 6187 | ||||
| 6188 | ilk_write_wm_values(dev_priv, &results); | |||
| 6189 | } | |||
| 6190 | ||||
| 6191 | static void ilk_initial_watermarks(struct intel_atomic_state *state, | |||
| 6192 | struct intel_crtc *crtc) | |||
| 6193 | { | |||
| 6194 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 6195 | const struct intel_crtc_state *crtc_state = | |||
| 6196 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 6197 | ||||
| 6198 | mutex_lock(&dev_priv->wm.wm_mutex)rw_enter_write(&dev_priv->wm.wm_mutex); | |||
| 6199 | crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; | |||
| 6200 | ilk_program_watermarks(dev_priv); | |||
| 6201 | mutex_unlock(&dev_priv->wm.wm_mutex)rw_exit_write(&dev_priv->wm.wm_mutex); | |||
| 6202 | } | |||
| 6203 | ||||
| 6204 | static void ilk_optimize_watermarks(struct intel_atomic_state *state, | |||
| 6205 | struct intel_crtc *crtc) | |||
| 6206 | { | |||
| 6207 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 6208 | const struct intel_crtc_state *crtc_state = | |||
| 6209 | intel_atomic_get_new_crtc_state(state, crtc); | |||
| 6210 | ||||
| 6211 | if (!crtc_state->wm.need_postvbl_update) | |||
| 6212 | return; | |||
| 6213 | ||||
| 6214 | mutex_lock(&dev_priv->wm.wm_mutex)rw_enter_write(&dev_priv->wm.wm_mutex); | |||
| 6215 | crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; | |||
| 6216 | ilk_program_watermarks(dev_priv); | |||
| 6217 | mutex_unlock(&dev_priv->wm.wm_mutex)rw_exit_write(&dev_priv->wm.wm_mutex); | |||
| 6218 | } | |||
| 6219 | ||||
| 6220 | static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) | |||
| 6221 | { | |||
| 6222 | level->plane_en = val & PLANE_WM_EN(1 << 31); | |||
| 6223 | level->ignore_lines = val & PLANE_WM_IGNORE_LINES(1 << 30); | |||
| 6224 | level->plane_res_b = val & PLANE_WM_BLOCKS_MASK0x7ff; | |||
| 6225 | level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT14) & | |||
| 6226 | PLANE_WM_LINES_MASK0x1f; | |||
| 6227 | } | |||
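| | /* | |||
| | * Decoding example, as an illustrative sketch only (the raw value below | |||
| | * is made up rather than read from real hardware): | |||
| | * | |||
| | * skl_wm_level_from_reg_val(0x8000a014, &level); | |||
| | * | |||
| | * yields level.plane_en == true (bit 31 set), level.ignore_lines == false | |||
| | * (bit 30 clear), level.plane_res_b == 0x14 (0x8000a014 & 0x7ff) and | |||
| | * level.plane_res_l == 0x2 ((0x8000a014 >> 14) & 0x1f). | |||
| | */ | |||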
| 6228 | ||||
| 6229 | void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, | |||
| 6230 | struct skl_pipe_wm *out) | |||
| 6231 | { | |||
| 6232 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev); | |||
| 6233 | enum pipe pipe = crtc->pipe; | |||
| 6234 | int level, max_level; | |||
| 6235 | enum plane_id plane_id; | |||
| 6236 | u32 val; | |||
| 6237 | ||||
| 6238 | max_level = ilk_wm_max_level(dev_priv); | |||
| 6239 | ||||
| 6240 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 6241 | struct skl_plane_wm *wm = &out->planes[plane_id]; | |||
| 6242 | ||||
| 6243 | for (level = 0; level <= max_level; level++) { | |||
| 6244 | if (plane_id != PLANE_CURSOR) | |||
| 6245 | val = I915_READ(PLANE_WM(pipe, plane_id, level))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x70240) + (pipe) * ((0x71240) - (0x70240)))) + (plane_id) * ((((0x70340) + (pipe) * ((0x71340) - (0x70340)) )) - (((0x70240) + (pipe) * ((0x71240) - (0x70240)))))) + ((4 ) * (level))) }))); | |||
| 6246 | else | |||
| 6247 | val = I915_READ(CUR_WM(pipe, level))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((0x70140) + (pipe) * ((0x71140) - (0x70140))) + ( (4) * (level))) }))); | |||
| 6248 | ||||
| 6249 | skl_wm_level_from_reg_val(val, &wm->wm[level]); | |||
| 6250 | } | |||
| 6251 | ||||
| 6252 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) | |||
| 6253 | wm->sagv_wm0 = wm->wm[0]; | |||
| 6254 | ||||
| 6255 | if (plane_id != PLANE_CURSOR) | |||
| 6256 | val = I915_READ(PLANE_WM_TRANS(pipe, plane_id))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((((0x70268) + (pipe) * ((0x71268) - (0x70268)))) + (plane_id) * ((((0x70368) + (pipe) * ((0x71368) - (0x70368)) )) - (((0x70268) + (pipe) * ((0x71268) - (0x70268))))))) }))); | |||
| 6257 | else | |||
| 6258 | val = I915_READ(CUR_WM_TRANS(pipe))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((0x70168) + (pipe) * ((0x71168) - (0x70168)))) }) )); | |||
| 6259 | ||||
| 6260 | skl_wm_level_from_reg_val(val, &wm->trans_wm); | |||
| 6261 | } | |||
| 6262 | ||||
| 6263 | if (!crtc->active) | |||
| 6264 | return; | |||
| 6265 | } | |||
| 6266 | ||||
| 6267 | void skl_wm_get_hw_state(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6268 | { | |||
| 6269 | struct intel_crtc *crtc; | |||
| 6270 | struct intel_crtc_state *crtc_state; | |||
| 6271 | ||||
| 6272 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6273 | crtc_state = to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6274 | ||||
| 6275 | skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); | |||
| 6276 | } | |||
| 6277 | ||||
| 6278 | if (dev_priv->active_pipes) { | |||
| 6279 | /* Fully recompute DDB on first atomic commit */ | |||
| 6280 | dev_priv->wm.distrust_bios_wm = true1; | |||
| 6281 | } | |||
| 6282 | } | |||
| 6283 | ||||
| 6284 | static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) | |||
| 6285 | { | |||
| 6286 | struct drm_device *dev = crtc->base.dev; | |||
| 6287 | struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(dev); | |||
| 6288 | struct ilk_wm_values *hw = &dev_priv->wm.hw; | |||
| 6289 | struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6290 | struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; | |||
| 6291 | enum pipe pipe = crtc->pipe; | |||
| 6292 | static const i915_reg_t wm0_pipe_reg[] = { | |||
| 6293 | [PIPE_A] = WM0_PIPEA_ILK((const i915_reg_t){ .reg = (0x45100) }), | |||
| 6294 | [PIPE_B] = WM0_PIPEB_ILK((const i915_reg_t){ .reg = (0x45104) }), | |||
| 6295 | [PIPE_C] = WM0_PIPEC_IVB((const i915_reg_t){ .reg = (0x45200) }), | |||
| 6296 | }; | |||
| 6297 | ||||
| 6298 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe])intel_uncore_read(&(dev_priv)->uncore, (wm0_pipe_reg[pipe ])); | |||
| 6299 | ||||
| 6300 | memset(active, 0, sizeof(*active))__builtin_memset((active), (0), (sizeof(*active))); | |||
| 6301 | ||||
| 6302 | active->pipe_enabled = crtc->active; | |||
| 6303 | ||||
| 6304 | if (active->pipe_enabled) { | |||
| 6305 | u32 tmp = hw->wm_pipe[pipe]; | |||
| 6306 | ||||
| 6307 | /* | |||
| 6308 | * For active pipes LP0 watermark is marked as | |||
| 6309 | * enabled, and LP1+ watermarks as disabled since | |||
| 6310 | * we can't really reverse compute them in case | |||
| 6311 | * multiple pipes are active. | |||
| 6312 | */ | |||
| 6313 | active->wm[0].enable = true1; | |||
| 6314 | active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK(0xffff << 16)) >> WM0_PIPE_PLANE_SHIFT16; | |||
| 6315 | active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK(0xff << 8)) >> WM0_PIPE_SPRITE_SHIFT8; | |||
| 6316 | active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK(0xff); | |||
| 6317 | } else { | |||
| 6318 | int level, max_level = ilk_wm_max_level(dev_priv); | |||
| 6319 | ||||
| 6320 | /* | |||
| 6321 | * For inactive pipes, all watermark levels | |||
| 6322 | * should be marked as enabled but zeroed, | |||
| 6323 | * which is what we'd compute them to. | |||
| 6324 | */ | |||
| 6325 | for (level = 0; level <= max_level; level++) | |||
| 6326 | active->wm[level].enable = true1; | |||
| 6327 | } | |||
| 6328 | ||||
| 6329 | crtc->wm.active.ilk = *active; | |||
| 6330 | } | |||
| 6331 | ||||
| 6332 | #define _FW_WM(value, plane) \ | |||
| 6333 | (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) | |||
| 6334 | #define _FW_WM_VLV(value, plane) \ | |||
| 6335 | (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) | |||
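| | /* | |||
| | * Expansion sketch, for illustration only: _FW_WM(tmp, CURSORB) pastes the | |||
| | * plane name into the register field definitions and becomes | |||
| | * (((tmp) & DSPFW_CURSORB_MASK) >> DSPFW_CURSORB_SHIFT), i.e. it extracts | |||
| | * one watermark field from a raw DSPFW* readout; _FW_WM_VLV does the same | |||
| | * with the *_MASK_VLV variants. | |||
| | */ | |||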
| 6336 | ||||
| 6337 | static void g4x_read_wm_values(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 6338 | struct g4x_wm_values *wm) | |||
| 6339 | { | |||
| 6340 | u32 tmp; | |||
| 6341 | ||||
| 6342 | tmp = I915_READ(DSPFW1)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x70034) }))); | |||
| 6343 | wm->sr.plane = _FW_WM(tmp, SR); | |||
| 6344 | wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); | |||
| 6345 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); | |||
| 6346 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); | |||
| 6347 | ||||
| 6348 | tmp = I915_READ(DSPFW2)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x70038) }))); | |||
| 6349 | wm->fbc_en = tmp & DSPFW_FBC_SR_EN(1 << 31); | |||
| 6350 | wm->sr.fbc = _FW_WM(tmp, FBC_SR); | |||
| 6351 | wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); | |||
| 6352 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); | |||
| 6353 | wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); | |||
| 6354 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); | |||
| 6355 | ||||
| 6356 | tmp = I915_READ(DSPFW3)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x7003c) }))); | |||
| 6357 | wm->hpll_en = tmp & DSPFW_HPLL_SR_EN(1 << 31); | |||
| 6358 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | |||
| 6359 | wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); | |||
| 6360 | wm->hpll.plane = _FW_WM(tmp, HPLL_SR); | |||
| 6361 | } | |||
| 6362 | ||||
| 6363 | static void vlv_read_wm_values(struct drm_i915_privateinteldrm_softc *dev_priv, | |||
| 6364 | struct vlv_wm_values *wm) | |||
| 6365 | { | |||
| 6366 | enum pipe pipe; | |||
| 6367 | u32 tmp; | |||
| 6368 | ||||
| 6369 | for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!( (&(dev_priv)->__info)->pipe_mask & (1UL << (pipe)))) {} else { | |||
| 6370 | tmp = I915_READ(VLV_DDL(pipe))intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x70050 + 4 * (pipe)) }))); | |||
| 6371 | ||||
| 6372 | wm->ddl[pipe].plane[PLANE_PRIMARY] = | |||
| 6373 | (tmp >> DDL_PLANE_SHIFT0) & (DDL_PRECISION_HIGH(1 << 7) | DRAIN_LATENCY_MASK0x7f); | |||
| 6374 | wm->ddl[pipe].plane[PLANE_CURSOR] = | |||
| 6375 | (tmp >> DDL_CURSOR_SHIFT24) & (DDL_PRECISION_HIGH(1 << 7) | DRAIN_LATENCY_MASK0x7f); | |||
| 6376 | wm->ddl[pipe].plane[PLANE_SPRITE0] = | |||
| 6377 | (tmp >> DDL_SPRITE_SHIFT(0)(8 + 8 * (0))) & (DDL_PRECISION_HIGH(1 << 7) | DRAIN_LATENCY_MASK0x7f); | |||
| 6378 | wm->ddl[pipe].plane[PLANE_SPRITE1] = | |||
| 6379 | (tmp >> DDL_SPRITE_SHIFT(1)(8 + 8 * (1))) & (DDL_PRECISION_HIGH(1 << 7) | DRAIN_LATENCY_MASK0x7f); | |||
| 6380 | } | |||
| 6381 | ||||
| 6382 | tmp = I915_READ(DSPFW1)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x70034) }))); | |||
| 6383 | wm->sr.plane = _FW_WM(tmp, SR); | |||
| 6384 | wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); | |||
| 6385 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); | |||
| 6386 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); | |||
| 6387 | ||||
| 6388 | tmp = I915_READ(DSPFW2)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x70038) }))); | |||
| 6389 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); | |||
| 6390 | wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); | |||
| 6391 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); | |||
| 6392 | ||||
| 6393 | tmp = I915_READ(DSPFW3)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (((&(dev_priv)->__info)->display_mmio_offset ) + 0x7003c) }))); | |||
| 6394 | wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); | |||
| 6395 | ||||
| 6396 | if (IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)) { | |||
| 6397 | tmp = I915_READ(DSPFW7_CHV)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x700b4) }))); | |||
| 6398 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); | |||
| 6399 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); | |||
| 6400 | ||||
| 6401 | tmp = I915_READ(DSPFW8_CHV)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x700b8) }))); | |||
| 6402 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); | |||
| 6403 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); | |||
| 6404 | ||||
| 6405 | tmp = I915_READ(DSPFW9_CHV)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x7007c) }))); | |||
| 6406 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); | |||
| 6407 | wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); | |||
| 6408 | ||||
| 6409 | tmp = I915_READ(DSPHOWM)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x70064) }))); | |||
| 6410 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |||
| 6411 | wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; | |||
| 6412 | wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; | |||
| 6413 | wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; | |||
| 6414 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; | |||
| 6415 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |||
| 6416 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; | |||
| 6417 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |||
| 6418 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |||
| 6419 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; | |||
| 6420 | } else { | |||
| 6421 | tmp = I915_READ(DSPFW7)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x7007c) }))); | |||
| 6422 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); | |||
| 6423 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); | |||
| 6424 | ||||
| 6425 | tmp = I915_READ(DSPHOWM)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x70064) }))); | |||
| 6426 | wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; | |||
| 6427 | wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; | |||
| 6428 | wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; | |||
| 6429 | wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; | |||
| 6430 | wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; | |||
| 6431 | wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; | |||
| 6432 | wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; | |||
| 6433 | } | |||
| 6434 | } | |||
| 6435 | ||||
| 6436 | #undef _FW_WM | |||
| 6437 | #undef _FW_WM_VLV | |||
| 6438 | ||||
| 6439 | void g4x_wm_get_hw_state(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6440 | { | |||
| 6441 | struct g4x_wm_values *wm = &dev_priv->wm.g4x; | |||
| 6442 | struct intel_crtc *crtc; | |||
| 6443 | ||||
| 6444 | g4x_read_wm_values(dev_priv, wm); | |||
| 6445 | ||||
| 6446 | wm->cxsr = I915_READ(FW_BLC_SELF)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x20e0) }))) & FW_BLC_SELF_EN(1 << 15); | |||
| 6447 | ||||
| 6448 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6449 | struct intel_crtc_state *crtc_state = | |||
| 6450 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6451 | struct g4x_wm_state *active = &crtc->wm.active.g4x; | |||
| 6452 | struct g4x_pipe_wm *raw; | |||
| 6453 | enum pipe pipe = crtc->pipe; | |||
| 6454 | enum plane_id plane_id; | |||
| 6455 | int level, max_level; | |||
| 6456 | ||||
| 6457 | active->cxsr = wm->cxsr; | |||
| 6458 | active->hpll_en = wm->hpll_en; | |||
| 6459 | active->fbc_en = wm->fbc_en; | |||
| 6460 | ||||
| 6461 | active->sr = wm->sr; | |||
| 6462 | active->hpll = wm->hpll; | |||
| 6463 | ||||
| 6464 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 6465 | active->wm.plane[plane_id] = | |||
| 6466 | wm->pipe[pipe].plane[plane_id]; | |||
| 6467 | } | |||
| 6468 | ||||
| 6469 | if (wm->cxsr && wm->hpll_en) | |||
| 6470 | max_level = G4X_WM_LEVEL_HPLL; | |||
| 6471 | else if (wm->cxsr) | |||
| 6472 | max_level = G4X_WM_LEVEL_SR; | |||
| 6473 | else | |||
| 6474 | max_level = G4X_WM_LEVEL_NORMAL; | |||
| 6475 | ||||
| 6476 | level = G4X_WM_LEVEL_NORMAL; | |||
| 6477 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 6478 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else | |||
| 6479 | raw->plane[plane_id] = active->wm.plane[plane_id]; | |||
| 6480 | ||||
| 6481 | if (++level > max_level) | |||
| 6482 | goto out; | |||
| 6483 | ||||
| 6484 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 6485 | raw->plane[PLANE_PRIMARY] = active->sr.plane; | |||
| 6486 | raw->plane[PLANE_CURSOR] = active->sr.cursor; | |||
| 6487 | raw->plane[PLANE_SPRITE0] = 0; | |||
| 6488 | raw->fbc = active->sr.fbc; | |||
| 6489 | ||||
| 6490 | if (++level > max_level) | |||
| 6491 | goto out; | |||
| 6492 | ||||
| 6493 | raw = &crtc_state->wm.g4x.raw[level]; | |||
| 6494 | raw->plane[PLANE_PRIMARY] = active->hpll.plane; | |||
| 6495 | raw->plane[PLANE_CURSOR] = active->hpll.cursor; | |||
| 6496 | raw->plane[PLANE_SPRITE0] = 0; | |||
| 6497 | raw->fbc = active->hpll.fbc; | |||
| 6498 | ||||
| 6499 | out: | |||
| 6500 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else | |||
| 6501 | g4x_raw_plane_wm_set(crtc_state, level, | |||
| 6502 | plane_id, USHRT_MAX0xffff); | |||
| 6503 | g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX0xffff); | |||
| 6504 | ||||
| 6505 | crtc_state->wm.g4x.optimal = *active; | |||
| 6506 | crtc_state->wm.g4x.intermediate = *active; | |||
| 6507 | ||||
| 6508 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]) | |||
| 6509 | "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]) | |||
| 6510 | pipe_name(pipe),drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]) | |||
| 6511 | wm->pipe[pipe].plane[PLANE_PRIMARY],drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]) | |||
| 6512 | wm->pipe[pipe].plane[PLANE_CURSOR],drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]) | |||
| 6513 | wm->pipe[pipe].plane[PLANE_SPRITE0])drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ]); | |||
| 6514 | } | |||
| 6515 | ||||
| 6516 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n" , wm->sr.plane, wm->sr.cursor, wm->sr.fbc) | |||
| 6517 | "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n" , wm->sr.plane, wm->sr.cursor, wm->sr.fbc) | |||
| 6518 | wm->sr.plane, wm->sr.cursor, wm->sr.fbc)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n" , wm->sr.plane, wm->sr.cursor, wm->sr.fbc); | |||
| 6519 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n" , wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc) | |||
| 6520 | "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n" , wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc) | |||
| 6521 | wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n" , wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); | |||
| 6522 | drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial SR=%s HPLL=%s FBC=%s\n" , yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en )) | |||
| 6523 | yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en))drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial SR=%s HPLL=%s FBC=%s\n" , yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en )); | |||
| 6524 | } | |||
| 6525 | ||||
| 6526 | void g4x_wm_sanitize(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6527 | { | |||
| 6528 | struct intel_plane *plane; | |||
| 6529 | struct intel_crtc *crtc; | |||
| 6530 | ||||
| 6531 | mutex_lock(&dev_priv->wm.wm_mutex)rw_enter_write(&dev_priv->wm.wm_mutex); | |||
| 6532 | ||||
| 6533 | for_each_intel_plane(&dev_priv->drm, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&dev_priv->drm)->mode_config .plane_list); plane = ({ const __typeof( ((__typeof(*plane) * )0)->base.head ) *__mptr = (plane->base.head.next); (__typeof (*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane ), base.head) );})) { | |||
| 6534 | struct intel_crtc *crtc = | |||
| 6535 | intel_get_crtc_for_pipe(dev_priv, plane->pipe); | |||
| 6536 | struct intel_crtc_state *crtc_state = | |||
| 6537 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6538 | struct intel_plane_state *plane_state = | |||
| 6539 | to_intel_plane_state(plane->base.state)({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (plane->base.state); (struct intel_plane_state * )( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );}); | |||
| 6540 | struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; | |||
| 6541 | enum plane_id plane_id = plane->id; | |||
| 6542 | int level; | |||
| 6543 | ||||
| 6544 | if (plane_state->uapi.visible) | |||
| 6545 | continue; | |||
| 6546 | ||||
| 6547 | for (level = 0; level < 3; level++) { | |||
| 6548 | struct g4x_pipe_wm *raw = | |||
| 6549 | &crtc_state->wm.g4x.raw[level]; | |||
| 6550 | ||||
| 6551 | raw->plane[plane_id] = 0; | |||
| 6552 | wm_state->wm.plane[plane_id] = 0; | |||
| 6553 | } | |||
| 6554 | ||||
| 6555 | if (plane_id == PLANE_PRIMARY) { | |||
| 6556 | for (level = 0; level < 3; level++) { | |||
| 6557 | struct g4x_pipe_wm *raw = | |||
| 6558 | &crtc_state->wm.g4x.raw[level]; | |||
| 6559 | raw->fbc = 0; | |||
| 6560 | } | |||
| 6561 | ||||
| 6562 | wm_state->sr.fbc = 0; | |||
| 6563 | wm_state->hpll.fbc = 0; | |||
| 6564 | wm_state->fbc_en = false0; | |||
| 6565 | } | |||
| 6566 | } | |||
| 6567 | ||||
| 6568 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6569 | struct intel_crtc_state *crtc_state = | |||
| 6570 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6571 | ||||
| 6572 | crtc_state->wm.g4x.intermediate = | |||
| 6573 | crtc_state->wm.g4x.optimal; | |||
| 6574 | crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; | |||
| 6575 | } | |||
| 6576 | ||||
| 6577 | g4x_program_watermarks(dev_priv); | |||
| 6578 | ||||
| 6579 | mutex_unlock(&dev_priv->wm.wm_mutex)rw_exit_write(&dev_priv->wm.wm_mutex); | |||
| 6580 | } | |||
| 6581 | ||||
| 6582 | void vlv_wm_get_hw_state(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6583 | { | |||
| 6584 | struct vlv_wm_values *wm = &dev_priv->wm.vlv; | |||
| 6585 | struct intel_crtc *crtc; | |||
| 6586 | u32 val; | |||
| 6587 | ||||
| 6588 | vlv_read_wm_values(dev_priv, wm); | |||
| 6589 | ||||
| 6590 | wm->cxsr = I915_READ(FW_BLC_SELF_VLV)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x180000 + 0x6500) }))) & FW_CSPWRDWNEN(1 << 15); | |||
| 6591 | wm->level = VLV_WM_LEVEL_PM2; | |||
| 6592 | ||||
| 6593 | if (IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)) { | |||
| 6594 | vlv_punit_get(dev_priv); | |||
| 6595 | ||||
| 6596 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM0x36); | |||
| 6597 | if (val & DSP_MAXFIFO_PM5_ENABLE(1 << 6)) | |||
| 6598 | wm->level = VLV_WM_LEVEL_PM5; | |||
| 6599 | ||||
| 6600 | /* | |||
| 6601 | * If DDR DVFS is disabled in the BIOS, Punit | |||
| 6602 | * will never ack the request. So if that happens | |||
| 6603 | * assume we don't have to enable/disable DDR DVFS | |||
| 6604 | * dynamically. To test that just set the REQ_ACK | |||
| 6605 | * bit to poke the Punit, but don't change the | |||
| 6606 | * HIGH/LOW bits so that we don't actually change | |||
| 6607 | * the current state. | |||
| 6608 | */ | |||
| 6609 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP20x139); | |||
| 6610 | val |= FORCE_DDR_FREQ_REQ_ACK(1 << 8); | |||
| 6611 | vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP20x139, val); | |||
| 6612 | ||||
| 6613 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (((3) * 1000))); long wait__ = ((10)); int ret__; assertwaitok (); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw (), end__); ; __asm volatile("" : : : "memory"); if ((((vlv_punit_read (dev_priv, 0x139) & (1 << 8)) == 0))) { ret__ = 0; break ; } if (expired__) { ret__ = -60; break; } usleep_range(wait__ , wait__ * 2); if (wait__ < ((1000))) wait__ <<= 1; } ret__; }) | |||
| 6614 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (((3) * 1000))); long wait__ = ((10)); int ret__; assertwaitok (); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw (), end__); ; __asm volatile("" : : : "memory"); if ((((vlv_punit_read (dev_priv, 0x139) & (1 << 8)) == 0))) { ret__ = 0; break ; } if (expired__) { ret__ = -60; break; } usleep_range(wait__ , wait__ * 2); if (wait__ < ((1000))) wait__ <<= 1; } ret__; })) { | |||
| 6615 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n") | |||
| 6616 | "Punit not acking DDR DVFS request, "drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n") | |||
| 6617 | "assuming DDR DVFS is disabled\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); | |||
| 6618 | dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; | |||
| 6619 | } else { | |||
| 6620 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP20x139); | |||
| 6621 | if ((val & FORCE_DDR_HIGH_FREQ(1 << 0)) == 0) | |||
| 6622 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | |||
| 6623 | } | |||
| 6624 | ||||
| 6625 | vlv_punit_put(dev_priv); | |||
| 6626 | } | |||
| 6627 | ||||
| 6628 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6629 | struct intel_crtc_state *crtc_state = | |||
| 6630 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6631 | struct vlv_wm_state *active = &crtc->wm.active.vlv; | |||
| 6632 | const struct vlv_fifo_state *fifo_state = | |||
| 6633 | &crtc_state->wm.vlv.fifo_state; | |||
| 6634 | enum pipe pipe = crtc->pipe; | |||
| 6635 | enum plane_id plane_id; | |||
| 6636 | int level; | |||
| 6637 | ||||
| 6638 | vlv_get_fifo_size(crtc_state); | |||
| 6639 | ||||
| 6640 | active->num_levels = wm->level + 1; | |||
| 6641 | active->cxsr = wm->cxsr; | |||
| 6642 | ||||
| 6643 | for (level = 0; level < active->num_levels; level++) { | |||
| 6644 | struct g4x_pipe_wm *raw = | |||
| 6645 | &crtc_state->wm.vlv.raw[level]; | |||
| 6646 | ||||
| 6647 | active->sr[level].plane = wm->sr.plane; | |||
| 6648 | active->sr[level].cursor = wm->sr.cursor; | |||
| 6649 | ||||
| 6650 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { | |||
| 6651 | active->wm[level].plane[plane_id] = | |||
| 6652 | wm->pipe[pipe].plane[plane_id]; | |||
| 6653 | ||||
| 6654 | raw->plane[plane_id] = | |||
| 6655 | vlv_invert_wm_value(active->wm[level].plane[plane_id], | |||
| 6656 | fifo_state->plane[plane_id]); | |||
| 6657 | } | |||
| 6658 | } | |||
| 6659 | ||||
| 6660 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else | |||
| 6661 | vlv_raw_plane_wm_set(crtc_state, level, | |||
| 6662 | plane_id, USHRT_MAX0xffff); | |||
| 6663 | vlv_invalidate_wms(crtc, active, level); | |||
| 6664 | ||||
| 6665 | crtc_state->wm.vlv.optimal = *active; | |||
| 6666 | crtc_state->wm.vlv.intermediate = *active; | |||
| 6667 | ||||
| 6668 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6669 | "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6670 | pipe_name(pipe),drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6671 | wm->pipe[pipe].plane[PLANE_PRIMARY],drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6672 | wm->pipe[pipe].plane[PLANE_CURSOR],drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6673 | wm->pipe[pipe].plane[PLANE_SPRITE0],drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]) | |||
| 6674 | wm->pipe[pipe].plane[PLANE_SPRITE1])drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n" , ((pipe) + 'A'), wm->pipe[pipe].plane[PLANE_PRIMARY], wm-> pipe[pipe].plane[PLANE_CURSOR], wm->pipe[pipe].plane[PLANE_SPRITE0 ], wm->pipe[pipe].plane[PLANE_SPRITE1]); | |||
| 6675 | } | |||
| 6676 | ||||
| 6677 | drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n" , wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr ) | |||
| 6678 | "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n" , wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr ) | |||
| 6679 | wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n" , wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr ); | |||
| 6680 | } | |||
| 6681 | ||||
| 6682 | void vlv_wm_sanitize(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6683 | { | |||
| 6684 | struct intel_plane *plane; | |||
| 6685 | struct intel_crtc *crtc; | |||
| 6686 | ||||
| 6687 | mutex_lock(&dev_priv->wm.wm_mutex)rw_enter_write(&dev_priv->wm.wm_mutex); | |||
| 6688 | ||||
| 6689 | for_each_intel_plane(&dev_priv->drm, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&dev_priv->drm)->mode_config .plane_list); plane = ({ const __typeof( ((__typeof(*plane) * )0)->base.head ) *__mptr = (plane->base.head.next); (__typeof (*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane ), base.head) );})) { | |||
| 6690 | struct intel_crtc *crtc = | |||
| 6691 | intel_get_crtc_for_pipe(dev_priv, plane->pipe); | |||
| 6692 | struct intel_crtc_state *crtc_state = | |||
| 6693 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6694 | struct intel_plane_state *plane_state = | |||
| 6695 | to_intel_plane_state(plane->base.state)({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (plane->base.state); (struct intel_plane_state * )( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );}); | |||
| 6696 | struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; | |||
| 6697 | const struct vlv_fifo_state *fifo_state = | |||
| 6698 | &crtc_state->wm.vlv.fifo_state; | |||
| 6699 | enum plane_id plane_id = plane->id; | |||
| 6700 | int level; | |||
| 6701 | ||||
| 6702 | if (plane_state->uapi.visible) | |||
| 6703 | continue; | |||
| 6704 | ||||
| 6705 | for (level = 0; level < wm_state->num_levels; level++) { | |||
| 6706 | struct g4x_pipe_wm *raw = | |||
| 6707 | &crtc_state->wm.vlv.raw[level]; | |||
| 6708 | ||||
| 6709 | raw->plane[plane_id] = 0; | |||
| 6710 | ||||
| 6711 | wm_state->wm[level].plane[plane_id] = | |||
| 6712 | vlv_invert_wm_value(raw->plane[plane_id], | |||
| 6713 | fifo_state->plane[plane_id]); | |||
| 6714 | } | |||
| 6715 | } | |||
| 6716 | ||||
| 6717 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) { | |||
| 6718 | struct intel_crtc_state *crtc_state = | |||
| 6719 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); | |||
| 6720 | ||||
| 6721 | crtc_state->wm.vlv.intermediate = | |||
| 6722 | crtc_state->wm.vlv.optimal; | |||
| 6723 | crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; | |||
| 6724 | } | |||
| 6725 | ||||
| 6726 | vlv_program_watermarks(dev_priv); | |||
| 6727 | ||||
| 6728 | mutex_unlock(&dev_priv->wm.wm_mutex)rw_exit_write(&dev_priv->wm.wm_mutex); | |||
| 6729 | } | |||
| 6730 | ||||
| 6731 | /* | |||
| 6732 | * FIXME should probably kill this and improve | |||
| 6733 | * the real watermark readout/sanitation instead | |||
| 6734 | */ | |||
| 6735 | static void ilk_init_lp_watermarks(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6736 | { | |||
| 6737 | I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45110) })), (intel_uncore_read(&(dev_priv)-> uncore, (((const i915_reg_t){ .reg = (0x45110) }))) & ~(1 << 31))); | |||
| 6738 | I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x4510c) })), (intel_uncore_read(&(dev_priv)-> uncore, (((const i915_reg_t){ .reg = (0x4510c) }))) & ~(1 << 31))); | |||
| 6739 | I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN)intel_uncore_write(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45108) })), (intel_uncore_read(&(dev_priv)-> uncore, (((const i915_reg_t){ .reg = (0x45108) }))) & ~(1 << 31))); | |||
| 6740 | ||||
| 6741 | /* | |||
| 6742 | * Don't touch WM1S_LP_EN here. | |||
| 6743 | * Doing so could cause underruns. | |||
| 6744 | */ | |||
| 6745 | } | |||
| 6746 | ||||
| 6747 | void ilk_wm_get_hw_state(struct drm_i915_privateinteldrm_softc *dev_priv) | |||
| 6748 | { | |||
| 6749 | struct ilk_wm_values *hw = &dev_priv->wm.hw; | |||
| 6750 | struct intel_crtc *crtc; | |||
| 6751 | ||||
| 6752 | ilk_init_lp_watermarks(dev_priv); | |||
| 6753 | ||||
| 6754 | for_each_intel_crtc(&dev_priv->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&dev_priv->drm)->mode_config .crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&dev_priv->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) * __mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) ); })) | |||
| 6755 | ilk_pipe_wm_get_hw_state(crtc); | |||
| 6756 | ||||
| 6757 | hw->wm_lp[0] = I915_READ(WM1_LP_ILK)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45108) }))); | |||
| 6758 | hw->wm_lp[1] = I915_READ(WM2_LP_ILK)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x4510c) }))); | |||
| 6759 | hw->wm_lp[2] = I915_READ(WM3_LP_ILK)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45110) }))); | |||
| 6760 | ||||
| 6761 | hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45120) }))); | |||
| 6762 | if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 7) { | |||
| 6763 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45124) }))); | |||
| 6764 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45128) }))); | |||
| 6765 | } | |||
| 6766 | ||||
| 6767 | if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL) || IS_BROADWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROADWELL)) | |||
| 6768 | hw->partitioning = (I915_READ(WM_MISC)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45260) }))) & WM_MISC_DATA_PARTITION_5_6(1 << 0)) ? | |||
| 6769 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |||
| 6770 | else if (IS_IVYBRIDGE(dev_priv)IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)) | |||
| 6771 | hw->partitioning = (I915_READ(DISP_ARB_CTL2)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45004) }))) & DISP_DATA_PARTITION_5_6(1 << 6)) ? | |||
| 6772 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | |||
| 6773 | ||||
| 6774 | hw->enable_fbc_wm = | |||
| 6775 | !(I915_READ(DISP_ARB_CTL)intel_uncore_read(&(dev_priv)->uncore, (((const i915_reg_t ){ .reg = (0x45000) }))) & DISP_FBC_WM_DIS(1 << 15)); | |||
| 6776 | } | |||
| 6777 | ||||
| 6778 | /** | |||
| 6779 | * intel_update_watermarks - update FIFO watermark values based on current modes | |||
| 6780 | * @crtc: the #intel_crtc on which to compute the WM | |||
| 6781 | * | |||
| 6782 | * Calculate watermark values for the various WM regs based on current mode | |||
| 6783 | * and plane configuration. | |||
| 6784 | * | |||
| 6785 | * There are several cases to deal with here: | |||
| 6786 | * - normal (i.e. non-self-refresh) | |||
| 6787 | * - self-refresh (SR) mode | |||
| 6788 | * - lines are large relative to FIFO size (buffer can hold up to 2) | |||
| 6789 | * - lines are small relative to FIFO size (buffer can hold more than 2 | |||
| 6790 | * lines), so need to account for TLB latency | |||
| 6791 | * | |||
| 6792 | * The normal calculation is: | |||
| 6793 | * watermark = dotclock * bytes per pixel * latency | |||
| 6794 | * where latency is platform & configuration dependent (we assume pessimal | |||
| 6795 | * values here). | |||
| 6796 | * | |||
| 6797 | * The SR calculation is: | |||
| 6798 | * watermark = (trunc(latency/line time)+1) * surface width * | |||
| 6799 | * bytes per pixel | |||
| 6800 | * where | |||
| 6801 | * line time = htotal / dotclock | |||
| 6802 | * surface width = hdisplay for normal plane and 64 for cursor | |||
| 6803 | * and latency is assumed to be high, as above. | |||
| 6804 | * | |||
| 6805 | * The final value programmed to the register should always be rounded up, | |||
| 6806 | * and include an extra 2 entries to account for clock crossings. | |||
| 6807 | * | |||
| 6808 | * We don't use the sprite, so we can ignore that. And on Crestline we have | |||
| 6809 | * to set the non-SR watermarks to 8. | |||
| 6810 | */ | |||
| 6811 | void intel_update_watermarks(struct intel_crtc *crtc) | |||
| 6812 | { | |||
| 6813 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | |||
| 6814 | ||||
| 6815 | if (dev_priv->display.update_wm) | |||
| 6816 | dev_priv->display.update_wm(crtc); | |||
| 6817 | } | |||
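/*
 * Editor's note: the two formulas in the kernel-doc comment above are easier
 * to follow as a worked sketch. The helpers below are NOT part of i915; they
 * are a minimal, hypothetical illustration of the "normal" and self-refresh
 * (SR) calculations described above, with made-up parameter names and a
 * caller-supplied FIFO entry size.
 */
static unsigned int example_normal_wm(unsigned int dotclock_khz, unsigned int cpp,
				      unsigned int latency_us, unsigned int entry_size)
{
	/* bytes drained while waiting out the latency: dotclock * cpp * latency */
	unsigned int bytes = dotclock_khz * cpp * latency_us / 1000;

	/* round up to FIFO entries and add 2 entries for clock crossings */
	return DIV_ROUND_UP(bytes, entry_size) + 2;
}

static unsigned int example_sr_wm(unsigned int htotal, unsigned int hdisplay,
				  unsigned int dotclock_khz, unsigned int cpp,
				  unsigned int latency_us, unsigned int entry_size)
{
	unsigned int line_time_us, bytes;

	/* guard the divisions below; a real implementation would handle this */
	if (!dotclock_khz)
		return 0;

	/* line time = htotal / dotclock, here in microseconds */
	line_time_us = htotal * 1000 / dotclock_khz;
	if (!line_time_us)
		return 0;

	/* (trunc(latency / line time) + 1) * surface width * bytes per pixel */
	bytes = (latency_us / line_time_us + 1) * hdisplay * cpp;

	return DIV_ROUND_UP(bytes, entry_size) + 2;
}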
| 6818 | ||||
| 6819 | void intel_enable_ipc(struct drm_i915_private *dev_priv) | |||
| 6820 | { | |||
| 6821 | u32 val; | |||
| 6822 | ||||
| 6823 | if (!HAS_IPC(dev_priv)) | |||
| 6824 | return; | |||
| 6825 | ||||
| 6826 | val = I915_READ(DISP_ARB_CTL2); | |||
| 6827 | ||||
| 6828 | if (dev_priv->ipc_enabled) | |||
| 6829 | val |= DISP_IPC_ENABLE; | |||
| 6830 | else | |||
| 6831 | val &= ~DISP_IPC_ENABLE; | |||
| 6832 | ||||
| 6833 | I915_WRITE(DISP_ARB_CTL2, val); | |||
| 6834 | } | |||
| 6835 | ||||
| 6836 | static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) | |||
| 6837 | { | |||
| 6838 | /* Display WA #0477 WaDisableIPC: skl */ | |||
| 6839 | if (IS_SKYLAKE(dev_priv)) | |||
| 6840 | return false; | |||
| 6841 | ||||
| 6842 | /* Display WA #1141: SKL:all KBL:all CFL */ | |||
| 6843 | if (IS_KABYLAKE(dev_priv) || | |||
| 6844 | IS_COFFEELAKE(dev_priv) || | |||
| 6845 | IS_COMETLAKE(dev_priv)) | |||
| 6846 | return dev_priv->dram_info.symmetric_memory; | |||
| 6847 | ||||
| 6848 | return true; | |||
| 6849 | } | |||
| 6850 | ||||
| 6851 | void intel_init_ipc(struct drm_i915_private *dev_priv) | |||
| 6852 | { | |||
| 6853 | if (!HAS_IPC(dev_priv)) | |||
| 6854 | return; | |||
| 6855 | ||||
| 6856 | dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv); | |||
| 6857 | ||||
| 6858 | intel_enable_ipc(dev_priv); | |||
| 6859 | } | |||
| 6860 | ||||
| 6861 | static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 6862 | { | |||
| 6863 | /* | |||
| 6864 | * On Ibex Peak and Cougar Point, we need to disable clock | |||
| 6865 | * gating for the panel power sequencer or it will fail to | |||
| 6866 | * start up when no ports are active. | |||
| 6867 | */ | |||
| 6868 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | |||
| 6869 | } | |||
| 6870 | ||||
| 6871 | static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) | |||
| 6872 | { | |||
| 6873 | enum pipe pipe; | |||
| 6874 | ||||
| 6875 | for_each_pipe(dev_priv, pipe) { | |||
| 6876 | I915_WRITE(DSPCNTR(pipe), | |||
| 6877 | I915_READ(DSPCNTR(pipe)) | | |||
| 6878 | DISPPLANE_TRICKLE_FEED_DISABLE); | |||
| 6879 | ||||
| 6880 | I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); | |||
| 6881 | POSTING_READ(DSPSURF(pipe)); | |||
| 6882 | } | |||
| 6883 | } | |||
| 6884 | ||||
| 6885 | static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 6886 | { | |||
| 6887 | u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; | |||
| 6888 | ||||
| 6889 | /* | |||
| 6890 | * Required for FBC | |||
| 6891 | * WaFbcDisableDpfcClockGating:ilk | |||
| 6892 | */ | |||
| 6893 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | | |||
| 6894 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | | |||
| 6895 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; | |||
| 6896 | ||||
| 6897 | I915_WRITE(PCH_3DCGDIS0, | |||
| 6898 | MARIUNIT_CLOCK_GATE_DISABLE | | |||
| 6899 | SVSMUNIT_CLOCK_GATE_DISABLE); | |||
| 6900 | I915_WRITE(PCH_3DCGDIS1, | |||
| 6901 | VFMUNIT_CLOCK_GATE_DISABLE); | |||
| 6902 | ||||
| 6903 | /* | |||
| 6904 | * According to the spec the following bits should be set in | |||
| 6905 | * order to enable memory self-refresh | |||
| 6906 | * The bit 22/21 of 0x42004 | |||
| 6907 | * The bit 5 of 0x42020 | |||
| 6908 | * The bit 15 of 0x45000 | |||
| 6909 | */ | |||
| 6910 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |||
| 6911 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | |||
| 6912 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | |||
| 6913 | dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; | |||
| 6914 | I915_WRITE(DISP_ARB_CTL, | |||
| 6915 | (I915_READ(DISP_ARB_CTL) | | |||
| 6916 | DISP_FBC_WM_DIS)); | |||
| 6917 | ||||
| 6918 | /* | |||
| 6919 | * Based on the document from hardware guys the following bits | |||
| 6920 | * should be set unconditionally in order to enable FBC. | |||
| 6921 | * The bit 22 of 0x42000 | |||
| 6922 | * The bit 22 of 0x42004 | |||
| 6923 | * The bit 7,8,9 of 0x42020. | |||
| 6924 | */ | |||
| 6925 | if (IS_IRONLAKE_M(dev_priv)) { | |||
| 6926 | /* WaFbcAsynchFlipDisableFbcQueue:ilk */ | |||
| 6927 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |||
| 6928 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |||
| 6929 | ILK_FBCQ_DIS); | |||
| 6930 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |||
| 6931 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |||
| 6932 | ILK_DPARB_GATE); | |||
| 6933 | } | |||
| 6934 | ||||
| 6935 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | |||
| 6936 | ||||
| 6937 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |||
| 6938 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |||
| 6939 | ILK_ELPIN_409_SELECT); | |||
| 6940 | ||||
| 6941 | g4x_disable_trickle_feed(dev_priv); | |||
| 6942 | ||||
| 6943 | ibx_init_clock_gating(dev_priv); | |||
| 6944 | } | |||
| 6945 | ||||
| 6946 | static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 6947 | { | |||
| 6948 | enum pipe pipe; | |||
| 6949 | u32 val; | |||
| 6950 | ||||
| 6951 | /* | |||
| 6952 | * On Ibex Peak and Cougar Point, we need to disable clock | |||
| 6953 | * gating for the panel power sequencer or it will fail to | |||
| 6954 | * start up when no ports are active. | |||
| 6955 | */ | |||
| 6956 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | | |||
| 6957 | PCH_DPLUNIT_CLOCK_GATE_DISABLE | | |||
| 6958 | PCH_CPUNIT_CLOCK_GATE_DISABLE); | |||
| 6959 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | |||
| 6960 | DPLS_EDP_PPS_FIX_DIS); | |||
| 6961 | /* The below fixes the weird display corruption, a few pixels shifted | |||
| 6962 | * downward, on (only) LVDS of some HP laptops with IVY. | |||
| 6963 | */ | |||
| 6964 | for_each_pipe(dev_priv, pipe) { | |||
| 6965 | val = I915_READ(TRANS_CHICKEN2(pipe)); | |||
| 6966 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | |||
| 6967 | val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |||
| 6968 | if (dev_priv->vbt.fdi_rx_polarity_inverted) | |||
| 6969 | val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | |||
| 6970 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; | |||
| 6971 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; | |||
| 6972 | I915_WRITE(TRANS_CHICKEN2(pipe), val); | |||
| 6973 | } | |||
| 6974 | /* WADP0ClockGatingDisable */ | |||
| 6975 | for_each_pipe(dev_priv, pipe) { | |||
| 6976 | I915_WRITE(TRANS_CHICKEN1(pipe), | |||
| 6977 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | |||
| 6978 | } | |||
| 6979 | } | |||
| 6980 | ||||
| 6981 | static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) | |||
| 6982 | { | |||
| 6983 | u32 tmp; | |||
| 6984 | ||||
| 6985 | tmp = I915_READ(MCH_SSKPD); | |||
| 6986 | if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) | |||
| 6987 | drm_dbg_kms(&dev_priv->drm, | |||
| 6988 | "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", | |||
| 6989 | tmp); | |||
| 6990 | } | |||
| 6991 | ||||
| 6992 | static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 6993 | { | |||
| 6994 | u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; | |||
| 6995 | ||||
| 6996 | I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); | |||
| 6997 | ||||
| 6998 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |||
| 6999 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |||
| 7000 | ILK_ELPIN_409_SELECT); | |||
| 7001 | ||||
| 7002 | I915_WRITE(GEN6_UCGCTL1, | |||
| 7003 | I915_READ(GEN6_UCGCTL1) | | |||
| 7004 | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | | |||
| 7005 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |||
| 7006 | ||||
| 7007 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock | |||
| 7008 | * gating disable must be set. Failure to set it results in | |||
| 7009 | * flickering pixels due to Z write ordering failures after | |||
| 7010 | * some amount of runtime in the Mesa "fire" demo, and Unigine | |||
| 7011 | * Sanctuary and Tropics, and apparently anything else with | |||
| 7012 | * alpha test or pixel discard. | |||
| 7013 | * | |||
| 7014 | * According to the spec, bit 11 (RCCUNIT) must also be set, | |||
| 7015 | * but we didn't debug actual testcases to find it out. | |||
| 7016 | * | |||
| 7017 | * WaDisableRCCUnitClockGating:snb | |||
| 7018 | * WaDisableRCPBUnitClockGating:snb | |||
| 7019 | */ | |||
| 7020 | I915_WRITE(GEN6_UCGCTL2, | |||
| 7021 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | |||
| 7022 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | |||
| 7023 | ||||
| 7024 | /* | |||
| 7025 | * According to the spec the following bits should be | |||
| 7026 | * set in order to enable memory self-refresh and fbc: | |||
| 7027 | * The bit21 and bit22 of 0x42000 | |||
| 7028 | * The bit21 and bit22 of 0x42004 | |||
| 7029 | * The bit5 and bit7 of 0x42020 | |||
| 7030 | * The bit14 of 0x70180 | |||
| 7031 | * The bit14 of 0x71180 | |||
| 7032 | * | |||
| 7033 | * WaFbcAsynchFlipDisableFbcQueue:snb | |||
| 7034 | */ | |||
| 7035 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |||
| 7036 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |||
| 7037 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | |||
| 7038 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |||
| 7039 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |||
| 7040 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | |||
| 7041 | I915_WRITE(ILK_DSPCLK_GATE_D, | |||
| 7042 | I915_READ(ILK_DSPCLK_GATE_D) | | |||
| 7043 | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | | |||
| 7044 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); | |||
| 7045 | ||||
| 7046 | g4x_disable_trickle_feed(dev_priv); | |||
| 7047 | ||||
| 7048 | cpt_init_clock_gating(dev_priv); | |||
| 7049 | ||||
| 7050 | gen6_check_mch_setup(dev_priv); | |||
| 7051 | } | |||
| 7052 | ||||
| 7053 | static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7054 | { | |||
| 7055 | /* | |||
| 7056 | * TODO: this bit should only be enabled when really needed, then | |||
| 7057 | * disabled when not needed anymore in order to save power. | |||
| 7058 | */ | |||
| 7059 | if (HAS_PCH_LPT_LP(dev_priv)) | |||
| 7060 | I915_WRITE(SOUTH_DSPCLK_GATE_D, | |||
| 7061 | I915_READ(SOUTH_DSPCLK_GATE_D) | | |||
| 7062 | PCH_LP_PARTITION_LEVEL_DISABLE); | |||
| 7063 | ||||
| 7064 | /* WADPOClockGatingDisable:hsw */ | |||
| 7065 | I915_WRITE(TRANS_CHICKEN1(PIPE_A), | |||
| 7066 | I915_READ(TRANS_CHICKEN1(PIPE_A)) | | |||
| 7067 | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); | |||
| 7068 | } | |||
| 7069 | ||||
| 7070 | static void lpt_suspend_hw(struct drm_i915_private *dev_priv) | |||
| 7071 | { | |||
| 7072 | if (HAS_PCH_LPT_LP(dev_priv)) { | |||
| 7073 | u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); | |||
| 7074 | ||||
| 7075 | val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; | |||
| 7076 | I915_WRITE(SOUTH_DSPCLK_GATE_D, val); | |||
| 7077 | } | |||
| 7078 | } | |||
| 7079 | ||||
| 7080 | static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, | |||
| 7081 | int general_prio_credits, | |||
| 7082 | int high_prio_credits) | |||
| 7083 | { | |||
| 7084 | u32 misccpctl; | |||
| 7085 | u32 val; | |||
| 7086 | ||||
| 7087 | /* WaTempDisableDOPClkGating:bdw */ | |||
| 7088 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |||
| 7089 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |||
| 7090 | ||||
| 7091 | val = I915_READ(GEN8_L3SQCREG1); | |||
| 7092 | val &= ~L3_PRIO_CREDITS_MASK; | |||
| 7093 | val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); | |||
| 7094 | val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); | |||
| 7095 | I915_WRITE(GEN8_L3SQCREG1, val); | |||
| 7096 | ||||
| 7097 | /* | |||
| 7098 | * Wait at least 100 clocks before re-enabling clock gating. | |||
| 7099 | * See the definition of L3SQCREG1 in BSpec. | |||
| 7100 | */ | |||
| 7101 | POSTING_READ(GEN8_L3SQCREG1); | |||
| 7102 | udelay(1); | |||
| 7103 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |||
| 7104 | } | |||
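/*
 * Editor's note (worked example, not from the source): with the credit-packing
 * macros as expanded in the report -- L3_GENERAL_PRIO_CREDITS(x) = ((x >> 1) << 19)
 * and L3_HIGH_PRIO_CREDITS(x) = ((x >> 1) << 14) -- the bdw call below,
 * gen8_set_l3sqc_credits(dev_priv, 30, 2), programs the GEN8_L3SQCREG1 credit
 * fields as:
 *
 *   (30 >> 1) << 19 = 15 << 19 = 0x00780000
 *   ( 2 >> 1) << 14 =  1 << 14 = 0x00004000
 *   combined credit field       = 0x00784000
 *
 * i.e. 30 general-priority and 2 high-priority credits, each stored halved.
 */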
| 7105 | ||||
| 7106 | static void icl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7107 | { | |||
| 7108 | /* Wa_1409120013:icl,ehl */ | |||
| 7109 | I915_WRITE(ILK_DPFC_CHICKEN, | |||
| 7110 | ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); | |||
| 7111 | ||||
| 7112 | /* This is not a Wa. Enable to reduce Sampler power */ | |||
| 7113 | I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, | |||
| 7114 | I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); | |||
| 7115 | ||||
| 7116 | /* Wa_14010594013:icl, ehl */ | |||
| 7117 | intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, | |||
| 7118 | 0, CNL_DELAY_PMRSP); | |||
| 7119 | } | |||
| 7120 | ||||
| 7121 | static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7122 | { | |||
| 7123 | /* Wa_1409120013:tgl */ | |||
| 7124 | I915_WRITE(ILK_DPFC_CHICKEN, | |||
| 7125 | ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); | |||
| 7126 | ||||
| 7127 | /* Wa_1409825376:tgl (pre-prod) */ | |||
| 7128 | if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) | |||
| 7129 | I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | | |||
| 7130 | TGL_VRH_GATING_DIS); | |||
| 7131 | ||||
| 7132 | /* Wa_14011059788:tgl */ | |||
| 7133 | intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN, | |||
| 7134 | 0, DFR_DISABLE); | |||
| 7135 | } | |||
| 7136 | ||||
| 7137 | static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7138 | { | |||
| 7139 | if (!HAS_PCH_CNP(dev_priv)) | |||
| 7140 | return; | |||
| 7141 | ||||
| 7142 | /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ | |||
| 7143 | I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | | |||
| 7144 | CNP_PWM_CGE_GATING_DISABLE); | |||
| 7145 | } | |||
| 7146 | ||||
| 7147 | static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7148 | { | |||
| 7149 | u32 val; | |||
| 7150 | cnp_init_clock_gating(dev_priv); | |||
| 7151 | ||||
| 7152 | /* This is not a Wa. Enable for better image quality */ | |||
| 7153 | I915_WRITE(_3D_CHICKEN3, | |||
| 7154 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); | |||
| 7155 | ||||
| 7156 | /* WaEnableChickenDCPR:cnl */ | |||
| 7157 | I915_WRITE(GEN8_CHICKEN_DCPR_1, | |||
| 7158 | I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); | |||
| 7159 | ||||
| 7160 | /* | |||
| 7161 | * WaFbcWakeMemOn:cnl | |||
| 7162 | * Display WA #0859: cnl | |||
| 7163 | */ | |||
| 7164 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |||
| 7165 | DISP_FBC_MEMORY_WAKE); | |||
| 7166 | ||||
| 7167 | val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE); | |||
| 7168 | /* ReadHitWriteOnlyDisable:cnl */ | |||
| 7169 | val |= RCCUNIT_CLKGATE_DIS; | |||
| 7170 | I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val); | |||
| 7171 | ||||
| 7172 | /* Wa_2201832410:cnl */ | |||
| 7173 | val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE); | |||
| 7174 | val |= GWUNIT_CLKGATE_DIS; | |||
| 7175 | I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val); | |||
| 7176 | ||||
| 7177 | /* WaDisableVFclkgate:cnl */ | |||
| 7178 | /* WaVFUnitClockGatingDisable:cnl */ | |||
| 7179 | val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE); | |||
| 7180 | val |= VFUNIT_CLKGATE_DIS; | |||
| 7181 | I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val); | |||
| 7182 | } | |||
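/*
 * Editor's note: several writes above and below use _MASKED_BIT_ENABLE(). In
 * i915 this expands to ((bit) << 16 | (bit)): these "masked" registers treat
 * the upper 16 bits as a per-bit write mask, so only the bits whose mask is
 * set are modified. Illustrative arithmetic only:
 *
 *   _MASKED_BIT_ENABLE(1 << 5) = (0x20 << 16) | 0x20 = 0x00200020
 *
 * which sets bit 5 and leaves every other bit of the register untouched.
 */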
| 7183 | ||||
| 7184 | static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7185 | { | |||
| 7186 | cnp_init_clock_gating(dev_priv); | |||
| 7187 | gen9_init_clock_gating(dev_priv); | |||
| 7188 | ||||
| 7189 | /* | |||
| 7190 | * WaFbcTurnOffFbcWatermark:cfl | |||
| 7191 | * Display WA #0562: cfl | |||
| 7192 | */ | |||
| 7193 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |||
| 7194 | DISP_FBC_WM_DIS); | |||
| 7195 | ||||
| 7196 | /* | |||
| 7197 | * WaFbcNukeOnHostModify:cfl | |||
| 7198 | * Display WA #0873: cfl | |||
| 7199 | */ | |||
| 7200 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |||
| 7201 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |||
| 7202 | } | |||
| 7203 | ||||
| 7204 | static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7205 | { | |||
| 7206 | gen9_init_clock_gating(dev_priv); | |||
| 7207 | ||||
| 7208 | /* WaDisableSDEUnitClockGating:kbl */ | |||
| 7209 | if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) | |||
| 7210 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |||
| 7211 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |||
| 7212 | ||||
| 7213 | /* WaDisableGamClockGating:kbl */ | |||
| 7214 | if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) | |||
| 7215 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | |||
| 7216 | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); | |||
| 7217 | ||||
| 7218 | /* | |||
| 7219 | * WaFbcTurnOffFbcWatermark:kbl | |||
| 7220 | * Display WA #0562: kbl | |||
| 7221 | */ | |||
| 7222 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |||
| 7223 | DISP_FBC_WM_DIS); | |||
| 7224 | ||||
| 7225 | /* | |||
| 7226 | * WaFbcNukeOnHostModify:kbl | |||
| 7227 | * Display WA #0873: kbl | |||
| 7228 | */ | |||
| 7229 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |||
| 7230 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |||
| 7231 | } | |||
| 7232 | ||||
| 7233 | static void skl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7234 | { | |||
| 7235 | gen9_init_clock_gating(dev_priv); | |||
| 7236 | ||||
| 7237 | /* WaDisableDopClockGating:skl */ | |||
| 7238 | I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) & | |||
| 7239 | ~GEN7_DOP_CLOCK_GATE_ENABLE); | |||
| 7240 | ||||
| 7241 | /* WAC6entrylatency:skl */ | |||
| 7242 | I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | | |||
| 7243 | FBC_LLC_FULLY_OPEN); | |||
| 7244 | ||||
| 7245 | /* | |||
| 7246 | * WaFbcTurnOffFbcWatermark:skl | |||
| 7247 | * Display WA #0562: skl | |||
| 7248 | */ | |||
| 7249 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |||
| 7250 | DISP_FBC_WM_DIS); | |||
| 7251 | ||||
| 7252 | /* | |||
| 7253 | * WaFbcNukeOnHostModify:skl | |||
| 7254 | * Display WA #0873: skl | |||
| 7255 | */ | |||
| 7256 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |||
| 7257 | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); | |||
| 7258 | ||||
| 7259 | /* | |||
| 7260 | * WaFbcHighMemBwCorruptionAvoidance:skl | |||
| 7261 | * Display WA #0883: skl | |||
| 7262 | */ | |||
| 7263 | I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | | |||
| 7264 | ILK_DPFC_DISABLE_DUMMY0); | |||
| 7265 | } | |||
| 7266 | ||||
| 7267 | static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7268 | { | |||
| 7269 | enum pipe pipe; | |||
| 7270 | ||||
| 7271 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ | |||
| 7272 | I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), | |||
| 7273 | I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | | |||
| 7274 | HSW_FBCQ_DIS); | |||
| 7275 | ||||
| 7276 | /* WaSwitchSolVfFArbitrationPriority:bdw */ | |||
| 7277 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | |||
| 7278 | ||||
| 7279 | /* WaPsrDPAMaskVBlankInSRD:bdw */ | |||
| 7280 | I915_WRITE(CHICKEN_PAR1_1, | |||
| 7281 | I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); | |||
| 7282 | ||||
| 7283 | /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ | |||
| 7284 | for_each_pipe(dev_priv, pipe) { | |||
| 7285 | I915_WRITE(CHICKEN_PIPESL_1(pipe), | |||
| 7286 | I915_READ(CHICKEN_PIPESL_1(pipe)) | | |||
| 7287 | BDW_DPRS_MASK_VBLANK_SRD); | |||
| 7288 | } | |||
| 7289 | ||||
| 7290 | /* WaVSRefCountFullforceMissDisable:bdw */ | |||
| 7291 | /* WaDSRefCountFullforceMissDisable:bdw */ | |||
| 7292 | I915_WRITE(GEN7_FF_THREAD_MODE, | |||
| 7293 | I915_READ(GEN7_FF_THREAD_MODE) & | |||
| 7294 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |||
| 7295 | ||||
| 7296 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, | |||
| 7297 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |||
| 7298 | ||||
| 7299 | /* WaDisableSDEUnitClockGating:bdw */ | |||
| 7300 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |||
| 7301 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |||
| 7302 | ||||
| 7303 | /* WaProgramL3SqcReg1Default:bdw */ | |||
| 7304 | gen8_set_l3sqc_credits(dev_priv, 30, 2); | |||
| 7305 | ||||
| 7306 | /* WaKVMNotificationOnConfigChange:bdw */ | |||
| 7307 | I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) | |||
| 7308 | | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); | |||
| 7309 | ||||
| 7310 | lpt_init_clock_gating(dev_priv); | |||
| 7311 | ||||
| 7312 | /* WaDisableDopClockGating:bdw | |||
| 7313 | * | |||
| 7314 | * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP | |||
| 7315 | * clock gating. | |||
| 7316 | */ | |||
| 7317 | I915_WRITE(GEN6_UCGCTL1, | |||
| 7318 | I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); | |||
| 7319 | } | |||
| 7320 | ||||
| 7321 | static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7322 | { | |||
| 7323 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ | |||
| 7324 | I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), | |||
| 7325 | I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | | |||
| 7326 | HSW_FBCQ_DIS); | |||
| 7327 | ||||
| 7328 | /* This is required by WaCatErrorRejectionIssue:hsw */ | |||
| 7329 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |||
| 7330 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |||
| 7331 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |||
| 7332 | ||||
| 7333 | /* WaSwitchSolVfFArbitrationPriority:hsw */ | |||
| 7334 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); | |||
| 7335 | ||||
| 7336 | lpt_init_clock_gating(dev_priv); | |||
| 7337 | } | |||
| 7338 | ||||
| 7339 | static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7340 | { | |||
| 7341 | u32 snpcr; | |||
| 7342 | ||||
| 7343 | I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); | |||
| 7344 | ||||
| 7345 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ | |||
| 7346 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |||
| 7347 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |||
| 7348 | ILK_FBCQ_DIS); | |||
| 7349 | ||||
| 7350 | /* WaDisableBackToBackFlipFix:ivb */ | |||
| 7351 | I915_WRITE(IVB_CHICKEN3, | |||
| 7352 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |||
| 7353 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |||
| 7354 | ||||
| 7355 | if (IS_IVB_GT1(dev_priv)) | |||
| 7356 | I915_WRITE(GEN7_ROW_CHICKEN2, | |||
| 7357 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |||
| 7358 | else { | |||
| 7359 | /* must write both registers */ | |||
| 7360 | I915_WRITE(GEN7_ROW_CHICKEN2, | |||
| 7361 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |||
| 7362 | I915_WRITE(GEN7_ROW_CHICKEN2_GT2, | |||
| 7363 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |||
| 7364 | } | |||
| 7365 | ||||
| 7366 | /* | |||
| 7367 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | |||
| 7368 | * This implements the WaDisableRCZUnitClockGating:ivb workaround. | |||
| 7369 | */ | |||
| 7370 | I915_WRITE(GEN6_UCGCTL2, | |||
| 7371 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | |||
| 7372 | ||||
| 7373 | /* This is required by WaCatErrorRejectionIssue:ivb */ | |||
| 7374 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |||
| 7375 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |||
| 7376 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |||
| 7377 | ||||
| 7378 | g4x_disable_trickle_feed(dev_priv); | |||
| 7379 | ||||
| 7380 | snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); | |||
| 7381 | snpcr &= ~GEN6_MBC_SNPCR_MASK; | |||
| 7382 | snpcr |= GEN6_MBC_SNPCR_MED; | |||
| 7383 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | |||
| 7384 | ||||
| 7385 | if (!HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP)) | |||
| 7386 | cpt_init_clock_gating(dev_priv); | |||
| 7387 | ||||
| 7388 | gen6_check_mch_setup(dev_priv); | |||
| 7389 | } | |||
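The writes above all use the masked-bit convention of the "chicken" registers: the upper 16 bits of the written value carry a write-enable mask, so only the bits named in _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() change and the rest of the register is left alone, which is why no read-modify-write is needed. Below is a minimal standalone sketch of that behaviour (not driver code; the helper names and the simulated register are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Model of how the hardware applies a masked write to a 16-bit register. */
static uint16_t apply_masked_write(uint16_t reg, uint32_t write)
{
	uint16_t mask = write >> 16;
	uint16_t val = write & 0xffff;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0x00f0;          /* arbitrary pre-existing contents */
	uint32_t dop_bit = 1u << 0;     /* stand-in for DOP_CLOCK_GATING_DISABLE */

	reg = apply_masked_write(reg, MASKED_BIT_ENABLE(dop_bit));
	printf("after enable:  0x%04x\n", reg);  /* bit 0 set, other bits untouched */

	reg = apply_masked_write(reg, MASKED_BIT_DISABLE(dop_bit));
	printf("after disable: 0x%04x\n", reg);  /* bit 0 cleared again */
	return 0;
}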
| 7390 | ||||
| 7391 | static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7392 | { | |||
| 7393 | /* WaDisableBackToBackFlipFix:vlv */ | |||
| 7394 | I915_WRITE(IVB_CHICKEN3, | |||
| 7395 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | | |||
| 7396 | CHICKEN3_DGMG_DONE_FIX_DISABLE); | |||
| 7397 | ||||
| 7398 | /* WaDisableDopClockGating:vlv */ | |||
| 7399 | I915_WRITE(GEN7_ROW_CHICKEN2, | |||
| 7400 | _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); | |||
| 7401 | ||||
| 7402 | /* This is required by WaCatErrorRejectionIssue:vlv */ | |||
| 7403 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | |||
| 7404 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | |||
| 7405 | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); | |||
| 7406 | ||||
| 7407 | /* | |||
| 7408 | * According to the spec, bit 13 (RCZUNIT) must be set on IVB. | |||
| 7409 | * This implements the WaDisableRCZUnitClockGating:vlv workaround. | |||
| 7410 | */ | |||
| 7411 | I915_WRITE(GEN6_UCGCTL2, | |||
| 7412 | GEN6_RCZUNIT_CLOCK_GATE_DISABLE); | |||
| 7413 | ||||
| 7414 | /* WaDisableL3Bank2xClockGate:vlv | |||
| 7415 | * Disabling L3 clock gating- MMIO 940c[25] = 1 | |||
| 7416 | * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ | |||
| 7417 | I915_WRITE(GEN7_UCGCTL4, | |||
| 7418 | I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); | |||
| 7419 | ||||
| 7420 | /* | |||
| 7421 | * WaDisableVLVClockGating_VBIIssue:vlv | |||
| 7422 | * Disable clock gating on the GCFG unit to prevent a delay | |||
| 7423 | * in the reporting of vblank events. | |||
| 7424 | */ | |||
| 7425 | I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); | |||
| 7426 | } | |||
| 7427 | ||||
| 7428 | static void chv_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7429 | { | |||
| 7430 | /* WaVSRefCountFullforceMissDisable:chv */ | |||
| 7431 | /* WaDSRefCountFullforceMissDisable:chv */ | |||
| 7432 | I915_WRITE(GEN7_FF_THREAD_MODE, | |||
| 7433 | I915_READ(GEN7_FF_THREAD_MODE) & | |||
| 7434 | ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); | |||
| 7435 | ||||
| 7436 | /* WaDisableSemaphoreAndSyncFlipWait:chv */ | |||
| 7437 | I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, | |||
| 7438 | _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); | |||
| 7439 | ||||
| 7440 | /* WaDisableCSUnitClockGating:chv */ | |||
| 7441 | I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | | |||
| 7442 | GEN6_CSUNIT_CLOCK_GATE_DISABLE); | |||
| 7443 | ||||
| 7444 | /* WaDisableSDEUnitClockGating:chv */ | |||
| 7445 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | |||
| 7446 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | |||
| 7447 | ||||
| 7448 | /* | |||
| 7449 | * WaProgramL3SqcReg1Default:chv | |||
| 7450 | * See gfxspecs/Related Documents/Performance Guide/ | |||
| 7451 | * LSQC Setting Recommendations. | |||
| 7452 | */ | |||
| 7453 | gen8_set_l3sqc_credits(dev_priv, 38, 2); | |||
| 7454 | } | |||
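Registers without a masked upper half, such as the UCGCTL family touched above, are instead updated with an explicit read-modify-write: read the current value, OR in the clock-gate-disable bit, and write the whole word back. A standalone sketch of that pattern follows (illustrative only; the register model and bit value are made up):

#include <stdio.h>

static unsigned int fake_mmio;                   /* stands in for an MMIO register */

static unsigned int reg_read(void)      { return fake_mmio; }
static void reg_write(unsigned int v)   { fake_mmio = v; }

int main(void)
{
	const unsigned int gate_disable = 1u << 14;  /* e.g. an SDE-unit gate-disable bit */

	fake_mmio = 0x00000203;                      /* pretend other bits are already set */
	reg_write(reg_read() | gate_disable);        /* preserve them, add ours */
	printf("register now 0x%08x\n", fake_mmio);
	return 0;
}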
| 7455 | ||||
| 7456 | static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7457 | { | |||
| 7458 | u32 dspclk_gate; | |||
| 7459 | ||||
| 7460 | I915_WRITE(RENCLK_GATE_D1, 0); | |||
| 7461 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | |||
| 7462 | GS_UNIT_CLOCK_GATE_DISABLE | | |||
| 7463 | CL_UNIT_CLOCK_GATE_DISABLE); | |||
| 7464 | I915_WRITE(RAMCLK_GATE_D, 0); | |||
| 7465 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | |||
| 7466 | OVRUNIT_CLOCK_GATE_DISABLE | | |||
| 7467 | OVCUNIT_CLOCK_GATE_DISABLE; | |||
| 7468 | if (IS_GM45(dev_priv)) | |||
| 7469 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | |||
| 7470 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | |||
| 7471 | ||||
| 7472 | g4x_disable_trickle_feed(dev_priv); | |||
| 7473 | } | |||
| 7474 | ||||
| 7475 | static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7476 | { | |||
| 7477 | struct intel_uncore *uncore = &dev_priv->uncore; | |||
| 7478 | ||||
| 7479 | intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); | |||
| 7480 | intel_uncore_write(uncore, RENCLK_GATE_D2, 0); | |||
| 7481 | intel_uncore_write(uncore, DSPCLK_GATE_D, 0); | |||
| 7482 | intel_uncore_write(uncore, RAMCLK_GATE_D, 0); | |||
| 7483 | intel_uncore_write16(uncore, DEUC, 0); | |||
| 7484 | intel_uncore_write(uncore, | |||
| 7485 | MI_ARB_STATE, | |||
| 7486 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |||
| 7487 | } | |||
| 7488 | ||||
| 7489 | static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7490 | { | |||
| 7491 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | |||
| 7492 | I965_RCC_CLOCK_GATE_DISABLE | | |||
| 7493 | I965_RCPB_CLOCK_GATE_DISABLE | | |||
| 7494 | I965_ISC_CLOCK_GATE_DISABLE | | |||
| 7495 | I965_FBC_CLOCK_GATE_DISABLE); | |||
| 7496 | I915_WRITE(RENCLK_GATE_D2, 0); | |||
| 7497 | I915_WRITE(MI_ARB_STATE, | |||
| 7498 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |||
| 7499 | } | |||
| 7500 | ||||
| 7501 | static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7502 | { | |||
| 7503 | u32 dstate = I915_READ(D_STATE); | |||
| 7504 | ||||
| 7505 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | |||
| 7506 | DSTATE_DOT_CLOCK_GATING; | |||
| 7507 | I915_WRITE(D_STATE, dstate); | |||
| 7508 | ||||
| 7509 | if (IS_PINEVIEW(dev_priv)) | |||
| 7510 | I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); | |||
| 7511 | ||||
| 7512 | /* IIR "flip pending" means done if this bit is set */ | |||
| 7513 | I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); | |||
| 7514 | ||||
| 7515 | /* interrupts should cause a wake up from C3 */ | |||
| 7516 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); | |||
| 7517 | ||||
| 7518 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | |||
| 7519 | I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); | |||
| 7520 | ||||
| 7521 | I915_WRITE(MI_ARB_STATE, | |||
| 7522 | _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); | |||
| 7523 | } | |||
| 7524 | ||||
| 7525 | static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7526 | { | |||
| 7527 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | |||
| 7528 | ||||
| 7529 | /* interrupts should cause a wake up from C3 */ | |||
| 7530 | I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | | |||
| 7531 | _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); | |||
| 7532 | ||||
| 7533 | I915_WRITE(MEM_MODE, | |||
| 7534 | _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); | |||
| 7535 | ||||
| 7536 | /* | |||
| 7537 | * Have FBC ignore 3D activity since we use software | |||
| 7538 | * render tracking, and otherwise a pure 3D workload | |||
| 7539 | * (even if it just renders a single frame and then does | |||
| 7540 | * absolutely nothing) would not allow FBC to recompress | |||
| 7541 | * until a 2D blit occurs. | |||
| 7542 | */ | |||
| 7543 | I915_WRITE(SCPD0, | |||
| 7544 | _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); | |||
| 7545 | } | |||
| 7546 | ||||
| 7547 | static void i830_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7548 | { | |||
| 7549 | I915_WRITE(MEM_MODE, | |||
| 7550 | _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | | |||
| 7551 | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); | |||
| 7552 | } | |||
| 7553 | ||||
| 7554 | void intel_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7555 | { | |||
| 7556 | dev_priv->display.init_clock_gating(dev_priv); | |||
| 7557 | } | |||
| 7558 | ||||
| 7559 | void intel_suspend_hw(struct drm_i915_private *dev_priv) | |||
| 7560 | { | |||
| 7561 | if (HAS_PCH_LPT(dev_priv)) | |||
| 7562 | lpt_suspend_hw(dev_priv); | |||
| 7563 | } | |||
| 7564 | ||||
| 7565 | static void nop_init_clock_gating(struct drm_i915_private *dev_priv) | |||
| 7566 | { | |||
| 7567 | drm_dbg_kms(&dev_priv->drm, | |||
| 7568 | "No clock gating settings or workarounds applied.\n"); | |||
| 7569 | } | |||
| 7570 | ||||
| 7571 | /** | |||
| 7572 | * intel_init_clock_gating_hooks - setup the clock gating hooks | |||
| 7573 | * @dev_priv: device private | |||
| 7574 | * | |||
| 7575 | * Setup the hooks that configure which clocks of a given platform can be | |||
| 7576 | * gated and also apply various GT and display specific workarounds for these | |||
| 7577 | * platforms. Note that some GT specific workarounds are applied separately | |||
| 7578 | * when GPU contexts or batchbuffers start their execution. | |||
| 7579 | */ | |||
| 7580 | void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |||
| 7581 | { | |||
| 7582 | if (IS_GEN(dev_priv, 12)) | |||
| 7583 | dev_priv->display.init_clock_gating = tgl_init_clock_gating; | |||
| 7584 | else if (IS_GEN(dev_priv, 11)) | |||
| 7585 | dev_priv->display.init_clock_gating = icl_init_clock_gating; | |||
| 7586 | else if (IS_CANNONLAKE(dev_priv)) | |||
| 7587 | dev_priv->display.init_clock_gating = cnl_init_clock_gating; | |||
| 7588 | else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) | |||
| 7589 | dev_priv->display.init_clock_gating = cfl_init_clock_gating; | |||
| 7590 | else if (IS_SKYLAKE(dev_priv)) | |||
| 7591 | dev_priv->display.init_clock_gating = skl_init_clock_gating; | |||
| 7592 | else if (IS_KABYLAKE(dev_priv)) | |||
| 7593 | dev_priv->display.init_clock_gating = kbl_init_clock_gating; | |||
| 7594 | else if (IS_BROXTON(dev_priv)) | |||
| 7595 | dev_priv->display.init_clock_gating = bxt_init_clock_gating; | |||
| 7596 | else if (IS_GEMINILAKE(dev_priv)) | |||
| 7597 | dev_priv->display.init_clock_gating = glk_init_clock_gating; | |||
| 7598 | else if (IS_BROADWELL(dev_priv)) | |||
| 7599 | dev_priv->display.init_clock_gating = bdw_init_clock_gating; | |||
| 7600 | else if (IS_CHERRYVIEW(dev_priv)) | |||
| 7601 | dev_priv->display.init_clock_gating = chv_init_clock_gating; | |||
| 7602 | else if (IS_HASWELL(dev_priv)) | |||
| 7603 | dev_priv->display.init_clock_gating = hsw_init_clock_gating; | |||
| 7604 | else if (IS_IVYBRIDGE(dev_priv)) | |||
| 7605 | dev_priv->display.init_clock_gating = ivb_init_clock_gating; | |||
| 7606 | else if (IS_VALLEYVIEW(dev_priv)) | |||
| 7607 | dev_priv->display.init_clock_gating = vlv_init_clock_gating; | |||
| 7608 | else if (IS_GEN(dev_priv, 6)) | |||
| 7609 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | |||
| 7610 | else if (IS_GEN(dev_priv, 5)) | |||
| 7611 | dev_priv->display.init_clock_gating = ilk_init_clock_gating; | |||
| 7612 | else if (IS_G4X(dev_priv)) | |||
| 7613 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; | |||
| 7614 | else if (IS_I965GM(dev_priv)) | |||
| 7615 | dev_priv->display.init_clock_gating = i965gm_init_clock_gating; | |||
| 7616 | else if (IS_I965G(dev_priv)) | |||
| 7617 | dev_priv->display.init_clock_gating = i965g_init_clock_gating; | |||
| 7618 | else if (IS_GEN(dev_priv, 3)) | |||
| 7619 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; | |||
| 7620 | else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) | |||
| 7621 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | |||
| 7622 | else if (IS_GEN(dev_priv, 2)) | |||
| 7623 | dev_priv->display.init_clock_gating = i830_init_clock_gating; | |||
| 7624 | else { | |||
| 7625 | MISSING_CASE(INTEL_DEVID(dev_priv)); | |||
| 7626 | dev_priv->display.init_clock_gating = nop_init_clock_gating; | |||
| 7627 | } | |||
| 7628 | } | |||
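The function above is a one-time dispatch: the platform checks run once at probe and leave a single function pointer behind, and intel_init_clock_gating() earlier in this file simply calls through dev_priv->display.init_clock_gating without repeating any platform test. A standalone sketch of the same pattern (illustrative only; the struct and function names here are invented):

#include <stdio.h>

struct fake_display {
	void (*init_clock_gating)(void);
};

static void new_platform_init(void) { puts("new-platform workarounds"); }
static void old_platform_init(void) { puts("old-platform workarounds"); }
static void nop_init(void)          { puts("no workarounds applied"); }

/* Runs once at "probe" time, like intel_init_clock_gating_hooks(). */
static void init_hooks(struct fake_display *d, int gen)
{
	if (gen >= 9)
		d->init_clock_gating = new_platform_init;
	else if (gen >= 2)
		d->init_clock_gating = old_platform_init;
	else
		d->init_clock_gating = nop_init;   /* the MISSING_CASE fallback */
}

int main(void)
{
	struct fake_display display;

	init_hooks(&display, 9);
	display.init_clock_gating();   /* what intel_init_clock_gating() does */
	return 0;
}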
| 7629 | ||||
| 7630 | /* Set up chip specific power management-related functions */ | |||
| 7631 | void intel_init_pm(struct drm_i915_private *dev_priv) | |||
| 7632 | { | |||
| 7633 | /* For cxsr */ | |||
| 7634 | if (IS_PINEVIEW(dev_priv)) | |||
| 7635 | pnv_get_mem_freq(dev_priv); | |||
| 7636 | else if (IS_GEN(dev_priv, 5)) | |||
| 7637 | ilk_get_mem_freq(dev_priv); | |||
| 7638 | ||||
| 7639 | if (intel_has_sagv(dev_priv)) | |||
| 7640 | skl_setup_sagv_block_time(dev_priv); | |||
| 7641 | ||||
| 7642 | /* For FIFO watermark updates */ | |||
| 7643 | if (INTEL_GEN(dev_priv) >= 9) { | |||
| 7644 | skl_setup_wm_latency(dev_priv); | |||
| 7645 | dev_priv->display.compute_global_watermarks = skl_compute_wm; | |||
| 7646 | } else if (HAS_PCH_SPLIT(dev_priv)) { | |||
| 7647 | ilk_setup_wm_latency(dev_priv); | |||
| 7648 | ||||
| 7649 | if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] && | |||
| 7650 | dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || | |||
| 7651 | (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] && | |||
| 7652 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { | |||
| 7653 | dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; | |||
| 7654 | dev_priv->display.compute_intermediate_wm = | |||
| 7655 | ilk_compute_intermediate_wm; | |||
| 7656 | dev_priv->display.initial_watermarks = | |||
| 7657 | ilk_initial_watermarks; | |||
| 7658 | dev_priv->display.optimize_watermarks = | |||
| 7659 | ilk_optimize_watermarks; | |||
| 7660 | } else { | |||
| 7661 | drm_dbg_kms(&dev_priv->drm, | |||
| 7662 | "Failed to read display plane latency. " | |||
| 7663 | "Disable CxSR\n"); | |||
| 7664 | } | |||
| 7665 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | |||
| 7666 | vlv_setup_wm_latency(dev_priv); | |||
| 7667 | dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm; | |||
| 7668 | dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm; | |||
| 7669 | dev_priv->display.initial_watermarks = vlv_initial_watermarks; | |||
| 7670 | dev_priv->display.optimize_watermarks = vlv_optimize_watermarks; | |||
| 7671 | dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo; | |||
| 7672 | } else if (IS_G4X(dev_priv)) { | |||
| 7673 | g4x_setup_wm_latency(dev_priv); | |||
| 7674 | dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm; | |||
| 7675 | dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm; | |||
| 7676 | dev_priv->display.initial_watermarks = g4x_initial_watermarks; | |||
| 7677 | dev_priv->display.optimize_watermarks = g4x_optimize_watermarks; | |||
| 7678 | } else if (IS_PINEVIEW(dev_priv)) { | |||
| 7679 | if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), | |||
| 7680 | dev_priv->is_ddr3, | |||
| 7681 | dev_priv->fsb_freq, | |||
| 7682 | dev_priv->mem_freq)) { | |||
| 7683 | drm_info(&dev_priv->drm, | |||
| 7684 | "failed to find known CxSR latency " | |||
| 7685 | "(found ddr%s fsb freq %d, mem freq %d), " | |||
| 7686 | "disabling CxSR\n", | |||
| 7687 | (dev_priv->is_ddr3 == 1) ? "3" : "2", | |||
| 7688 | dev_priv->fsb_freq, dev_priv->mem_freq); | |||
| 7689 | /* Disable CxSR and never update its watermark again */ | |||
| 7690 | intel_set_memory_cxsr(dev_priv, false); | |||
| 7691 | dev_priv->display.update_wm = NULL; | |||
| 7692 | } else | |||
| 7693 | dev_priv->display.update_wm = pnv_update_wm; | |||
| 7694 | } else if (IS_GEN(dev_priv, 4)) { | |||
| 7695 | dev_priv->display.update_wm = i965_update_wm; | |||
| 7696 | } else if (IS_GEN(dev_priv, 3)) { | |||
| 7697 | dev_priv->display.update_wm = i9xx_update_wm; | |||
| 7698 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | |||
| 7699 | } else if (IS_GEN(dev_priv, 2)) { | |||
| 7700 | if (INTEL_NUM_PIPES(dev_priv) == 1) { | |||
| 7701 | dev_priv->display.update_wm = i845_update_wm; | |||
| 7702 | dev_priv->display.get_fifo_size = i845_get_fifo_size; | |||
| 7703 | } else { | |||
| 7704 | dev_priv->display.update_wm = i9xx_update_wm; | |||
| 7705 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | |||
| 7706 | } | |||
| 7707 | } else { | |||
| 7708 | drm_err(&dev_priv->drm, | |||
| 7709 | "unexpected fall-through in %s\n", __func__); | |||
| 7710 | } | |||
| 7711 | } | |||
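Note the guard style intel_init_pm() uses: a watermark hook is only installed when the latency values it depends on are usable, and in the Pineview CxSR case update_wm is left NULL so later callers must check the pointer before using it. A standalone sketch of that guard pattern (illustrative only; the names are invented):

#include <stddef.h>
#include <stdio.h>

struct fake_display {
	void (*update_wm)(void);
};

static void platform_update_wm(void) { puts("updating watermarks"); }

/* Install the hook only when the latency data it needs is usable. */
static void init_pm(struct fake_display *d, unsigned int latency_us)
{
	if (latency_us)
		d->update_wm = platform_update_wm;
	else
		d->update_wm = NULL;   /* e.g. unknown CxSR latency: stay disabled */
}

int main(void)
{
	struct fake_display display;

	init_pm(&display, 0);
	if (display.update_wm)         /* callers must tolerate a missing hook */
		display.update_wm();
	else
		puts("watermark updates disabled");
	return 0;
}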
| 7712 | ||||
| 7713 | void intel_pm_setup(struct drm_i915_private *dev_priv) | |||
| 7714 | { | |||
| 7715 | dev_priv->runtime_pm.suspended = false; | |||
| 7716 | atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); | |||
| 7717 | } | |||
| 7718 | ||||
| 7719 | static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) | |||
| 7720 | { | |||
| 7721 | struct intel_dbuf_state *dbuf_state; | |||
| 7722 | ||||
| 7723 | dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); | |||
| 7724 | if (!dbuf_state) | |||
| 7725 | return NULL; | |||
| 7726 | ||||
| 7727 | return &dbuf_state->base; | |||
| 7728 | } | |||
| 7729 | ||||
| 7730 | static void intel_dbuf_destroy_state(struct intel_global_obj *obj, | |||
| 7731 | struct intel_global_state *state) | |||
| 7732 | { | |||
| 7733 | kfree(state); | |||
| 7734 | } | |||
| 7735 | ||||
| 7736 | static const struct intel_global_state_funcs intel_dbuf_funcs = { | |||
| 7737 | .atomic_duplicate_state = intel_dbuf_duplicate_state, | |||
| 7738 | .atomic_destroy_state = intel_dbuf_destroy_state, | |||
| 7739 | }; | |||
| 7740 | ||||
| 7741 | struct intel_dbuf_state * | |||
| 7742 | intel_atomic_get_dbuf_state(struct intel_atomic_state *state) | |||
| 7743 | { | |||
| 7744 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); | |||
| 7745 | struct intel_global_state *dbuf_state; | |||
| 7746 | ||||
| 7747 | dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj); | |||
| 7748 | if (IS_ERR(dbuf_state)) | |||
| 7749 | return ERR_CAST(dbuf_state); | |||
| 7750 | ||||
| 7751 | return to_intel_dbuf_state(dbuf_state); | |||
| 7752 | } | |||
| 7753 | ||||
| 7754 | int intel_dbuf_init(struct drm_i915_private *dev_priv) | |||
| 7755 | { | |||
| 7756 | struct intel_dbuf_state *dbuf_state; | |||
| 7757 | ||||
| 7758 | dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); | |||
| 7759 | if (!dbuf_state) | |||
| 7760 | return -ENOMEM; | |||
| 7761 | ||||
| 7762 | intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj, | |||
| 7763 | &dbuf_state->base, &intel_dbuf_funcs); | |||
| 7764 | ||||
| 7765 | return 0; | |||
| 7766 | } | |||
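The intel_dbuf_funcs pair registered above follows the usual atomic global-state contract: duplicate_state hands each atomic commit its own heap copy of the committed DBUF state to mutate, and destroy_state frees a copy once it is no longer needed. A standalone sketch of that duplicate/destroy ownership model (illustrative only; the state struct is reduced to a single field):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_dbuf_state {
	unsigned int enabled_slices;
};

/* Give the commit its own copy of the committed state to mutate. */
static struct fake_dbuf_state *duplicate_state(const struct fake_dbuf_state *old)
{
	struct fake_dbuf_state *copy = malloc(sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, old, sizeof(*copy));
	return copy;
}

static void destroy_state(struct fake_dbuf_state *state)
{
	free(state);
}

int main(void)
{
	struct fake_dbuf_state committed = { .enabled_slices = 0x1 };
	struct fake_dbuf_state *staged = duplicate_state(&committed);

	if (!staged)
		return 1;
	staged->enabled_slices |= 0x2;   /* mutate the copy, not the committed state */
	printf("committed=%#x staged=%#x\n",
	       committed.enabled_slices, staged->enabled_slices);
	destroy_state(staged);
	return 0;
}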
| 7767 | ||||
| 7768 | void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) | |||
| 7769 | { | |||
| 7770 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); | |||
| 7771 | const struct intel_dbuf_state *new_dbuf_state = | |||
| 7772 | intel_atomic_get_new_dbuf_state(state); | |||
| 7773 | const struct intel_dbuf_state *old_dbuf_state = | |||
| 7774 | intel_atomic_get_old_dbuf_state(state); | |||
| 7775 | ||||
| 7776 | if (!new_dbuf_state || | |||
| 7777 | new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) | |||
| 7778 | return; | |||
| 7779 | ||||
| 7780 | WARN_ON(!new_dbuf_state->base.changed); | |||
| 7781 | ||||
| 7782 | gen9_dbuf_slices_update(dev_priv, | |||
| 7783 | old_dbuf_state->enabled_slices | | |||
| 7784 | new_dbuf_state->enabled_slices); | |||
| 7785 | } | |||
| 7786 | ||||
| 7787 | void intel_dbuf_post_plane_update(struct intel_atomic_state *state) | |||
| 7788 | { | |||
| 7789 | struct drm_i915_private *dev_priv = to_i915(state->base.dev); | |||
| 7790 | const struct intel_dbuf_state *new_dbuf_state = | |||
| 7791 | intel_atomic_get_new_dbuf_state(state); | |||
| 7792 | const struct intel_dbuf_state *old_dbuf_state = | |||
| 7793 | intel_atomic_get_old_dbuf_state(state); | |||
| 7794 | ||||
| 7795 | if (!new_dbuf_state || | |||
| 7796 | new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) | |||
| 7797 | return; | |||
| 7798 | ||||
| 7799 | WARN_ON(!new_dbuf_state->base.changed); | |||
| 7800 | ||||
| 7801 | gen9_dbuf_slices_update(dev_priv, | |||
| 7802 | new_dbuf_state->enabled_slices); | |||
| 7803 | } |
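Taken together, the pre/post helpers above implement a union-then-trim sequence: before the planes are reprogrammed the enabled DBUF slices are the union of the old and new sets, so nothing still in use loses its buffer mid-update, and only after the update does the set shrink to the new configuration. A standalone sketch of that ordering (illustrative only; the update function just prints the set it is given):

#include <stdio.h>

/* Stand-in for gen9_dbuf_slices_update(): just report the enabled set. */
static void dbuf_slices_update(unsigned int slices)
{
	printf("enabled slices: %#x\n", slices);
}

int main(void)
{
	unsigned int old_slices = 0x1;   /* e.g. slice S1 in use before the update */
	unsigned int new_slices = 0x2;   /* switching to slice S2 */

	dbuf_slices_update(old_slices | new_slices);  /* pre-plane: keep both powered */
	/* ... planes are reprogrammed here ... */
	dbuf_slices_update(new_slices);               /* post-plane: trim to the new set */
	return 0;
}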