Bug Summary

File: dev/pci/drm/i915/intel_uncore.h
Warning: line 308, column 1
Passed-by-value struct argument contains uninitialized data (e.g., field: 'reg')
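
This class of warning fires when a function receives a structure by value and, on at least one path to the call site, a field of that structure was never written. In this report the struct being passed by value is an i915_reg_t register descriptor (the compound literals in the listing below show its single 'reg' field), and the warning is reported at intel_uncore.h line 308. The code below is a minimal sketch of the flagged pattern only; the type and function names are invented for illustration and are not the i915 code path itself.

/*
 * Hypothetical example of "Passed-by-value struct argument contains
 * uninitialized data": a struct is handed to a callee by value while
 * one of its fields may still be uninitialized.
 */
#include <stdint.h>

typedef struct { uint32_t reg; } example_reg_t;

static uint32_t example_read(example_reg_t r)   /* struct passed by value */
{
        return r.reg;
}

uint32_t example_caller(int have_reg)
{
        example_reg_t r;                        /* 'reg' not initialized here */

        if (have_reg)
                r.reg = 0x1000;                 /* written on one path only */

        /* On the !have_reg path the analyzer reports that the struct
         * passed by value contains uninitialized data (field 'reg'). */
        return example_read(r);
}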

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name intel_display_power.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/display/intel_display_power.c

/usr/src/sys/dev/pci/drm/i915/display/intel_display_power.c

1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "display/intel_crt.h"
7#include "display/intel_dp.h"
8
9#include "i915_drv.h"
10#include "i915_irq.h"
11#include "intel_cdclk.h"
12#include "intel_combo_phy.h"
13#include "intel_csr.h"
14#include "intel_display_power.h"
15#include "intel_display_types.h"
16#include "intel_dpio_phy.h"
17#include "intel_hotplug.h"
18#include "intel_pm.h"
19#include "intel_sideband.h"
20#include "intel_tc.h"
21#include "intel_vga.h"
22
23bool_Bool intel_display_power_well_is_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
24 enum i915_power_well_id power_well_id);
25
26const char *
27intel_display_power_domain_str(enum intel_display_power_domain domain)
28{
29 switch (domain) {
30 case POWER_DOMAIN_DISPLAY_CORE:
31 return "DISPLAY_CORE";
32 case POWER_DOMAIN_PIPE_A:
33 return "PIPE_A";
34 case POWER_DOMAIN_PIPE_B:
35 return "PIPE_B";
36 case POWER_DOMAIN_PIPE_C:
37 return "PIPE_C";
38 case POWER_DOMAIN_PIPE_D:
39 return "PIPE_D";
40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 return "PIPE_A_PANEL_FITTER";
42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 return "PIPE_B_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 return "PIPE_C_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 return "PIPE_D_PANEL_FITTER";
48 case POWER_DOMAIN_TRANSCODER_A:
49 return "TRANSCODER_A";
50 case POWER_DOMAIN_TRANSCODER_B:
51 return "TRANSCODER_B";
52 case POWER_DOMAIN_TRANSCODER_C:
53 return "TRANSCODER_C";
54 case POWER_DOMAIN_TRANSCODER_D:
55 return "TRANSCODER_D";
56 case POWER_DOMAIN_TRANSCODER_EDP:
57 return "TRANSCODER_EDP";
58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 return "TRANSCODER_VDSC_PW2";
60 case POWER_DOMAIN_TRANSCODER_DSI_A:
61 return "TRANSCODER_DSI_A";
62 case POWER_DOMAIN_TRANSCODER_DSI_C:
63 return "TRANSCODER_DSI_C";
64 case POWER_DOMAIN_PORT_DDI_A_LANES:
65 return "PORT_DDI_A_LANES";
66 case POWER_DOMAIN_PORT_DDI_B_LANES:
67 return "PORT_DDI_B_LANES";
68 case POWER_DOMAIN_PORT_DDI_C_LANES:
69 return "PORT_DDI_C_LANES";
70 case POWER_DOMAIN_PORT_DDI_D_LANES:
71 return "PORT_DDI_D_LANES";
72 case POWER_DOMAIN_PORT_DDI_E_LANES:
73 return "PORT_DDI_E_LANES";
74 case POWER_DOMAIN_PORT_DDI_F_LANES:
75 return "PORT_DDI_F_LANES";
76 case POWER_DOMAIN_PORT_DDI_G_LANES:
77 return "PORT_DDI_G_LANES";
78 case POWER_DOMAIN_PORT_DDI_H_LANES:
79 return "PORT_DDI_H_LANES";
80 case POWER_DOMAIN_PORT_DDI_I_LANES:
81 return "PORT_DDI_I_LANES";
82 case POWER_DOMAIN_PORT_DDI_A_IO:
83 return "PORT_DDI_A_IO";
84 case POWER_DOMAIN_PORT_DDI_B_IO:
85 return "PORT_DDI_B_IO";
86 case POWER_DOMAIN_PORT_DDI_C_IO:
87 return "PORT_DDI_C_IO";
88 case POWER_DOMAIN_PORT_DDI_D_IO:
89 return "PORT_DDI_D_IO";
90 case POWER_DOMAIN_PORT_DDI_E_IO:
91 return "PORT_DDI_E_IO";
92 case POWER_DOMAIN_PORT_DDI_F_IO:
93 return "PORT_DDI_F_IO";
94 case POWER_DOMAIN_PORT_DDI_G_IO:
95 return "PORT_DDI_G_IO";
96 case POWER_DOMAIN_PORT_DDI_H_IO:
97 return "PORT_DDI_H_IO";
98 case POWER_DOMAIN_PORT_DDI_I_IO:
99 return "PORT_DDI_I_IO";
100 case POWER_DOMAIN_PORT_DSI:
101 return "PORT_DSI";
102 case POWER_DOMAIN_PORT_CRT:
103 return "PORT_CRT";
104 case POWER_DOMAIN_PORT_OTHER:
105 return "PORT_OTHER";
106 case POWER_DOMAIN_VGA:
107 return "VGA";
108 case POWER_DOMAIN_AUDIO:
109 return "AUDIO";
110 case POWER_DOMAIN_AUX_A:
111 return "AUX_A";
112 case POWER_DOMAIN_AUX_B:
113 return "AUX_B";
114 case POWER_DOMAIN_AUX_C:
115 return "AUX_C";
116 case POWER_DOMAIN_AUX_D:
117 return "AUX_D";
118 case POWER_DOMAIN_AUX_E:
119 return "AUX_E";
120 case POWER_DOMAIN_AUX_F:
121 return "AUX_F";
122 case POWER_DOMAIN_AUX_G:
123 return "AUX_G";
124 case POWER_DOMAIN_AUX_H:
125 return "AUX_H";
126 case POWER_DOMAIN_AUX_I:
127 return "AUX_I";
128 case POWER_DOMAIN_AUX_IO_A:
129 return "AUX_IO_A";
130 case POWER_DOMAIN_AUX_C_TBT:
131 return "AUX_C_TBT";
132 case POWER_DOMAIN_AUX_D_TBT:
133 return "AUX_D_TBT";
134 case POWER_DOMAIN_AUX_E_TBT:
135 return "AUX_E_TBT";
136 case POWER_DOMAIN_AUX_F_TBT:
137 return "AUX_F_TBT";
138 case POWER_DOMAIN_AUX_G_TBT:
139 return "AUX_G_TBT";
140 case POWER_DOMAIN_AUX_H_TBT:
141 return "AUX_H_TBT";
142 case POWER_DOMAIN_AUX_I_TBT:
143 return "AUX_I_TBT";
144 case POWER_DOMAIN_GMBUS:
145 return "GMBUS";
146 case POWER_DOMAIN_INIT:
147 return "INIT";
148 case POWER_DOMAIN_MODESET:
149 return "MODESET";
150 case POWER_DOMAIN_GT_IRQ:
151 return "GT_IRQ";
152 case POWER_DOMAIN_DPLL_DC_OFF:
153 return "DPLL_DC_OFF";
154 case POWER_DOMAIN_TC_COLD_OFF:
155 return "TC_COLD_OFF";
156 default:
157 MISSING_CASE(domain)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "domain", (long)(domain)); __builtin_expect(!!(__ret), 0); }
)
;
158 return "?";
159 }
160}
161
162static void intel_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
163 struct i915_power_well *power_well)
164{
165 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "enabling %s\n"
, power_well->desc->name)
;
166 power_well->desc->ops->enable(dev_priv, power_well);
167 power_well->hw_enabled = true1;
168}
169
170static void intel_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
171 struct i915_power_well *power_well)
172{
173 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "disabling %s\n"
, power_well->desc->name)
;
174 power_well->hw_enabled = false0;
175 power_well->desc->ops->disable(dev_priv, power_well);
176}
177
178static void intel_power_well_get(struct drm_i915_privateinteldrm_softc *dev_priv,
179 struct i915_power_well *power_well)
180{
181 if (!power_well->count++)
182 intel_power_well_enable(dev_priv, power_well);
183}
184
185static void intel_power_well_put(struct drm_i915_privateinteldrm_softc *dev_priv,
186 struct i915_power_well *power_well)
187{
188 drm_WARN(&dev_priv->drm, !power_well->count,({ int __ret = !!(!power_well->count); if (__ret) printf("%s %s: "
"Use count on power well %s is already zero", dev_driver_string
((&dev_priv->drm)->dev), "", power_well->desc->
name); __builtin_expect(!!(__ret), 0); })
189 "Use count on power well %s is already zero",({ int __ret = !!(!power_well->count); if (__ret) printf("%s %s: "
"Use count on power well %s is already zero", dev_driver_string
((&dev_priv->drm)->dev), "", power_well->desc->
name); __builtin_expect(!!(__ret), 0); })
190 power_well->desc->name)({ int __ret = !!(!power_well->count); if (__ret) printf("%s %s: "
"Use count on power well %s is already zero", dev_driver_string
((&dev_priv->drm)->dev), "", power_well->desc->
name); __builtin_expect(!!(__ret), 0); })
;
191
192 if (!--power_well->count)
193 intel_power_well_disable(dev_priv, power_well);
194}
195
196/**
197 * __intel_display_power_is_enabled - unlocked check for a power domain
198 * @dev_priv: i915 device instance
199 * @domain: power domain to check
200 *
201 * This is the unlocked version of intel_display_power_is_enabled() and should
202 * only be used from error capture and recovery code where deadlocks are
203 * possible.
204 *
205 * Returns:
206 * True when the power domain is enabled, false otherwise.
207 */
208bool_Bool __intel_display_power_is_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
209 enum intel_display_power_domain domain)
210{
211 struct i915_power_well *power_well;
212 bool_Bool is_enabled;
213
214 if (dev_priv->runtime_pm.suspended)
215 return false0;
216
217 is_enabled = true1;
218
219 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))for ((power_well) = (dev_priv)->power_domains.power_wells +
(dev_priv)->power_domains.power_well_count - 1; (power_well
) - (dev_priv)->power_domains.power_wells >= 0; (power_well
)--) if (!((power_well)->desc->domains & ((1ULL <<
(domain))))) {} else
{
220 if (power_well->desc->always_on)
221 continue;
222
223 if (!power_well->hw_enabled) {
224 is_enabled = false0;
225 break;
226 }
227 }
228
229 return is_enabled;
230}
231
232/**
233 * intel_display_power_is_enabled - check for a power domain
234 * @dev_priv: i915 device instance
235 * @domain: power domain to check
236 *
237 * This function can be used to check the hw power domain state. It is mostly
238 * used in hardware state readout functions. Everywhere else code should rely
239 * upon explicit power domain reference counting to ensure that the hardware
240 * block is powered up before accessing it.
241 *
242 * Callers must hold the relevant modesetting locks to ensure that concurrent
243 * threads can't disable the power well while the caller tries to read a few
244 * registers.
245 *
246 * Returns:
247 * True when the power domain is enabled, false otherwise.
248 */
249bool_Bool intel_display_power_is_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
250 enum intel_display_power_domain domain)
251{
252 struct i915_power_domains *power_domains;
253 bool_Bool ret;
254
255 power_domains = &dev_priv->power_domains;
256
257 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
258 ret = __intel_display_power_is_enabled(dev_priv, domain);
259 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
260
261 return ret;
262}
263
264/*
265 * Starting with Haswell, we have a "Power Down Well" that can be turned off
266 * when not needed anymore. We have 4 registers that can request the power well
267 * to be enabled, and it will only be disabled if none of the registers is
268 * requesting it to be enabled.
269 */
270static void hsw_power_well_post_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
271 u8 irq_pipe_mask, bool_Bool has_vga)
272{
273 if (has_vga)
274 intel_vga_reset_io_mem(dev_priv);
275
276 if (irq_pipe_mask)
277 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
278}
279
280static void hsw_power_well_pre_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
281 u8 irq_pipe_mask)
282{
283 if (irq_pipe_mask)
284 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
285}
286
287#define ICL_AUX_PW_TO_CH(pw_idx)((pw_idx) - 0 + AUX_CH_A) \
288 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A0 + AUX_CH_A)
289
290#define ICL_TBT_AUX_PW_TO_CH(pw_idx)((pw_idx) - 8 + AUX_CH_C) \
291 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT18 + AUX_CH_C)
292
293static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_privateinteldrm_softc *dev_priv,
294 struct i915_power_well *power_well)
295{
296 int pw_idx = power_well->desc->hsw.idx;
297
298 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx)((pw_idx) - 8 + AUX_CH_C) :
299 ICL_AUX_PW_TO_CH(pw_idx)((pw_idx) - 0 + AUX_CH_A);
300}
301
302static struct intel_digital_port *
303aux_ch_to_digital_port(struct drm_i915_privateinteldrm_softc *dev_priv,
304 enum aux_ch aux_ch)
305{
306 struct intel_digital_port *dig_port = NULL((void *)0);
307 struct intel_encoder *encoder;
308
309 for_each_intel_encoder(&dev_priv->drm, encoder)for (encoder = ({ const __typeof( ((__typeof(*encoder) *)0)->
base.head ) *__mptr = ((&(&dev_priv->drm)->mode_config
.encoder_list)->next); (__typeof(*encoder) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*encoder), base.head) );}); &
encoder->base.head != (&(&dev_priv->drm)->mode_config
.encoder_list); encoder = ({ const __typeof( ((__typeof(*encoder
) *)0)->base.head ) *__mptr = (encoder->base.head.next)
; (__typeof(*encoder) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*encoder), base.head) );}))
{
310 /* We'll check the MST primary port */
311 if (encoder->type == INTEL_OUTPUT_DP_MST)
312 continue;
313
314 dig_port = enc_to_dig_port(encoder);
315 if (!dig_port)
316 continue;
317
318 if (dig_port->aux_ch != aux_ch) {
319 dig_port = NULL((void *)0);
320 continue;
321 }
322
323 break;
324 }
325
326 return dig_port;
327}
328
329static void hsw_wait_for_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
330 struct i915_power_well *power_well,
331 bool_Bool timeout_expected)
332{
333 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
334 int pw_idx = power_well->desc->hsw.idx;
335
336 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
337 if (intel_de_wait_for_set(dev_priv, regs->driver,
338 HSW_PWR_WELL_CTL_STATE(pw_idx)(0x1 << ((pw_idx) * 2)), 1)) {
339 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s power well enable timeout\n"
, power_well->desc->name)
340 power_well->desc->name)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s power well enable timeout\n"
, power_well->desc->name)
;
341
342 drm_WARN_ON(&dev_priv->drm, !timeout_expected)({ int __ret = !!((!timeout_expected)); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&dev_priv->drm))->dev), ""
, "drm_WARN_ON(" "!timeout_expected" ")"); __builtin_expect(!
!(__ret), 0); })
;
343
344 }
345}
346
347static u32 hsw_power_well_requesters(struct drm_i915_privateinteldrm_softc *dev_priv,
348 const struct i915_power_well_regs *regs,
349 int pw_idx)
350{
351 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2));
352 u32 ret;
353
354 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
355 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
356 if (regs->kvmr.reg)
357 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
358 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
359
360 return ret;
361}
362
363static void hsw_wait_for_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
364 struct i915_power_well *power_well)
365{
366 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
367 int pw_idx = power_well->desc->hsw.idx;
368 bool_Bool disabled;
369 u32 reqs;
370
371 /*
372 * Bspec doesn't require waiting for PWs to get disabled, but still do
373 * this for paranoia. The known cases where a PW will be forced on:
374 * - a KVMR request on any power well via the KVMR request register
375 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
376 * DEBUG request registers
377 * Skip the wait in case any of the request bits are set and print a
378 * diagnostic message.
379 */
380 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll
* (((1) * 1000))); long wait__ = ((10)); int ret__; assertwaitok
(); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw
(), end__); ; __asm volatile("" : : : "memory"); if ((((disabled
= !(intel_de_read(dev_priv, regs->driver) & (0x1 <<
((pw_idx) * 2)))) || (reqs = hsw_power_well_requesters(dev_priv
, regs, pw_idx))))) { ret__ = 0; break; } if (expired__) { ret__
= -60; break; } usleep_range(wait__, wait__ * 2); if (wait__
< ((1000))) wait__ <<= 1; } ret__; })
381 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll
* (((1) * 1000))); long wait__ = ((10)); int ret__; assertwaitok
(); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw
(), end__); ; __asm volatile("" : : : "memory"); if ((((disabled
= !(intel_de_read(dev_priv, regs->driver) & (0x1 <<
((pw_idx) * 2)))) || (reqs = hsw_power_well_requesters(dev_priv
, regs, pw_idx))))) { ret__ = 0; break; } if (expired__) { ret__
= -60; break; } usleep_range(wait__, wait__ * 2); if (wait__
< ((1000))) wait__ <<= 1; } ret__; })
382 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1)({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll
* (((1) * 1000))); long wait__ = ((10)); int ret__; assertwaitok
(); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw
(), end__); ; __asm volatile("" : : : "memory"); if ((((disabled
= !(intel_de_read(dev_priv, regs->driver) & (0x1 <<
((pw_idx) * 2)))) || (reqs = hsw_power_well_requesters(dev_priv
, regs, pw_idx))))) { ret__ = 0; break; } if (expired__) { ret__
= -60; break; } usleep_range(wait__, wait__ * 2); if (wait__
< ((1000))) wait__ <<= 1; } ret__; })
;
383 if (disabled)
384 return;
385
386 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n"
, power_well->desc->name, !!(reqs & 1), !!(reqs &
2), !!(reqs & 4), !!(reqs & 8))
387 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n"
, power_well->desc->name, !!(reqs & 1), !!(reqs &
2), !!(reqs & 4), !!(reqs & 8))
388 power_well->desc->name,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n"
, power_well->desc->name, !!(reqs & 1), !!(reqs &
2), !!(reqs & 4), !!(reqs & 8))
389 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8))drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n"
, power_well->desc->name, !!(reqs & 1), !!(reqs &
2), !!(reqs & 4), !!(reqs & 8))
;
390}
391
392static void gen9_wait_for_power_well_fuses(struct drm_i915_privateinteldrm_softc *dev_priv,
393 enum skl_power_gate pg)
394{
395 /* Timeout 5us for PG#0, for other PGs 1us */
396 drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((intel_de_wait_for_set(dev_priv, ((const i915_reg_t
){ .reg = (0x42000) }), (1 << (27 - (pg))), 1))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "intel_de_wait_for_set(dev_priv, ((const i915_reg_t){ .reg = (0x42000) }), (1 << (27 - (pg))), 1)"
")"); __builtin_expect(!!(__ret), 0); })
397 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,({ int __ret = !!((intel_de_wait_for_set(dev_priv, ((const i915_reg_t
){ .reg = (0x42000) }), (1 << (27 - (pg))), 1))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "intel_de_wait_for_set(dev_priv, ((const i915_reg_t){ .reg = (0x42000) }), (1 << (27 - (pg))), 1)"
")"); __builtin_expect(!!(__ret), 0); })
398 SKL_FUSE_PG_DIST_STATUS(pg), 1))({ int __ret = !!((intel_de_wait_for_set(dev_priv, ((const i915_reg_t
){ .reg = (0x42000) }), (1 << (27 - (pg))), 1))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "intel_de_wait_for_set(dev_priv, ((const i915_reg_t){ .reg = (0x42000) }), (1 << (27 - (pg))), 1)"
")"); __builtin_expect(!!(__ret), 0); })
;
399}
400
401static void hsw_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
402 struct i915_power_well *power_well)
403{
404 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
405 int pw_idx = power_well->desc->hsw.idx;
406 u32 val;
407
408 if (power_well->desc->hsw.has_fuses) {
409 enum skl_power_gate pg;
410
411 pg = INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx)((pw_idx) - 0 + SKL_PG1) :
412 SKL_PW_CTL_IDX_TO_PG(pw_idx)((pw_idx) - 14 + SKL_PG1);
413 /*
414 * For PW1 we have to wait both for the PW0/PG0 fuse state
415 * before enabling the power well and PW1/PG1's own fuse
416 * state after the enabling. For all other power wells with
417 * fuses we only have to wait for that PW/PG's fuse state
418 * after the enabling.
419 */
420 if (pg == SKL_PG1)
421 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
422 }
423
424 val = intel_de_read(dev_priv, regs->driver);
425 intel_de_write(dev_priv, regs->driver,
426 val | HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2)));
427
428 hsw_wait_for_power_well_enable(dev_priv, power_well, false0);
429
430 /* Display WA #1178: cnl */
431 if (IS_CANNONLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) &&
432 pw_idx >= GLK_PW_CTL_IDX_AUX_B9 &&
433 pw_idx <= CNL_PW_CTL_IDX_AUX_F12) {
434 u32 val;
435
436 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx)((const i915_reg_t){ .reg = ((((const u32 []){ 0x162250, 0x162210
, 0x1622D0, 0x162A90 })[((pw_idx) - 9)])) })
);
437 val |= CNL_AUX_ANAOVRD1_ENABLE(1 << 16) | CNL_AUX_ANAOVRD1_LDO_BYPASS(1 << 23);
438 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx)((const i915_reg_t){ .reg = ((((const u32 []){ 0x162250, 0x162210
, 0x1622D0, 0x162A90 })[((pw_idx) - 9)])) })
, val);
439 }
440
441 if (power_well->desc->hsw.has_fuses) {
442 enum skl_power_gate pg;
443
444 pg = INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx)((pw_idx) - 0 + SKL_PG1) :
445 SKL_PW_CTL_IDX_TO_PG(pw_idx)((pw_idx) - 14 + SKL_PG1);
446 gen9_wait_for_power_well_fuses(dev_priv, pg);
447 }
448
449 hsw_power_well_post_enable(dev_priv,
450 power_well->desc->hsw.irq_pipe_mask,
451 power_well->desc->hsw.has_vga);
452}
453
454static void hsw_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
455 struct i915_power_well *power_well)
456{
457 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
458 int pw_idx = power_well->desc->hsw.idx;
459 u32 val;
460
461 hsw_power_well_pre_disable(dev_priv,
462 power_well->desc->hsw.irq_pipe_mask);
463
464 val = intel_de_read(dev_priv, regs->driver);
465 intel_de_write(dev_priv, regs->driver,
466 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2)));
467 hsw_wait_for_power_well_disable(dev_priv, power_well);
468}
469
470#define ICL_AUX_PW_TO_PHY(pw_idx)((pw_idx) - 0) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A0)
471
472static void
473icl_combo_phy_aux_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
474 struct i915_power_well *power_well)
475{
476 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
477 int pw_idx = power_well->desc->hsw.idx;
478 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx)((pw_idx) - 0);
479 u32 val;
480
481 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv))({ int __ret = !!((!IS_PLATFORM(dev_priv, INTEL_ICELAKE))); if
(__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv
->drm))->dev), "", "drm_WARN_ON(" "!IS_PLATFORM(dev_priv, INTEL_ICELAKE)"
")"); __builtin_expect(!!(__ret), 0); })
;
482
483 val = intel_de_read(dev_priv, regs->driver);
484 intel_de_write(dev_priv, regs->driver,
485 val | HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2)));
486
487 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) < 12) {
488 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)((const i915_reg_t){ .reg = (((((const u32 []){ 0x162000, 0x6C000
, 0x160000, 0x161000 })[phy]) + 4 * (12))) })
);
489 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy)((const i915_reg_t){ .reg = (((((const u32 []){ 0x162000, 0x6C000
, 0x160000, 0x161000 })[phy]) + 4 * (12))) })
,
490 val | ICL_LANE_ENABLE_AUX(1 << 0));
491 }
492
493 hsw_wait_for_power_well_enable(dev_priv, power_well, false0);
494
495 /* Display WA #1178: icl */
496 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A0 && pw_idx <= ICL_PW_CTL_IDX_AUX_B1 &&
497 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
498 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)((const i915_reg_t){ .reg = ((((const u32 []){ 0x162398, 0x6C398
})[((pw_idx) - 0)])) })
);
499 val |= ICL_AUX_ANAOVRD1_ENABLE(1 << 0) | ICL_AUX_ANAOVRD1_LDO_BYPASS(1 << 7);
500 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)((const i915_reg_t){ .reg = ((((const u32 []){ 0x162398, 0x6C398
})[((pw_idx) - 0)])) })
, val);
501 }
502}
503
504static void
505icl_combo_phy_aux_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
506 struct i915_power_well *power_well)
507{
508 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
509 int pw_idx = power_well->desc->hsw.idx;
510 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx)((pw_idx) - 0);
511 u32 val;
512
513 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv))({ int __ret = !!((!IS_PLATFORM(dev_priv, INTEL_ICELAKE))); if
(__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv
->drm))->dev), "", "drm_WARN_ON(" "!IS_PLATFORM(dev_priv, INTEL_ICELAKE)"
")"); __builtin_expect(!!(__ret), 0); })
;
514
515 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)((const i915_reg_t){ .reg = (((((const u32 []){ 0x162000, 0x6C000
, 0x160000, 0x161000 })[phy]) + 4 * (12))) })
);
516 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy)((const i915_reg_t){ .reg = (((((const u32 []){ 0x162000, 0x6C000
, 0x160000, 0x161000 })[phy]) + 4 * (12))) })
,
517 val & ~ICL_LANE_ENABLE_AUX(1 << 0));
518
519 val = intel_de_read(dev_priv, regs->driver);
520 intel_de_write(dev_priv, regs->driver,
521 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2)));
522
523 hsw_wait_for_power_well_disable(dev_priv, power_well);
524}
525
526#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)0
527
528static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
529
530static int power_well_async_ref_count(struct drm_i915_privateinteldrm_softc *dev_priv,
531 struct i915_power_well *power_well)
532{
533 int refs = hweight64(power_well->desc->domains &
534 async_put_domains_mask(&dev_priv->power_domains));
535
536 drm_WARN_ON(&dev_priv->drm, refs > power_well->count)({ int __ret = !!((refs > power_well->count)); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "refs > power_well->count"
")"); __builtin_expect(!!(__ret), 0); })
;
537
538 return refs;
539}
540
541static void icl_tc_port_assert_ref_held(struct drm_i915_privateinteldrm_softc *dev_priv,
542 struct i915_power_well *power_well,
543 struct intel_digital_port *dig_port)
544{
545 /* Bypass the check if all references are released asynchronously */
546 if (power_well_async_ref_count(dev_priv, power_well) ==
547 power_well->count)
548 return;
549
550 if (drm_WARN_ON(&dev_priv->drm, !dig_port)({ int __ret = !!((!dig_port)); if (__ret) printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON("
"!dig_port" ")"); __builtin_expect(!!(__ret), 0); })
)
551 return;
552
553 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) == 11 && dig_port->tc_legacy_port)
554 return;
555
556 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port))({ int __ret = !!((!intel_tc_port_ref_held(dig_port))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON(" "!intel_tc_port_ref_held(dig_port)"
")"); __builtin_expect(!!(__ret), 0); })
;
557}
558
559#else
560
561static void icl_tc_port_assert_ref_held(struct drm_i915_privateinteldrm_softc *dev_priv,
562 struct i915_power_well *power_well,
563 struct intel_digital_port *dig_port)
564{
565}
566
567#endif
568
569#define TGL_AUX_PW_TO_TC_PORT(pw_idx)((pw_idx) - 3) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC13)
570
571static void icl_tc_cold_exit(struct drm_i915_privateinteldrm_softc *i915)
572{
573 int ret, tries = 0;
574
575 while (1) {
576 ret = sandybridge_pcode_write_timeout(i915,
577 ICL_PCODE_EXIT_TCCOLD0x12,
578 0, 250, 1);
579 if (ret != -EAGAIN35 || ++tries == 3)
580 break;
581 drm_msleep(1)mdelay(1);
582 }
583
584 /* Spec states that TC cold exit can take up to 1ms to complete */
585 if (!ret)
586 drm_msleep(1)mdelay(1);
587
588 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
589 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :drm_dev_dbg((&i915->drm)->dev, DRM_UT_KMS, "TC cold block %s\n"
, ret ? "failed" : "succeeded")
590 "succeeded")drm_dev_dbg((&i915->drm)->dev, DRM_UT_KMS, "TC cold block %s\n"
, ret ? "failed" : "succeeded")
;
591}
592
593static void
594icl_tc_phy_aux_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
595 struct i915_power_well *power_well)
596{
597 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
598 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
599 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
600 bool_Bool is_tbt = power_well->desc->hsw.is_tc_tbt;
601 bool_Bool timeout_expected;
602 u32 val;
603
604 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
605
606 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)((const i915_reg_t){ .reg = ((((((&(dev_priv)->__info)
->display_mmio_offset) + 0x64010)) + (aux_ch) * (((((&
(dev_priv)->__info)->display_mmio_offset) + 0x64110)) -
((((&(dev_priv)->__info)->display_mmio_offset) + 0x64010
))))) })
);
607 val &= ~DP_AUX_CH_CTL_TBT_IO(1 << 11);
608 if (is_tbt)
609 val |= DP_AUX_CH_CTL_TBT_IO(1 << 11);
610 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch)((const i915_reg_t){ .reg = ((((((&(dev_priv)->__info)
->display_mmio_offset) + 0x64010)) + (aux_ch) * (((((&
(dev_priv)->__info)->display_mmio_offset) + 0x64110)) -
((((&(dev_priv)->__info)->display_mmio_offset) + 0x64010
))))) })
, val);
611
612 val = intel_de_read(dev_priv, regs->driver);
613 intel_de_write(dev_priv, regs->driver,
614 val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)(0x2 << ((power_well->desc->hsw.idx) * 2)));
615
616 /*
617 * An AUX timeout is expected if the TBT DP tunnel is down,
618 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
619 * exit sequence.
620 */
621 timeout_expected = is_tbt;
622 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) == 11 && dig_port->tc_legacy_port) {
623 icl_tc_cold_exit(dev_priv);
624 timeout_expected = true1;
625 }
626
627 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
628
629 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12 && !is_tbt) {
630 enum tc_port tc_port;
631
632 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx)((power_well->desc->hsw.idx) - 3);
633 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port)((const i915_reg_t){ .reg = ((tc_port) < 4 ? 0x1010A0 : 0x1010A4
) })
,
634 HIP_INDEX_VAL(tc_port, 0x2)((0x2) << (8 * ((tc_port) % 4))));
635
636 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port)((const i915_reg_t){ .reg = (((0x168000) + (tc_port) * ((0x169000
) - (0x168000))) + 0x36C) })
,
637 DKL_CMN_UC_DW27_UC_HEALTH(0x1 << 15), 1))
638 drm_warn(&dev_priv->drm,printf("drm:pid%d:%s *WARNING* " "[drm] " "Timeout waiting TC uC health\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
639 "Timeout waiting TC uC health\n")printf("drm:pid%d:%s *WARNING* " "[drm] " "Timeout waiting TC uC health\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
640 }
641}
642
643static void
644icl_tc_phy_aux_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
645 struct i915_power_well *power_well)
646{
647 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
648 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
649
650 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
651
652 hsw_power_well_disable(dev_priv, power_well);
653}
654
655static void
656icl_aux_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
657 struct i915_power_well *power_well)
658{
659 int pw_idx = power_well->desc->hsw.idx;
660 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx)((pw_idx) - 0); /* non-TBT only */
661 bool_Bool is_tbt = power_well->desc->hsw.is_tc_tbt;
662
663 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
664 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
665 else if (IS_ICELAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_ICELAKE))
666 return icl_combo_phy_aux_power_well_enable(dev_priv,
667 power_well);
668 else
669 return hsw_power_well_enable(dev_priv, power_well);
670}
671
672static void
673icl_aux_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
674 struct i915_power_well *power_well)
675{
676 int pw_idx = power_well->desc->hsw.idx;
677 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx)((pw_idx) - 0); /* non-TBT only */
678 bool_Bool is_tbt = power_well->desc->hsw.is_tc_tbt;
679
680 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
681 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
682 else if (IS_ICELAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_ICELAKE))
683 return icl_combo_phy_aux_power_well_disable(dev_priv,
684 power_well);
685 else
686 return hsw_power_well_disable(dev_priv, power_well);
687}
688
689/*
690 * We should only use the power well if we explicitly asked the hardware to
691 * enable it, so check if it's enabled and also check if we've requested it to
692 * be enabled.
693 */
694static bool_Bool hsw_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
695 struct i915_power_well *power_well)
696{
697 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
698 enum i915_power_well_id id = power_well->desc->id;
699 int pw_idx = power_well->desc->hsw.idx;
700 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2)) |
701 HSW_PWR_WELL_CTL_STATE(pw_idx)(0x1 << ((pw_idx) * 2));
702 u32 val;
703
704 val = intel_de_read(dev_priv, regs->driver);
705
706 /*
707 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
708 * and the MISC_IO PW will be not restored, so check instead for the
709 * BIOS's own request bits, which are forced-on for these power wells
710 * when exiting DC5/6.
711 */
712 if (IS_GEN(dev_priv, 9)(0 + (&(dev_priv)->__info)->gen == (9)) && !IS_GEN9_LP(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
((&(dev_priv)->__info)->is_lp))
&&
713 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
714 val |= intel_de_read(dev_priv, regs->bios);
715
716 return (val & mask) == mask;
717}
718
719static void assert_can_enable_dc9(struct drm_i915_privateinteldrm_softc *dev_priv)
720{
721 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
3))); if (__ret && !__warned) { printf("%s %s: " "DC9 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
722 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
3))); if (__ret && !__warned) { printf("%s %s: " "DC9 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
723 "DC9 already programmed to be enabled.\n")({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
3))); if (__ret && !__warned) { printf("%s %s: " "DC9 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
724 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled to enable DC9.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
725 intel_de_read(dev_priv, DC_STATE_EN) &({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled to enable DC9.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
726 DC_STATE_EN_UPTO_DC5,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled to enable DC9.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
727 "DC5 still not disabled to enable DC9.\n")({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled to enable DC9.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
728 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45404) })) & (0x2 <<
((15) * 2))); if (__ret && !__warned) { printf("%s %s: "
"Power well 2 on.\n", dev_driver_string((&dev_priv->drm
)->dev), ""); __warned = 1; } __builtin_expect(!!(__ret), 0
); })
729 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45404) })) & (0x2 <<
((15) * 2))); if (__ret && !__warned) { printf("%s %s: "
"Power well 2 on.\n", dev_driver_string((&dev_priv->drm
)->dev), ""); __warned = 1; } __builtin_expect(!!(__ret), 0
); })
730 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45404) })) & (0x2 <<
((15) * 2))); if (__ret && !__warned) { printf("%s %s: "
"Power well 2 on.\n", dev_driver_string((&dev_priv->drm
)->dev), ""); __warned = 1; } __builtin_expect(!!(__ret), 0
); })
731 "Power well 2 on.\n")({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45404) })) & (0x2 <<
((15) * 2))); if (__ret && !__warned) { printf("%s %s: "
"Power well 2 on.\n", dev_driver_string((&dev_priv->drm
)->dev), ""); __warned = 1; } __builtin_expect(!!(__ret), 0
); })
;
732 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),({ static int __warned; int __ret = !!(intel_irqs_enabled(dev_priv
)); if (__ret && !__warned) { printf("%s %s: " "Interrupts not disabled yet.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
733 "Interrupts not disabled yet.\n")({ static int __warned; int __ret = !!(intel_irqs_enabled(dev_priv
)); if (__ret && !__warned) { printf("%s %s: " "Interrupts not disabled yet.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
734
735 /*
736 * TODO: check for the following to verify the conditions to enter DC9
737 * state are satisfied:
738 * 1] Check relevant display engine registers to verify if mode set
739 * disable sequence was followed.
740 * 2] Check if display uninitialize sequence is initialized.
741 */
742}
743
744static void assert_can_disable_dc9(struct drm_i915_privateinteldrm_softc *dev_priv)
745{
746 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),({ static int __warned; int __ret = !!(intel_irqs_enabled(dev_priv
)); if (__ret && !__warned) { printf("%s %s: " "Interrupts not disabled yet.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
747 "Interrupts not disabled yet.\n")({ static int __warned; int __ret = !!(intel_irqs_enabled(dev_priv
)); if (__ret && !__warned) { printf("%s %s: " "Interrupts not disabled yet.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
748 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
749 intel_de_read(dev_priv, DC_STATE_EN) &({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
750 DC_STATE_EN_UPTO_DC5,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
751 "DC5 still not disabled.\n")({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0)); if (__ret && !__warned) { printf("%s %s: " "DC5 still not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
752
753 /*
754 * TODO: check for the following to verify DC9 state was indeed
755 * entered before programming to disable it:
756 * 1] Check relevant display engine registers to verify if mode
757 * set disable sequence was followed.
758 * 2] Check if display uninitialize sequence is initialized.
759 */
760}
761
762static void gen9_write_dc_state(struct drm_i915_privateinteldrm_softc *dev_priv,
763 u32 state)
764{
765 int rewrites = 0;
766 int rereads = 0;
767 u32 v;
768
769 intel_de_write(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }), state);
770
771 /* It has been observed that disabling the dc6 state sometimes
772 * doesn't stick and dmc keeps returning old value. Make sure
773 * the write really sticks enough times and also force rewrite until
774 * we are confident that state is exactly what we want.
775 */
776 do {
777 v = intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }));
778
779 if (v != state) {
780 intel_de_write(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }), state);
781 rewrites++;
782 rereads = 0;
783 } else if (rereads++ > 5) {
784 break;
785 }
786
787 } while (rewrites < 100);
788
789 if (v != state)
790 drm_err(&dev_priv->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Writing dc state to 0x%x failed, now 0x%x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
v)
791 "Writing dc state to 0x%x failed, now 0x%x\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Writing dc state to 0x%x failed, now 0x%x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
v)
792 state, v)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Writing dc state to 0x%x failed, now 0x%x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
v)
;
793
794 /* Most of the times we need one retry, avoid spam */
795 if (rewrites > 1)
796 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Rewrote dc state to 0x%x %d times\n"
, state, rewrites)
797 "Rewrote dc state to 0x%x %d times\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Rewrote dc state to 0x%x %d times\n"
, state, rewrites)
798 state, rewrites)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Rewrote dc state to 0x%x %d times\n"
, state, rewrites)
;
799}
800
801static u32 gen9_dc_mask(struct drm_i915_privateinteldrm_softc *dev_priv)
802{
803 u32 mask;
804
805 mask = DC_STATE_EN_UPTO_DC5(1 << 0);
806
807 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12)
808 mask |= DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)) | DC_STATE_EN_UPTO_DC6(2 << 0)
809 | DC_STATE_EN_DC9(1 << 3);
810 else if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11)))
811 mask |= DC_STATE_EN_UPTO_DC6(2 << 0) | DC_STATE_EN_DC9(1 << 3);
812 else if (IS_GEN9_LP(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
((&(dev_priv)->__info)->is_lp))
)
813 mask |= DC_STATE_EN_DC9(1 << 3);
814 else
815 mask |= DC_STATE_EN_UPTO_DC6(2 << 0);
816
817 return mask;
818}
819
820static void gen9_sanitize_dc_state(struct drm_i915_privateinteldrm_softc *dev_priv)
821{
822 u32 val;
823
824 val = intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) })) & gen9_dc_mask(dev_priv);
825
826 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Resetting DC state tracking from %02x to %02x\n"
, dev_priv->csr.dc_state, val)
827 "Resetting DC state tracking from %02x to %02x\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Resetting DC state tracking from %02x to %02x\n"
, dev_priv->csr.dc_state, val)
828 dev_priv->csr.dc_state, val)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Resetting DC state tracking from %02x to %02x\n"
, dev_priv->csr.dc_state, val)
;
829 dev_priv->csr.dc_state = val;
830}
831
832/**
833 * gen9_set_dc_state - set target display C power state
834 * @dev_priv: i915 device instance
835 * @state: target DC power state
836 * - DC_STATE_DISABLE
837 * - DC_STATE_EN_UPTO_DC5
838 * - DC_STATE_EN_UPTO_DC6
839 * - DC_STATE_EN_DC9
840 *
841 * Signal to DMC firmware/HW the target DC power state passed in @state.
842 * DMC/HW can turn off individual display clocks and power rails when entering
843 * a deeper DC power state (higher in number) and turns these back when exiting
844 * that state to a shallower power state (lower in number). The HW will decide
845 * when to actually enter a given state on an on-demand basis, for instance
846 * depending on the active state of display pipes. The state of display
847 * registers backed by affected power rails are saved/restored as needed.
848 *
849 * Based on the above enabling a deeper DC power state is asynchronous wrt.
850 * enabling it. Disabling a deeper power state is synchronous: for instance
851 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
852 * back on and register state is restored. This is guaranteed by the MMIO write
853 * to DC_STATE_EN blocking until the state is restored.
854 */
855static void gen9_set_dc_state(struct drm_i915_privateinteldrm_softc *dev_priv, u32 state)
856{
857 u32 val;
858 u32 mask;
859
860 if (drm_WARN_ON_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((state & ~dev_priv
->csr.allowed_dc_mask)); if (__ret && !__warned) {
printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "state & ~dev_priv->csr.allowed_dc_mask"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
861 state & ~dev_priv->csr.allowed_dc_mask)({ static int __warned; int __ret = !!((state & ~dev_priv
->csr.allowed_dc_mask)); if (__ret && !__warned) {
printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "state & ~dev_priv->csr.allowed_dc_mask"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
)
862 state &= dev_priv->csr.allowed_dc_mask;
863
864 val = intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }));
865 mask = gen9_dc_mask(dev_priv);
866 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Setting DC state from %02x to %02x\n"
, val & mask, state)
867 val & mask, state)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Setting DC state from %02x to %02x\n"
, val & mask, state)
;
868
869 /* Check if DMC is ignoring our DC state requests */
870 if ((val & mask) != dev_priv->csr.dc_state)
871 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "DC state mismatch (0x%x -> 0x%x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , dev_priv
->csr.dc_state, val & mask)
872 dev_priv->csr.dc_state, val & mask)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "DC state mismatch (0x%x -> 0x%x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , dev_priv
->csr.dc_state, val & mask)
;
873
874 val &= ~mask;
875 val |= state;
876
877 gen9_write_dc_state(dev_priv, val);
878
879 dev_priv->csr.dc_state = val & mask;
880}
881
882static u32
883sanitize_target_dc_state(struct drm_i915_privateinteldrm_softc *dev_priv,
884 u32 target_dc_state)
885{
886 u32 states[] = {
887 DC_STATE_EN_UPTO_DC6(2 << 0),
888 DC_STATE_EN_UPTO_DC5(1 << 0),
889 DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)),
890 DC_STATE_DISABLE0,
891 };
892 int i;
893
894 for (i = 0; i < ARRAY_SIZE(states)(sizeof((states)) / sizeof((states)[0])) - 1; i++) {
895 if (target_dc_state != states[i])
896 continue;
897
898 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
899 break;
900
901 target_dc_state = states[i + 1];
902 }
903
904 return target_dc_state;
905}
906
907static void tgl_enable_dc3co(struct drm_i915_privateinteldrm_softc *dev_priv)
908{
909 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling DC3CO\n"
)
;
910 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)));
911}
912
913static void tgl_disable_dc3co(struct drm_i915_privateinteldrm_softc *dev_priv)
914{
915 u32 val;
916
917 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabling DC3CO\n"
)
;
918 val = intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }));
919 val &= ~DC_STATE_DC3CO_STATUS((u32)((1UL << (29)) + 0));
920 intel_de_write(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) }), val);
921 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
922 /*
923 * Delay of 200us DC3CO Exit time B.Spec 49196
924 */
925 usleep_range(200, 210);
926}
927
928static void bxt_enable_dc9(struct drm_i915_privateinteldrm_softc *dev_priv)
929{
930 assert_can_enable_dc9(dev_priv);
931
932 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling DC9\n"
)
;
933 /*
934 * Power sequencer reset is not needed on
935 * platforms with South Display Engine on PCH,
936 * because PPS registers are always on.
937 */
938 if (!HAS_PCH_SPLIT(dev_priv)(((dev_priv)->pch_type) != PCH_NONE))
939 intel_power_sequencer_reset(dev_priv);
940 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9(1 << 3));
941}
942
943static void bxt_disable_dc9(struct drm_i915_privateinteldrm_softc *dev_priv)
944{
945 assert_can_disable_dc9(dev_priv);
946
947 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabling DC9\n"
)
;
948
949 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
950
951 intel_pps_unlock_regs_wa(dev_priv);
952}
953
954static void assert_csr_loaded(struct drm_i915_privateinteldrm_softc *dev_priv)
955{
956 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x80000 + (0) * 4) }))); if (__ret
&& !__warned) { printf("%s %s: " "CSR program storage start is NULL\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
957 !intel_de_read(dev_priv, CSR_PROGRAM(0)),({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x80000 + (0) * 4) }))); if (__ret
&& !__warned) { printf("%s %s: " "CSR program storage start is NULL\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
958 "CSR program storage start is NULL\n")({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x80000 + (0) * 4) }))); if (__ret
&& !__warned) { printf("%s %s: " "CSR program storage start is NULL\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
959 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x8F074) }))); if (__ret &&
!__warned) { printf("%s %s: " "CSR SSP Base Not fine\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __warned = 1; } __builtin_expect
(!!(__ret), 0); })
960 "CSR SSP Base Not fine\n")({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x8F074) }))); if (__ret &&
!__warned) { printf("%s %s: " "CSR SSP Base Not fine\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __warned = 1; } __builtin_expect
(!!(__ret), 0); })
;
961 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x8F004) }))); if (__ret &&
!__warned) { printf("%s %s: " "CSR HTP Not fine\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __warned = 1; } __builtin_expect
(!!(__ret), 0); })
962 "CSR HTP Not fine\n")({ static int __warned; int __ret = !!(!intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x8F004) }))); if (__ret &&
!__warned) { printf("%s %s: " "CSR HTP Not fine\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __warned = 1; } __builtin_expect
(!!(__ret), 0); })
;
963}
964
965static struct i915_power_well *
966lookup_power_well(struct drm_i915_privateinteldrm_softc *dev_priv,
967 enum i915_power_well_id power_well_id)
968{
969 struct i915_power_well *power_well;
970
971 for_each_power_well(dev_priv, power_well)for ((power_well) = (dev_priv)->power_domains.power_wells;
(power_well) - (dev_priv)->power_domains.power_wells <
(dev_priv)->power_domains.power_well_count; (power_well)++
)
972 if (power_well->desc->id == power_well_id)
973 return power_well;
974
975 /*
976 * It's not feasible to add error checking code to the callers since
977 * this condition really shouldn't happen and it doesn't even make sense
978 * to abort things like display initialization sequences. Just return
979 * the first power well and hope the WARN gets reported so we can fix
980 * our driver.
981 */
982 drm_WARN(&dev_priv->drm, 1,({ int __ret = !!(1); if (__ret) printf("%s %s: " "Power well %d not defined for this platform\n"
, dev_driver_string((&dev_priv->drm)->dev), "", power_well_id
); __builtin_expect(!!(__ret), 0); })
983 "Power well %d not defined for this platform\n",({ int __ret = !!(1); if (__ret) printf("%s %s: " "Power well %d not defined for this platform\n"
, dev_driver_string((&dev_priv->drm)->dev), "", power_well_id
); __builtin_expect(!!(__ret), 0); })
984 power_well_id)({ int __ret = !!(1); if (__ret) printf("%s %s: " "Power well %d not defined for this platform\n"
, dev_driver_string((&dev_priv->drm)->dev), "", power_well_id
); __builtin_expect(!!(__ret), 0); })
;
985 return &dev_priv->power_domains.power_wells[0];
986}
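A minimal usage sketch of the helper above (hypothetical example, not part of the analyzed file): callers look a well up by its stable id and then go through its ops vtable, which is exactly what intel_display_power_set_target_dc_state() does further below for the DC-off well.

static bool example_dc_off_is_enabled(struct drm_i915_private *dev_priv)
{
	/* Fetch the "DC off" well by id and query it through its ops. */
	struct i915_power_well *power_well =
		lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	return power_well->desc->ops->is_enabled(dev_priv, power_well);
}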
987
988/**
989 * intel_display_power_set_target_dc_state - Set target dc state.
990 * @dev_priv: i915 device
991 * @state: state which needs to be set as target_dc_state.
992 *
 993 * This function sets the "DC off" power well target_dc_state;
 994 * based upon this target_dc_state, the "DC off" power well will
 995 * enable the desired DC state.
996 */
997void intel_display_power_set_target_dc_state(struct drm_i915_privateinteldrm_softc *dev_priv,
998 u32 state)
999{
1000 struct i915_power_well *power_well;
1001 bool_Bool dc_off_enabled;
1002 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1003
1004 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
1005 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1006
1007 if (drm_WARN_ON(&dev_priv->drm, !power_well)({ int __ret = !!((!power_well)); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&dev_priv->drm))->dev), ""
, "drm_WARN_ON(" "!power_well" ")"); __builtin_expect(!!(__ret
), 0); })
)
1008 goto unlock;
1009
1010 state = sanitize_target_dc_state(dev_priv, state);
1011
1012 if (state == dev_priv->csr.target_dc_state)
1013 goto unlock;
1014
1015 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1016 power_well);
1017 /*
1018 * If the DC off power well is disabled, we need to enable and then
1019 * disable it to effect the target DC state.
1020 */
1021 if (!dc_off_enabled)
1022 power_well->desc->ops->enable(dev_priv, power_well);
1023
1024 dev_priv->csr.target_dc_state = state;
1025
1026 if (!dc_off_enabled)
1027 power_well->desc->ops->disable(dev_priv, power_well);
1028
1029unlock:
1030 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
1031}
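A short usage sketch (hypothetical caller, not taken from the driver): selecting DC3CO as the target DC state. The function above takes power_domains->lock itself and, if needed, briefly toggles the DC-off well so the new target takes effect, so a caller only picks the state.

static void example_select_dc3co(struct drm_i915_private *dev_priv)
{
	/* Ask the DC-off power well to target DC3CO instead of DC5/DC6. */
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}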
1032
1033static void assert_can_enable_dc5(struct drm_i915_privateinteldrm_softc *dev_priv)
1034{
1035 enum i915_power_well_id high_pg;
1036
1037 /* Power wells at this level and above must be disabled for DC5 entry */
1038 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12)
1039 high_pg = ICL_DISP_PW_3;
1040 else
1041 high_pg = SKL_DISP_PW_2;
1042
1043 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(intel_display_power_well_is_enabled
(dev_priv, high_pg)); if (__ret && !__warned) { printf
("%s %s: " "Power wells above platform's DC5 limit still enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1044 intel_display_power_well_is_enabled(dev_priv, high_pg),({ static int __warned; int __ret = !!(intel_display_power_well_is_enabled
(dev_priv, high_pg)); if (__ret && !__warned) { printf
("%s %s: " "Power wells above platform's DC5 limit still enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1045 "Power wells above platform's DC5 limit still enabled.\n")({ static int __warned; int __ret = !!(intel_display_power_well_is_enabled
(dev_priv, high_pg)); if (__ret && !__warned) { printf
("%s %s: " "Power wells above platform's DC5 limit still enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
1046
1047 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC5 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1048 (intel_de_read(dev_priv, DC_STATE_EN) &({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC5 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1049 DC_STATE_EN_UPTO_DC5),({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC5 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1050 "DC5 already programmed to be enabled.\n")({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (1 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC5 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
1051 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1052
1053 assert_csr_loaded(dev_priv);
1054}
1055
1056static void gen9_enable_dc5(struct drm_i915_privateinteldrm_softc *dev_priv)
1057{
1058 assert_can_enable_dc5(dev_priv);
1059
1060 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling DC5\n"
)
;
1061
1062 /* Wa Display #1183: skl,kbl,cfl */
1063 if (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
!((&(dev_priv)->__info)->is_lp))
)
1064 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1((const i915_reg_t){ .reg = (0x46430) }),
1065 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1((const i915_reg_t){ .reg = (0x46430) })) | SKL_SELECT_ALTERNATE_DC_EXIT(1 << 30));
1066
1067 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5(1 << 0));
1068}
1069
1070static void assert_can_enable_dc6(struct drm_i915_privateinteldrm_softc *dev_priv)
1071{
1072 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x48400) })) & (1 <<
31)); if (__ret && !__warned) { printf("%s %s: " "Backlight is not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1073 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x48400) })) & (1 <<
31)); if (__ret && !__warned) { printf("%s %s: " "Backlight is not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1074 "Backlight is not disabled.\n")({ static int __warned; int __ret = !!(intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x48400) })) & (1 <<
31)); if (__ret && !__warned) { printf("%s %s: " "Backlight is not disabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
1075 drm_WARN_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (2 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC6 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1076 (intel_de_read(dev_priv, DC_STATE_EN) &({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (2 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC6 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1077 DC_STATE_EN_UPTO_DC6),({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (2 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC6 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
1078 "DC6 already programmed to be enabled.\n")({ static int __warned; int __ret = !!((intel_de_read(dev_priv
, ((const i915_reg_t){ .reg = (0x45504) })) & (2 <<
0))); if (__ret && !__warned) { printf("%s %s: " "DC6 already programmed to be enabled.\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __warned
= 1; } __builtin_expect(!!(__ret), 0); })
;
1079
1080 assert_csr_loaded(dev_priv);
1081}
1082
1083static void skl_enable_dc6(struct drm_i915_privateinteldrm_softc *dev_priv)
1084{
1085 assert_can_enable_dc6(dev_priv);
1086
1087 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling DC6\n"
)
;
1088
1089 /* Wa Display #1183: skl,kbl,cfl */
1090 if (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
!((&(dev_priv)->__info)->is_lp))
)
1091 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1((const i915_reg_t){ .reg = (0x46430) }),
1092 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1((const i915_reg_t){ .reg = (0x46430) })) | SKL_SELECT_ALTERNATE_DC_EXIT(1 << 30));
1093
1094 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6(2 << 0));
1095}
1096
1097static void hsw_power_well_sync_hw(struct drm_i915_privateinteldrm_softc *dev_priv,
1098 struct i915_power_well *power_well)
1099{
1100 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1101 int pw_idx = power_well->desc->hsw.idx;
1102 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx)(0x2 << ((pw_idx) * 2));
1103 u32 bios_req = intel_de_read(dev_priv, regs->bios);
1104
1105 /* Take over the request bit if set by BIOS. */
1106 if (bios_req & mask) {
1107 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1108
1109 if (!(drv_req & mask))
1110 intel_de_write(dev_priv, regs->driver, drv_req | mask);
1111 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1112 }
1113}
1114
1115static void bxt_dpio_cmn_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1116 struct i915_power_well *power_well)
1117{
1118 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1119}
1120
1121static void bxt_dpio_cmn_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1122 struct i915_power_well *power_well)
1123{
1124 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1125}
1126
1127static bool_Bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1128 struct i915_power_well *power_well)
1129{
1130 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1131}
1132
1133static void bxt_verify_ddi_phy_power_wells(struct drm_i915_privateinteldrm_softc *dev_priv)
1134{
1135 struct i915_power_well *power_well;
1136
1137 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1138 if (power_well->count > 0)
1139 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1140
1141 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1142 if (power_well->count > 0)
1143 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1144
1145 if (IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) {
1146 power_well = lookup_power_well(dev_priv,
1147 GLK_DISP_PW_DPIO_CMN_C);
1148 if (power_well->count > 0)
1149 bxt_ddi_phy_verify_state(dev_priv,
1150 power_well->desc->bxt.phy);
1151 }
1152}
1153
1154static bool_Bool gen9_dc_off_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1155 struct i915_power_well *power_well)
1156{
1157 return ((intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) })) & DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0))) == 0 &&
1158 (intel_de_read(dev_priv, DC_STATE_EN((const i915_reg_t){ .reg = (0x45504) })) & DC_STATE_EN_UPTO_DC5_DC6_MASK0x3) == 0);
1159}
1160
1161static void gen9_assert_dbuf_enabled(struct drm_i915_privateinteldrm_softc *dev_priv)
1162{
1163 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1164 u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1165
1166 drm_WARN(&dev_priv->drm,({ int __ret = !!(hw_enabled_dbuf_slices != enabled_dbuf_slices
); if (__ret) printf("%s %s: " "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", hw_enabled_dbuf_slices
, enabled_dbuf_slices); __builtin_expect(!!(__ret), 0); })
1167 hw_enabled_dbuf_slices != enabled_dbuf_slices,({ int __ret = !!(hw_enabled_dbuf_slices != enabled_dbuf_slices
); if (__ret) printf("%s %s: " "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", hw_enabled_dbuf_slices
, enabled_dbuf_slices); __builtin_expect(!!(__ret), 0); })
1168 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",({ int __ret = !!(hw_enabled_dbuf_slices != enabled_dbuf_slices
); if (__ret) printf("%s %s: " "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", hw_enabled_dbuf_slices
, enabled_dbuf_slices); __builtin_expect(!!(__ret), 0); })
1169 hw_enabled_dbuf_slices,({ int __ret = !!(hw_enabled_dbuf_slices != enabled_dbuf_slices
); if (__ret) printf("%s %s: " "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", hw_enabled_dbuf_slices
, enabled_dbuf_slices); __builtin_expect(!!(__ret), 0); })
1170 enabled_dbuf_slices)({ int __ret = !!(hw_enabled_dbuf_slices != enabled_dbuf_slices
); if (__ret) printf("%s %s: " "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", hw_enabled_dbuf_slices
, enabled_dbuf_slices); __builtin_expect(!!(__ret), 0); })
;
1171}
1172
1173static void gen9_disable_dc_states(struct drm_i915_privateinteldrm_softc *dev_priv)
1174{
1175 struct intel_cdclk_config cdclk_config = {};
1176
1177 if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0))) {
1178 tgl_disable_dc3co(dev_priv);
1179 return;
1180 }
1181
1182 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
1183
1184 dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1185 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1186 drm_WARN_ON(&dev_priv->drm,({ int __ret = !!((intel_cdclk_needs_modeset(&dev_priv->
cdclk.hw, &cdclk_config))); if (__ret) printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON("
"intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_config)"
")"); __builtin_expect(!!(__ret), 0); })
1187 intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,({ int __ret = !!((intel_cdclk_needs_modeset(&dev_priv->
cdclk.hw, &cdclk_config))); if (__ret) printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON("
"intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_config)"
")"); __builtin_expect(!!(__ret), 0); })
1188 &cdclk_config))({ int __ret = !!((intel_cdclk_needs_modeset(&dev_priv->
cdclk.hw, &cdclk_config))); if (__ret) printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON("
"intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_config)"
")"); __builtin_expect(!!(__ret), 0); })
;
1189
1190 gen9_assert_dbuf_enabled(dev_priv);
1191
1192 if (IS_GEN9_LP(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
((&(dev_priv)->__info)->is_lp))
)
1193 bxt_verify_ddi_phy_power_wells(dev_priv);
1194
1195 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 11)
1196 /*
1197 * DMC retains HW context only for port A; the other combo
1198 * PHY's HW context for port B is lost after DC transitions,
1199 * so we need to restore it manually.
1200 */
1201 intel_combo_phy_init(dev_priv);
1202}
1203
1204static void gen9_dc_off_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1205 struct i915_power_well *power_well)
1206{
1207 gen9_disable_dc_states(dev_priv);
1208}
1209
1210static void gen9_dc_off_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1211 struct i915_power_well *power_well)
1212{
1213 if (!dev_priv->csr.dmc_payload)
1214 return;
1215
1216 switch (dev_priv->csr.target_dc_state) {
1217 case DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)):
1218 tgl_enable_dc3co(dev_priv);
1219 break;
1220 case DC_STATE_EN_UPTO_DC6(2 << 0):
1221 skl_enable_dc6(dev_priv);
1222 break;
1223 case DC_STATE_EN_UPTO_DC5(1 << 0):
1224 gen9_enable_dc5(dev_priv);
1225 break;
1226 }
1227}
1228
1229static void i9xx_power_well_sync_hw_noop(struct drm_i915_privateinteldrm_softc *dev_priv,
1230 struct i915_power_well *power_well)
1231{
1232}
1233
1234static void i9xx_always_on_power_well_noop(struct drm_i915_privateinteldrm_softc *dev_priv,
1235 struct i915_power_well *power_well)
1236{
1237}
1238
1239static bool_Bool i9xx_always_on_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1240 struct i915_power_well *power_well)
1241{
1242 return true1;
1243}
1244
1245static void i830_pipes_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1246 struct i915_power_well *power_well)
1247{
1248 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
pipe_offsets[PIPE_A] - (&(dev_priv)->__info)->pipe_offsets
[PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset
)) })
) & PIPECONF_ENABLE(1 << 31)) == 0)
1249 i830_enable_pipe(dev_priv, PIPE_A);
1250 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
pipe_offsets[PIPE_B] - (&(dev_priv)->__info)->pipe_offsets
[PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset
)) })
) & PIPECONF_ENABLE(1 << 31)) == 0)
1251 i830_enable_pipe(dev_priv, PIPE_B);
1252}
1253
1254static void i830_pipes_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1255 struct i915_power_well *power_well)
1256{
1257 i830_disable_pipe(dev_priv, PIPE_B);
1258 i830_disable_pipe(dev_priv, PIPE_A);
1259}
1260
1261static bool_Bool i830_pipes_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1262 struct i915_power_well *power_well)
1263{
1264 return intel_de_read(dev_priv, PIPECONF(PIPE_A)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
pipe_offsets[PIPE_A] - (&(dev_priv)->__info)->pipe_offsets
[PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset
)) })
) & PIPECONF_ENABLE(1 << 31) &&
1265 intel_de_read(dev_priv, PIPECONF(PIPE_B)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
pipe_offsets[PIPE_B] - (&(dev_priv)->__info)->pipe_offsets
[PIPE_A] + (0x70008) + ((&(dev_priv)->__info)->display_mmio_offset
)) })
) & PIPECONF_ENABLE(1 << 31);
1266}
1267
1268static void i830_pipes_power_well_sync_hw(struct drm_i915_privateinteldrm_softc *dev_priv,
1269 struct i915_power_well *power_well)
1270{
1271 if (power_well->count > 0)
1272 i830_pipes_power_well_enable(dev_priv, power_well);
1273 else
1274 i830_pipes_power_well_disable(dev_priv, power_well);
1275}
1276
1277static void vlv_set_power_well(struct drm_i915_privateinteldrm_softc *dev_priv,
1278 struct i915_power_well *power_well, bool_Bool enable)
1279{
1280 int pw_idx = power_well->desc->vlv.idx;
1281 u32 mask;
1282 u32 state;
1283 u32 ctrl;
1284
1285 mask = PUNIT_PWRGT_MASK(pw_idx)(3 << ((pw_idx) * 2));
1286 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx)(0 << ((pw_idx) * 2)) :
1287 PUNIT_PWRGT_PWR_GATE(pw_idx)(3 << ((pw_idx) * 2));
1288
1289 vlv_punit_get(dev_priv);
1290
1291#define COND \
1292 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS0x61) & mask) == state)
1293
1294 if (COND)
1295 goto out;
1296
1297 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL0x60);
1298 ctrl &= ~mask;
1299 ctrl |= state;
1300 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL0x60, ctrl);
1301
1302 if (wait_for(COND, 100)({ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll
* (((100) * 1000))); long wait__ = ((10)); int ret__; assertwaitok
(); for (;;) { const _Bool expired__ = ktime_after(ktime_get_raw
(), end__); ; __asm volatile("" : : : "memory"); if (((COND))
) { ret__ = 0; break; } if (expired__) { ret__ = -60; break; }
usleep_range(wait__, wait__ * 2); if (wait__ < ((1000))) wait__
<<= 1; } ret__; })
)
1303 drm_err(&dev_priv->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "timeout setting power well state %08x (%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
vlv_punit_read(dev_priv, 0x60))
1304 "timeout setting power well state %08x (%08x)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "timeout setting power well state %08x (%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
vlv_punit_read(dev_priv, 0x60))
1305 state,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "timeout setting power well state %08x (%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
vlv_punit_read(dev_priv, 0x60))
1306 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL))printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "timeout setting power well state %08x (%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , state,
vlv_punit_read(dev_priv, 0x60))
;
1307
1308#undef COND
1309
1310out:
1311 vlv_punit_put(dev_priv);
1312}
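The COND/wait_for() construct above is a bounded poll of the Punit power-gate status register. A stand-alone sketch of the same idea without the macros (hypothetical helper; simplified to a fixed 10us polling interval, and it assumes the caller already holds the Punit side-band access via vlv_punit_get()):

static int example_wait_pwrgt_state(struct drm_i915_private *dev_priv,
				    u32 mask, u32 state, int timeout_ms)
{
	int waited_us = 0;

	while ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) != state) {
		if (waited_us >= timeout_ms * 1000)
			return -ETIMEDOUT; /* wait_for() also resolves to -ETIMEDOUT */
		udelay(10);
		waited_us += 10;
	}

	return 0;
}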
1313
1314static void vlv_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1315 struct i915_power_well *power_well)
1316{
1317 vlv_set_power_well(dev_priv, power_well, true1);
1318}
1319
1320static void vlv_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1321 struct i915_power_well *power_well)
1322{
1323 vlv_set_power_well(dev_priv, power_well, false0);
1324}
1325
1326static bool_Bool vlv_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1327 struct i915_power_well *power_well)
1328{
1329 int pw_idx = power_well->desc->vlv.idx;
1330 bool_Bool enabled = false0;
1331 u32 mask;
1332 u32 state;
1333 u32 ctrl;
1334
1335 mask = PUNIT_PWRGT_MASK(pw_idx)(3 << ((pw_idx) * 2));
1336 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx)(0 << ((pw_idx) * 2));
1337
1338 vlv_punit_get(dev_priv);
1339
1340 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS0x61) & mask;
1341 /*
1342 * We only ever set the power-on and power-gate states; anything
1343 * else is unexpected.
1344 */
1345 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&({ int __ret = !!((state != (0 << ((pw_idx) * 2)) &&
state != (3 << ((pw_idx) * 2)))); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&dev_priv->drm))->dev), ""
, "drm_WARN_ON(" "state != (0 << ((pw_idx) * 2)) && state != (3 << ((pw_idx) * 2))"
")"); __builtin_expect(!!(__ret), 0); })
1346 state != PUNIT_PWRGT_PWR_GATE(pw_idx))({ int __ret = !!((state != (0 << ((pw_idx) * 2)) &&
state != (3 << ((pw_idx) * 2)))); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&dev_priv->drm))->dev), ""
, "drm_WARN_ON(" "state != (0 << ((pw_idx) * 2)) && state != (3 << ((pw_idx) * 2))"
")"); __builtin_expect(!!(__ret), 0); })
;
1347 if (state == ctrl)
1348 enabled = true1;
1349
1350 /*
1351 * A transient state at this point would mean some unexpected party
1352 * is poking at the power controls too.
1353 */
1354 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL0x60) & mask;
1355 drm_WARN_ON(&dev_priv->drm, ctrl != state)({ int __ret = !!((ctrl != state)); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&dev_priv->drm))->dev), ""
, "drm_WARN_ON(" "ctrl != state" ")"); __builtin_expect(!!(__ret
), 0); })
;
1356
1357 vlv_punit_put(dev_priv);
1358
1359 return enabled;
1360}
1361
1362static void vlv_init_display_clock_gating(struct drm_i915_privateinteldrm_softc *dev_priv)
1363{
1364 u32 val;
1365
1366 /*
1367 * On driver load, a pipe may be active and driving a DSI display.
1368 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1369 * (and never recovering) in this case. intel_dsi_post_disable() will
1370 * clear it when we turn off the display.
1371 */
1372 val = intel_de_read(dev_priv, DSPCLK_GATE_D((const i915_reg_t){ .reg = (((&(dev_priv)->__info)->
display_mmio_offset) + 0x6200) })
);
1373 val &= DPOUNIT_CLOCK_GATE_DISABLE(1 << 11);
1374 val |= VRHUNIT_CLOCK_GATE_DISABLE(1 << 28);
1375 intel_de_write(dev_priv, DSPCLK_GATE_D((const i915_reg_t){ .reg = (((&(dev_priv)->__info)->
display_mmio_offset) + 0x6200) })
, val);
1376
1377 /*
1378 * Disable trickle feed and enable pnd deadline calculation
1379 */
1380 intel_de_write(dev_priv, MI_ARB_VLV((const i915_reg_t){ .reg = (0x180000 + 0x6504) }),
1381 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE(1 << 2));
1382 intel_de_write(dev_priv, CBR1_VLV((const i915_reg_t){ .reg = (0x180000 + 0x70400) }), 0);
1383
1384 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0)({ int __ret = !!(((&(dev_priv)->__runtime)->rawclk_freq
== 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(
((&dev_priv->drm))->dev), "", "drm_WARN_ON(" "(&(dev_priv)->__runtime)->rawclk_freq == 0"
")"); __builtin_expect(!!(__ret), 0); })
;
1385 intel_de_write(dev_priv, RAWCLK_FREQ_VLV((const i915_reg_t){ .reg = (0x180000 + 0x6024) }),
1386 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,((((&(dev_priv)->__runtime)->rawclk_freq) + ((1000)
/ 2)) / (1000))
1387 1000)((((&(dev_priv)->__runtime)->rawclk_freq) + ((1000)
/ 2)) / (1000))
);
1388}
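The RAWCLK_FREQ_VLV write above converts the cached raw clock value with DIV_ROUND_CLOSEST by 1000, which (assuming rawclk_freq is kept in kHz, as that divisor suggests) yields a rounded MHz value. For illustration (hypothetical value):

/*
 * Example: rawclk_freq = 200000 (kHz)
 *   DIV_ROUND_CLOSEST(200000, 1000) = 200, i.e. the value written to
 *   RAWCLK_FREQ_VLV would be the rounded MHz figure.
 */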
1389
1390static void vlv_display_power_well_init(struct drm_i915_privateinteldrm_softc *dev_priv)
1391{
1392 struct intel_encoder *encoder;
1393 enum pipe pipe;
1394
1395 /*
1396 * Enable the CRI clock source so we can get at the
1397 * display and the reference clock for VGA
1398 * hotplug / manual detection. Supposedly DSI also
1399 * needs the ref clock up and running.
1400 *
1401 * CHV DPLL B/C have some issues if VGA mode is enabled.
1402 */
1403 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__info)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1404 u32 val = intel_de_read(dev_priv, DPLL(pipe)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(pipe)]))
})
);
1405
1406 val |= DPLL_REF_CLK_ENABLE_VLV(1 << 29) | DPLL_VGA_MODE_DIS(1 << 28);
1407 if (pipe != PIPE_A)
1408 val |= DPLL_INTEGRATED_CRI_CLK_VLV(1 << 14);
1409
1410 intel_de_write(dev_priv, DPLL(pipe)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(pipe)]))
})
, val);
1411 }
1412
1413 vlv_init_display_clock_gating(dev_priv);
1414
1415 spin_lock_irq(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
1416 valleyview_enable_display_irqs(dev_priv);
1417 spin_unlock_irq(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
1418
1419 /*
1420 * During driver initialization/resume we can avoid restoring the
1421 * part of the HW/SW state that will be explicitly initialized anyway.
1422 */
1423 if (dev_priv->power_domains.initializing)
1424 return;
1425
1426 intel_hpd_init(dev_priv);
1427
1428 /* Re-enable the ADPA, if we have one */
1429 for_each_intel_encoder(&dev_priv->drm, encoder)for (encoder = ({ const __typeof( ((__typeof(*encoder) *)0)->
base.head ) *__mptr = ((&(&dev_priv->drm)->mode_config
.encoder_list)->next); (__typeof(*encoder) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*encoder), base.head) );}); &
encoder->base.head != (&(&dev_priv->drm)->mode_config
.encoder_list); encoder = ({ const __typeof( ((__typeof(*encoder
) *)0)->base.head ) *__mptr = (encoder->base.head.next)
; (__typeof(*encoder) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*encoder), base.head) );}))
{
1430 if (encoder->type == INTEL_OUTPUT_ANALOG)
1431 intel_crt_reset(&encoder->base);
1432 }
1433
1434 intel_vga_redisable_power_on(dev_priv);
1435
1436 intel_pps_unlock_regs_wa(dev_priv);
1437}
1438
1439static void vlv_display_power_well_deinit(struct drm_i915_privateinteldrm_softc *dev_priv)
1440{
1441 spin_lock_irq(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
1442 valleyview_disable_display_irqs(dev_priv);
1443 spin_unlock_irq(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
1444
1445 /* make sure we're done processing display irqs */
1446 intel_synchronize_irq(dev_priv);
1447
1448 intel_power_sequencer_reset(dev_priv);
1449
1450 /* Prevent us from re-enabling polling by accident in late suspend */
1451#ifdef __linux__
1452 if (!dev_priv->drm.dev->power.is_suspended)
1453#else
1454 if (!cold)
1455#endif
1456 intel_hpd_poll_init(dev_priv);
1457}
1458
1459static void vlv_display_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1460 struct i915_power_well *power_well)
1461{
1462 vlv_set_power_well(dev_priv, power_well, true1);
1463
1464 vlv_display_power_well_init(dev_priv);
1465}
1466
1467static void vlv_display_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1468 struct i915_power_well *power_well)
1469{
1470 vlv_display_power_well_deinit(dev_priv);
1471
1472 vlv_set_power_well(dev_priv, power_well, false0);
1473}
1474
1475static void vlv_dpio_cmn_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1476 struct i915_power_well *power_well)
1477{
1478 /* since ref/cri clock was enabled */
1479 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1480
1481 vlv_set_power_well(dev_priv, power_well, true1);
1482
1483 /*
1484 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1485 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1486 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1487 * b. The other bits such as sfr settings / modesel may all
1488 * be set to 0.
1489 *
1490 * This should only be done on init and resume from S3 with
1491 * both PLLs disabled, or we risk losing DPIO and PLL
1492 * synchronization.
1493 */
1494 intel_de_write(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) }),
1495 intel_de_read(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) })) | DPIO_CMNRST(1 << 0));
1496}
1497
1498static void vlv_dpio_cmn_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1499 struct i915_power_well *power_well)
1500{
1501 enum pipe pipe;
1502
1503 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__info)->pipe_mask & (1UL <<
(pipe)))) {} else
1504 assert_pll_disabled(dev_priv, pipe)assert_pll(dev_priv, pipe, 0);
1505
1506 /* Assert common reset */
1507 intel_de_write(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) }),
1508 intel_de_read(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) })) & ~DPIO_CMNRST(1 << 0));
1509
1510 vlv_set_power_well(dev_priv, power_well, false0);
1511}
1512
1513#define POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
(GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)(((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0)))
)
1514
1515#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
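BITS_SET() checks that every bit in the given mask is set, not merely some of them; a few concrete evaluations (hypothetical values, for illustration only):

/*
 * BITS_SET(0xf0, 0x30) == 1   all requested bits are present
 * BITS_SET(0xf0, 0x18) == 0   only one of the two requested bits is present
 * BITS_SET(0xf0, 0x0f) == 0   none of the requested bits are present
 */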
1516
1517static void assert_chv_phy_status(struct drm_i915_privateinteldrm_softc *dev_priv)
1518{
1519 struct i915_power_well *cmn_bc =
1520 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1521 struct i915_power_well *cmn_d =
1522 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1523 u32 phy_control = dev_priv->chv_phy_control;
1524 u32 phy_status = 0;
1525 u32 phy_status_mask = 0xffffffff;
1526
1527 /*
1528 * The BIOS can leave the PHY in some weird state
1529 * where it doesn't fully power down some parts.
1530 * Disable the asserts until the PHY has been fully
1531 * reset (ie. the power well has been disabled at
1532 * least once).
1533 */
1534 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1535 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0)(1 << (6 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0)))) |
1536 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + (0)))) |
1537 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + (1)))) |
1538 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1)(1 << (6 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1)))) |
1539 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + (0)))) |
1540 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + (1)))));
1541
1542 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1543 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0)(1 << (6 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0)))) |
1544 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0)(1 << (8 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + (0)))) |
1545 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)(1 << (8 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + (1)))));
1546
1547 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1548 phy_status |= PHY_POWERGOOD(DPIO_PHY0)(((DPIO_PHY0) == DPIO_PHY0) ? (1 << 31) : (1 << 30
))
;
1549
1550 /* this assumes override is only used to enable lanes */
1551 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)(1 << (2 * (DPIO_PHY0) + (DPIO_CH0) + 27))) == 0)
1552 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0)((0xf) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11));
1553
1554 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)(1 << (2 * (DPIO_PHY0) + (DPIO_CH1) + 27))) == 0)
1555 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)((0xf) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11));
1556
1557 /* CL1 is on whenever anything is on in either channel */
1558 if (BITS_SET(phy_control,
1559 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0)((0xf) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11)) |
1560 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)((0xf) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11))))
1561 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0)(1 << (6 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0))));
1562
1563 /*
1564 * The DPLLB check accounts for the pipe B + port A usage
1565 * with CL2 powered up but all the lanes in the second channel
1566 * powered down.
1567 */
1568 if (BITS_SET(phy_control,
1569 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)((0xf) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11))) &&
1570 (intel_de_read(dev_priv, DPLL(PIPE_B)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(PIPE_B)]
)) })
) & DPLL_VCO_ENABLE(1 << 31)) == 0)
1571 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1)(1 << (6 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1))));
1572
1573 if (BITS_SET(phy_control,
1574 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)((0x3) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11))))
1575 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + (0))));
1576 if (BITS_SET(phy_control,
1577 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)((0xc) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11))))
1578 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + (1))));
1579
1580 if (BITS_SET(phy_control,
1581 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)((0x3) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11))))
1582 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + (0))));
1583 if (BITS_SET(phy_control,
1584 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)((0xc) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11))))
1585 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)(1 << (8 - (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + (1))));
1586 }
1587
1588 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1589 phy_status |= PHY_POWERGOOD(DPIO_PHY1)(((DPIO_PHY1) == DPIO_PHY0) ? (1 << 31) : (1 << 30
))
;
1590
1591 /* this assumes override is only used to enable lanes */
1592 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)(1 << (2 * (DPIO_PHY1) + (DPIO_CH0) + 27))) == 0)
1593 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)((0xf) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11));
1594
1595 if (BITS_SET(phy_control,
1596 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)((0xf) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11))))
1597 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0)(1 << (6 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0))));
1598
1599 if (BITS_SET(phy_control,
1600 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)((0x3) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11))))
1601 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0)(1 << (8 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + (0))));
1602 if (BITS_SET(phy_control,
1603 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)((0xc) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11))))
1604 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)(1 << (8 - (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + (1))));
1605 }
1606
1607 phy_status &= phy_status_mask;
1608
1609 /*
1610 * The PHY may be busy with some initial calibration and whatnot,
1611 * so the power state can take a while to actually change.
1612 */
1613 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS((const i915_reg_t){ .reg = (0x180000 + 0x60104) }),
1614 phy_status_mask, phy_status, 10))
1615 drm_err(&dev_priv->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_de_read
(dev_priv, ((const i915_reg_t){ .reg = (0x180000 + 0x60104) }
)) & phy_status_mask, phy_status, dev_priv->chv_phy_control
)
1616 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_de_read
(dev_priv, ((const i915_reg_t){ .reg = (0x180000 + 0x60104) }
)) & phy_status_mask, phy_status, dev_priv->chv_phy_control
)
1617 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_de_read
(dev_priv, ((const i915_reg_t){ .reg = (0x180000 + 0x60104) }
)) & phy_status_mask, phy_status, dev_priv->chv_phy_control
)
1618 phy_status, dev_priv->chv_phy_control)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , intel_de_read
(dev_priv, ((const i915_reg_t){ .reg = (0x180000 + 0x60104) }
)) & phy_status_mask, phy_status, dev_priv->chv_phy_control
)
;
1619}
1620
1621#undef BITS_SET
1622
1623static void chv_dpio_cmn_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1624 struct i915_power_well *power_well)
1625{
1626 enum dpio_phy phy;
1627 enum pipe pipe;
1628 u32 tmp;
1629
1630 drm_WARN_ON_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1631 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1632 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D)({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
;
1633
1634 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1635 pipe = PIPE_A;
1636 phy = DPIO_PHY0;
1637 } else {
1638 pipe = PIPE_C;
1639 phy = DPIO_PHY1;
1640 }
1641
1642 /* since ref/cri clock was enabled */
1643 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1644 vlv_set_power_well(dev_priv, power_well, true1);
1645
1646 /* Poll for phypwrgood signal */
1647 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS((const i915_reg_t){ .reg = (0x180000 + 0x60104) }),
1648 PHY_POWERGOOD(phy)(((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30)), 1))
1649 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Display PHY %d is not power up\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , phy)
1650 phy)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Display PHY %d is not power up\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , phy)
;
1651
1652 vlv_dpio_get(dev_priv);
1653
1654 /* Enable dynamic power down */
1655 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW280x8170);
1656 tmp |= DPIO_DYNPWRDOWNEN_CH0(1 << 22) | DPIO_CL1POWERDOWNEN(1 << 23) |
1657 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ(3 << 0);
1658 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW280x8170, tmp);
1659
1660 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1661 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH10x8098);
1662 tmp |= DPIO_DYNPWRDOWNEN_CH1(1 << 28);
1663 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH10x8098, tmp);
1664 } else {
1665 /*
1666 * Force the non-existing CL2 off. BXT does this
1667 * too, so maybe it saves some power even though
1668 * CL2 doesn't exist?
1669 */
1670 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW300x8178);
1671 tmp |= DPIO_CL2_LDOFUSE_PWRENB(1 << 6);
1672 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW300x8178, tmp);
1673 }
1674
1675 vlv_dpio_put(dev_priv);
1676
1677 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy)(1 << (phy));
1678 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL((const i915_reg_t){ .reg = (0x180000 + 0x60100) }),
1679 dev_priv->chv_phy_control);
1680
1681 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
1682 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
1683 phy, dev_priv->chv_phy_control)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
;
1684
1685 assert_chv_phy_status(dev_priv);
1686}
1687
1688static void chv_dpio_cmn_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1689 struct i915_power_well *power_well)
1690{
1691 enum dpio_phy phy;
1692
1693 drm_WARN_ON_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1694 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1695 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D)({ static int __warned; int __ret = !!((power_well->desc->
id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->
id != CHV_DISP_PW_DPIO_CMN_D)); if (__ret && !__warned
) { printf("%s %s: " "%s", dev_driver_string(((&dev_priv->
drm))->dev), "", "drm_WARN_ON_ONCE(" "power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
;
1696
1697 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1698 phy = DPIO_PHY0;
1699 assert_pll_disabled(dev_priv, PIPE_A)assert_pll(dev_priv, PIPE_A, 0);
1700 assert_pll_disabled(dev_priv, PIPE_B)assert_pll(dev_priv, PIPE_B, 0);
1701 } else {
1702 phy = DPIO_PHY1;
1703 assert_pll_disabled(dev_priv, PIPE_C)assert_pll(dev_priv, PIPE_C, 0);
1704 }
1705
1706 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy)(1 << (phy));
1707 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL((const i915_reg_t){ .reg = (0x180000 + 0x60100) }),
1708 dev_priv->chv_phy_control);
1709
1710 vlv_set_power_well(dev_priv, power_well, false0);
1711
1712 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
1713 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
1714 phy, dev_priv->chv_phy_control)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n"
, phy, dev_priv->chv_phy_control)
;
1715
1716 /* PHY is fully reset now, so we can enable the PHY state asserts */
1717 dev_priv->chv_phy_assert[phy] = true1;
1718
1719 assert_chv_phy_status(dev_priv);
1720}
1721
1722static void assert_chv_phy_powergate(struct drm_i915_privateinteldrm_softc *dev_priv, enum dpio_phy phy,
1723 enum dpio_channel ch, bool_Bool override, unsigned int mask)
1724{
1725 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1726 u32 reg, val, expected, actual;
1727
1728 /*
1729 * The BIOS can leave the PHY in some weird state
1730 * where it doesn't fully power down some parts.
1731 * Disable the asserts until the PHY has been fully
1732 * reset (ie. the power well has been disabled at
1733 * least once).
1734 */
1735 if (!dev_priv->chv_phy_assert[phy])
1736 return;
1737
1738 if (ch == DPIO_CH0)
1739 reg = _CHV_CMN_DW0_CH00x8100;
1740 else
1741 reg = _CHV_CMN_DW6_CH10x8098;
1742
1743 vlv_dpio_get(dev_priv);
1744 val = vlv_dpio_read(dev_priv, pipe, reg);
1745 vlv_dpio_put(dev_priv);
1746
1747 /*
1748 * This assumes !override is only used when the port is disabled.
1749 * All lanes should power down even without the override when
1750 * the port is disabled.
1751 */
1752 if (!override || mask == 0xf) {
1753 expected = DPIO_ALLDL_POWERDOWN(1 << 1) | DPIO_ANYDL_POWERDOWN(1 << 0);
1754 /*
1755 * If CH1 common lane is not active anymore
1756 * (eg. for pipe B DPLL) the entire channel will
1757 * shut down, which causes the common lane registers
1758 * to read as 0. That means we can't actually check
1759 * the lane power down status bits, but as the entire
1760 * register reads as 0 it's a good indication that the
1761 * channel is indeed entirely powered down.
1762 */
1763 if (ch == DPIO_CH1 && val == 0)
1764 expected = 0;
1765 } else if (mask != 0x0) {
1766 expected = DPIO_ANYDL_POWERDOWN(1 << 0);
1767 } else {
1768 expected = 0;
1769 }
1770
1771 if (ch == DPIO_CH0)
1772 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH018;
1773 else
1774 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH129;
1775 actual &= DPIO_ALLDL_POWERDOWN(1 << 1) | DPIO_ANYDL_POWERDOWN(1 << 0);
1776
1777 drm_WARN(&dev_priv->drm, actual != expected,({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1778 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1779 !!(actual & DPIO_ALLDL_POWERDOWN),({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1780 !!(actual & DPIO_ANYDL_POWERDOWN),({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1781 !!(expected & DPIO_ALLDL_POWERDOWN),({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1782 !!(expected & DPIO_ANYDL_POWERDOWN),({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
1783 reg, val)({ int __ret = !!(actual != expected); if (__ret) printf("%s %s: "
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", !!(
actual & (1 << 1)), !!(actual & (1 << 0))
, !!(expected & (1 << 1)), !!(expected & (1 <<
0)), reg, val); __builtin_expect(!!(__ret), 0); })
;
1784}
1785
1786bool_Bool chv_phy_powergate_ch(struct drm_i915_privateinteldrm_softc *dev_priv, enum dpio_phy phy,
1787 enum dpio_channel ch, bool_Bool override)
1788{
1789 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1790 bool_Bool was_override;
1791
1792 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
1793
1794 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)(1 << (2 * (phy) + (ch) + 27));
1795
1796 if (override == was_override)
1797 goto out;
1798
1799 if (override)
1800 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)(1 << (2 * (phy) + (ch) + 27));
1801 else
1802 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)(1 << (2 * (phy) + (ch) + 27));
1803
1804 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL((const i915_reg_t){ .reg = (0x180000 + 0x60100) }),
1805 dev_priv->chv_phy_control);
1806
1807 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n"
, phy, ch, dev_priv->chv_phy_control)
1808 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n"
, phy, ch, dev_priv->chv_phy_control)
1809 phy, ch, dev_priv->chv_phy_control)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n"
, phy, ch, dev_priv->chv_phy_control)
;
1810
1811 assert_chv_phy_status(dev_priv);
1812
1813out:
1814 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
1815
1816 return was_override;
1817}
1818
1819void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1820 bool_Bool override, unsigned int mask)
1821{
1822 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(encoder->base.dev);
1823 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1824 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1825 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1826
1827 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
1828
1829 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch)((0xf) << (8 * (phy) + 4 * (ch) + 11));
1830 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)((mask) << (8 * (phy) + 4 * (ch) + 11));
1831
1832 if (override)
1833 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)(1 << (2 * (phy) + (ch) + 27));
1834 else
1835 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)(1 << (2 * (phy) + (ch) + 27));
1836
1837 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL((const i915_reg_t){ .reg = (0x180000 + 0x60100) }),
1838 dev_priv->chv_phy_control);
1839
1840 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n"
, phy, ch, mask, dev_priv->chv_phy_control)
1841 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n"
, phy, ch, mask, dev_priv->chv_phy_control)
1842 phy, ch, mask, dev_priv->chv_phy_control)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n"
, phy, ch, mask, dev_priv->chv_phy_control)
;
1843
1844 assert_chv_phy_status(dev_priv);
1845
1846 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1847
1848 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
1849}
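Usage sketch (hypothetical caller and lane mask, for illustration): a 2-lane configuration would keep only lanes 0-1 of the encoder's channel powered and enable the override, leaving lanes 2-3 to power down.

static void example_powergate_upper_lanes(struct intel_encoder *encoder)
{
	/* Keep lanes 0-1 powered; let lanes 2-3 of this channel power down. */
	chv_phy_powergate_lanes(encoder, true, 0x3);
}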
1850
1851static bool_Bool chv_pipe_power_well_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
1852 struct i915_power_well *power_well)
1853{
1854 enum pipe pipe = PIPE_A;
1855 bool_Bool enabled;
1856 u32 state, ctrl;
1857
1858 vlv_punit_get(dev_priv);
1859
1860 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM0x36) & DP_SSS_MASK(pipe)((0x3) << (2 * ((pipe)) + 16));
1861 /*
1862 * We only ever set the power-on and power-gate states; anything
1863 * else is unexpected.
1864 */
1865 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&({ int __ret = !!((state != ((0x0) << (2 * ((pipe)) + 16
)) && state != ((0x3) << (2 * ((pipe)) + 16))))
; if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
dev_priv->drm))->dev), "", "drm_WARN_ON(" "state != ((0x0) << (2 * ((pipe)) + 16)) && state != ((0x3) << (2 * ((pipe)) + 16))"
")"); __builtin_expect(!!(__ret), 0); })
1866 state != DP_SSS_PWR_GATE(pipe))({ int __ret = !!((state != ((0x0) << (2 * ((pipe)) + 16
)) && state != ((0x3) << (2 * ((pipe)) + 16))))
; if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
dev_priv->drm))->dev), "", "drm_WARN_ON(" "state != ((0x0) << (2 * ((pipe)) + 16)) && state != ((0x3) << (2 * ((pipe)) + 16))"
")"); __builtin_expect(!!(__ret), 0); })
;
1867 enabled = state == DP_SSS_PWR_ON(pipe)((0x0) << (2 * ((pipe)) + 16));
1868
1869 /*
1870 * A transient state at this point would mean some unexpected party
1871 * is poking at the power controls too.
1872 */
1873 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM0x36) & DP_SSC_MASK(pipe)((0x3) << (2 * ((pipe))));
1874 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state)({ int __ret = !!((ctrl << 16 != state)); if (__ret) printf
("%s %s: " "%s", dev_driver_string(((&dev_priv->drm))->
dev), "", "drm_WARN_ON(" "ctrl << 16 != state" ")"); __builtin_expect
(!!(__ret), 0); })
;
1875
1876 vlv_punit_put(dev_priv);
1877
1878 return enabled;
1879}
1880
1881static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1882 struct i915_power_well *power_well,
1883 bool enable)
1884{
1885 enum pipe pipe = PIPE_A;
1886 u32 state;
1887 u32 ctrl;
1888
1889 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1890
1891 vlv_punit_get(dev_priv);
1892
1893#define COND \
1894 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1895
1896 if (COND)
1897 goto out;
1898
1899 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1900 ctrl &= ~DP_SSC_MASK(pipe);
1901 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1902 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1903
1904 if (wait_for(COND, 100))
1905 drm_err(&dev_priv->drm,
1906 "timeout setting power well state %08x (%08x)\n",
1907 state,
1908 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1909
1910#undef COND
1911
1912out:
1913 vlv_punit_put(dev_priv);
1914}
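/*
 * The wait_for(COND, 100) above polls the Punit until the requested state
 * sticks or 100 ms elapse.  Below is a rough standalone sketch of that
 * poll-with-timeout pattern; the helper names and the userspace clock are
 * illustrative, not the kernel macro itself (which also backs off its sleep
 * interval exponentially).
 */
#include <stdbool.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool poll_until(bool (*cond)(void *), void *arg, long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	for (;;) {
		if (cond(arg))
			return true;	/* condition met */
		if (now_ms() >= deadline)
			return false;	/* timed out; caller logs an error */
		/* brief sleep between polls */
		nanosleep(&(struct timespec){ .tv_nsec = 10000 }, NULL);
	}
}

static bool always_true(void *arg)
{
	(void)arg;
	return true;
}

int main(void)
{
	return poll_until(always_true, NULL, 100) ? 0 : 1;
}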
1915
1916static void chv_pipe_power_well_sync_hw(struct drm_i915_privateinteldrm_softc *dev_priv,
1917 struct i915_power_well *power_well)
1918{
1919 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL((const i915_reg_t){ .reg = (0x180000 + 0x60100) }),
1920 dev_priv->chv_phy_control);
1921}
1922
1923static void chv_pipe_power_well_enable(struct drm_i915_privateinteldrm_softc *dev_priv,
1924 struct i915_power_well *power_well)
1925{
1926 chv_set_pipe_power_well(dev_priv, power_well, true1);
1927
1928 vlv_display_power_well_init(dev_priv);
1929}
1930
1931static void chv_pipe_power_well_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
1932 struct i915_power_well *power_well)
1933{
1934 vlv_display_power_well_deinit(dev_priv);
1935
1936 chv_set_pipe_power_well(dev_priv, power_well, false0);
1937}
1938
1939static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1940{
1941 return power_domains->async_put_domains[0] |
1942 power_domains->async_put_domains[1];
1943}
1944
1945#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1946
1947static bool
1948assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1949{
1950 struct drm_i915_private *i915 = container_of(power_domains,
1951 struct drm_i915_private,
1952 power_domains);
1953 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1954 power_domains->async_put_domains[1]);
1955}
1956
1957static bool
1958__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1959{
1960 struct drm_i915_private *i915 = container_of(power_domains,
1961 struct drm_i915_private,
1962 power_domains);
1963 enum intel_display_power_domain domain;
1964 bool err = false;
1965
1966 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1967 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1968 !!__async_put_domains_mask(power_domains));
1969
1970 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1971 err |= drm_WARN_ON(&i915->drm,
1972 power_domains->domain_use_count[domain] != 1);
1973
1974 return !err;
1975}
1976
1977static void print_power_domains(struct i915_power_domains *power_domains,
1978 const char *prefix, u64 mask)
1979{
1980 struct drm_i915_private *i915 = container_of(power_domains,
1981 struct drm_i915_private,
1982 power_domains);
1983 enum intel_display_power_domain domain;
1984
1985 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1986 for_each_power_domain(domain, mask)
1987 drm_dbg(&i915->drm, "%s use_count %d\n",
1988 intel_display_power_domain_str(domain),
1989 power_domains->domain_use_count[domain]);
1990}
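/*
 * for_each_power_domain() above simply walks every bit position up to
 * POWER_DOMAIN_NUM and skips the ones not set in the u64 mask.  A small
 * standalone sketch of the same iteration; the constant and helper name
 * are stand-ins for the driver's real enum:
 */
#include <stdio.h>
#include <stdint.h>

#define POWER_DOMAIN_NUM 64	/* stand-in for the driver's enum count */

static void print_set_domains(uint64_t mask)
{
	unsigned int domain;

	for (domain = 0; domain < POWER_DOMAIN_NUM; domain++) {
		if (!((1ULL << domain) & mask))
			continue;	/* domain not in the mask */
		printf("domain %u is set\n", domain);
	}
}

int main(void)
{
	print_set_domains((1ULL << 3) | (1ULL << 17));
	return 0;
}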
1991
1992static void
1993print_async_put_domains_state(struct i915_power_domains *power_domains)
1994{
1995 struct drm_i915_private *i915 = container_of(power_domains,
1996 struct drm_i915_private,
1997 power_domains);
1998
1999 drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2000 power_domains->async_put_wakeref);
2001
2002 print_power_domains(power_domains, "async_put_domains[0]",
2003 power_domains->async_put_domains[0]);
2004 print_power_domains(power_domains, "async_put_domains[1]",
2005 power_domains->async_put_domains[1]);
2006}
2007
2008static void
2009verify_async_put_domains_state(struct i915_power_domains *power_domains)
2010{
2011 if (!__async_put_domains_state_ok(power_domains))
2012 print_async_put_domains_state(power_domains);
2013}
2014
2015#else
2016
2017static void
2018assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2019{
2020}
2021
2022static void
2023verify_async_put_domains_state(struct i915_power_domains *power_domains)
2024{
2025}
2026
2027#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2028
2029static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2030{
2031 assert_async_put_domain_masks_disjoint(power_domains);
2032
2033 return __async_put_domains_mask(power_domains);
2034}
2035
2036static void
2037async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2038 enum intel_display_power_domain domain)
2039{
2040 assert_async_put_domain_masks_disjoint(power_domains);
2041
2042 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2043 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2044}
2045
2046static bool
2047intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2048 enum intel_display_power_domain domain)
2049{
2050 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2051 bool ret = false;
2052
2053 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2054 goto out_verify;
2055
2056 async_put_domains_clear_domain(power_domains, domain);
2057
2058 ret = true;
2059
2060 if (async_put_domains_mask(power_domains))
2061 goto out_verify;
2062
2063 cancel_delayed_work(&power_domains->async_put_work);
2064 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2065 fetch_and_zero(&power_domains->async_put_wakeref));
2066out_verify:
2067 verify_async_put_domains_state(power_domains);
2068
2069 return ret;
2070}
2071
2072static void
2073__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2074 enum intel_display_power_domain domain)
2075{
2076 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2077 struct i915_power_well *power_well;
2078
2079 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2080 return;
2081
2082 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2083 intel_power_well_get(dev_priv, power_well);
2084
2085 power_domains->domain_use_count[domain]++;
2086}
2087
2088/**
2089 * intel_display_power_get - grab a power domain reference
2090 * @dev_priv: i915 device instance
2091 * @domain: power domain to reference
2092 *
2093 * This function grabs a power domain reference for @domain and ensures that the
2094 * power domain and all its parents are powered up. Therefore users should only
2095 * grab a reference to the innermost power domain they need.
2096 *
2097 * Any power domain reference obtained by this function must have a symmetric
2098 * call to intel_display_power_put() to release the reference again.
2099 */
2100intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2101 enum intel_display_power_domain domain)
2102{
2103 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2104 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2105
2106 mutex_lock(&power_domains->lock);
2107 __intel_display_power_get_domain(dev_priv, domain);
2108 mutex_unlock(&power_domains->lock);
2109
2110 return wakeref;
2111}
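/*
 * Typical call pattern for the reference counting above, sketched from the
 * kernel-doc: the wrapping function, the domain and the "touch hardware"
 * step are illustrative; only the symmetric get/put pairing is the point.
 */
static void example_use_aux_power(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);

	/* ... touch hardware that needs the AUX B domain powered ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B, wakeref);
}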
2112
2113/**
2114 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2115 * @dev_priv: i915 device instance
2116 * @domain: power domain to reference
2117 *
2118 * This function grabs a power domain reference for @domain and ensures that the
2119 * power domain and all its parents are powered up. Therefore users should only
2120 * grab a reference to the innermost power domain they need.
2121 *
2122 * Any power domain reference obtained by this function must have a symmetric
2123 * call to intel_display_power_put() to release the reference again.
2124 */
2125intel_wakeref_t
2126intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2127 enum intel_display_power_domain domain)
2128{
2129 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2130 intel_wakeref_t wakeref;
2131 bool is_enabled;
2132
2133 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2134 if (!wakeref)
2135 return false;
2136
2137 mutex_lock(&power_domains->lock);
2138
2139 if (__intel_display_power_is_enabled(dev_priv, domain)) {
2140 __intel_display_power_get_domain(dev_priv, domain);
2141 is_enabled = true;
2142 } else {
2143 is_enabled = false;
2144 }
2145
2146 mutex_unlock(&power_domains->lock);
2147
2148 if (!is_enabled) {
2149 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2150 wakeref = 0;
2151 }
2152
2153 return wakeref;
2154}
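/*
 * Sketch of the conditional variant: a zero wakeref means the domain was not
 * already enabled and no reference was taken, so the caller must bail out
 * instead of forcing the hardware on.  The wrapping function and the chosen
 * domain are illustrative.
 */
static bool example_read_if_powered(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_B);
	if (!wakeref)
		return false;	/* domain is off; do not power it up */

	/* ... read state that is only valid while PIPE_B is powered ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B, wakeref);
	return true;
}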
2155
2156static void
2157__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2158 enum intel_display_power_domain domain)
2159{
2160 struct i915_power_domains *power_domains;
2161 struct i915_power_well *power_well;
2162 const char *name = intel_display_power_domain_str(domain);
2163
2164 power_domains = &dev_priv->power_domains;
2165
2166 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2167 "Use count on domain %s is already zero\n",
2168 name);
2169 drm_WARN(&dev_priv->drm,
2170 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2171 "Async disabling of domain %s is pending\n",
2172 name);
2173
2174 power_domains->domain_use_count[domain]--;
2175
2176 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2177 intel_power_well_put(dev_priv, power_well);
2178}
2179
2180static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2181 enum intel_display_power_domain domain)
2182{
2183 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2184
2185 mutex_lock(&power_domains->lock);
2186 __intel_display_power_put_domain(dev_priv, domain);
2187 mutex_unlock(&power_domains->lock);
2188}
2189
2190/**
2191 * intel_display_power_put_unchecked - release an unchecked power domain reference
2192 * @dev_priv: i915 device instance
2193 * @domain: power domain to reference
2194 *
2195 * This function drops the power domain reference obtained by
2196 * intel_display_power_get() and might power down the corresponding hardware
2197 * block right away if this is the last reference.
2198 *
2199 * This function exists only for historical reasons and should be avoided in
2200 * new code, as the correctness of its use cannot be checked. Always use
2201 * intel_display_power_put() instead.
2202 */
2203void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2204 enum intel_display_power_domain domain)
2205{
2206 __intel_display_power_put(dev_priv, domain);
2207 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2208}
2209
2210static void
2211queue_async_put_domains_work(struct i915_power_domains *power_domains,
2212 intel_wakeref_t wakeref)
2213{
2214 struct drm_i915_private *i915 = container_of(power_domains,
2215 struct drm_i915_private,
2216 power_domains);
2217 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2218 power_domains->async_put_wakeref = wakeref;
2219 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2220 &power_domains->async_put_work,
2221 msecs_to_jiffies(100)));
2222}
2223
2224static void
2225release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2226{
2227 struct drm_i915_private *dev_priv =
2228 container_of(power_domains, struct drm_i915_private,
2229 power_domains);
2230 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2231 enum intel_display_power_domain domain;
2232 intel_wakeref_t wakeref;
2233
2234 /*
2235 * The caller must hold already raw wakeref, upgrade that to a proper
2236 * wakeref to make the state checker happy about the HW access during
2237 * power well disabling.
2238 */
2239 assert_rpm_raw_wakeref_held(rpm);
2240 wakeref = intel_runtime_pm_get(rpm);
2241
2242 for_each_power_domain(domain, mask) {
2243 /* Clear before put, so put's sanity check is happy. */
2244 async_put_domains_clear_domain(power_domains, domain);
2245 __intel_display_power_put_domain(dev_priv, domain);
2246 }
2247
2248 intel_runtime_pm_put(rpm, wakeref);
2249}
2250
2251static void
2252intel_display_power_put_async_work(struct work_struct *work)
2253{
2254 struct drm_i915_private *dev_priv =
2255 container_of(work, struct drm_i915_private,
2256 power_domains.async_put_work.work);
2257 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2258 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2259 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2260 intel_wakeref_t old_work_wakeref = 0;
2261
2262 mutex_lock(&power_domains->lock);
2263
2264 /*
2265 * Bail out if all the domain refs pending to be released were grabbed
2266 * by subsequent gets or a flush_work.
2267 */
2268 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2269 if (!old_work_wakeref)
2270 goto out_verify;
2271
2272 release_async_put_domains(power_domains,
2273 power_domains->async_put_domains[0]);
2274
2275 /* Requeue the work if more domains were async put meanwhile. */
2276 if (power_domains->async_put_domains[1]) {
2277 power_domains->async_put_domains[0] =
2278 fetch_and_zero(&power_domains->async_put_domains[1]);
2279 queue_async_put_domains_work(power_domains,
2280 fetch_and_zero(&new_work_wakeref));
2281 }
2282
2283out_verify:
2284 verify_async_put_domains_state(power_domains);
2285
2286 mutex_unlock(&power_domains->lock);
2287
2288 if (old_work_wakeref)
2289 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2290 if (new_work_wakeref)
2291 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2292}
2293
2294/**
2295 * intel_display_power_put_async - release a power domain reference asynchronously
2296 * @i915: i915 device instance
2297 * @domain: power domain to reference
2298 * @wakeref: wakeref acquired for the reference that is being released
2299 *
2300 * This function drops the power domain reference obtained by
2301 * intel_display_power_get*() and schedules a work to power down the
2302 * corresponding hardware block if this is the last reference.
2303 */
2304void __intel_display_power_put_async(struct drm_i915_private *i915,
2305 enum intel_display_power_domain domain,
2306 intel_wakeref_t wakeref)
2307{
2308 struct i915_power_domains *power_domains = &i915->power_domains;
2309 struct intel_runtime_pm *rpm = &i915->runtime_pm;
2310 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2311
2312 mutex_lock(&power_domains->lock);
2313
2314 if (power_domains->domain_use_count[domain] > 1) {
2315 __intel_display_power_put_domain(i915, domain);
2316
2317 goto out_verify;
2318 }
2319
2320 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2321
2322 /* Let a pending work requeue itself or queue a new one. */
2323 if (power_domains->async_put_wakeref) {
2324 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2325 } else {
2326 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2327 queue_async_put_domains_work(power_domains,
2328 fetch_and_zero(&work_wakeref));
2329 }
2330
2331out_verify:
2332 verify_async_put_domains_state(power_domains);
2333
2334 mutex_unlock(&power_domains->lock);
2335
2336 if (work_wakeref)
2337 intel_runtime_pm_put_raw(rpm, work_wakeref);
2338
2339 intel_runtime_pm_put(rpm, wakeref);
2340}
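/*
 * Asynchronous release, sketched from the kernel-doc above: the final
 * reference is handed to the delayed work instead of powering the well down
 * synchronously in the caller, so a quick re-get can cancel it cheaply.
 * The wrapping function and the chosen domain are illustrative.
 */
static void example_put_async(struct drm_i915_private *i915,
			      intel_wakeref_t wakeref)
{
	/* last user of AUX_B on this path; defer the actual power-down */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_B, wakeref);
}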
2341
2342/**
2343 * intel_display_power_flush_work - flushes the async display power disabling work
2344 * @i915: i915 device instance
2345 *
2346 * Flushes any pending work that was scheduled by a preceding
2347 * intel_display_power_put_async() call, completing the disabling of the
2348 * corresponding power domains.
2349 *
2350 * Note that the work handler function may still be running after this
2351 * function returns; to ensure that the work handler isn't running use
2352 * intel_display_power_flush_work_sync() instead.
2353 */
2354void intel_display_power_flush_work(struct drm_i915_private *i915)
2355{
2356 struct i915_power_domains *power_domains = &i915->power_domains;
2357 intel_wakeref_t work_wakeref;
2358
2359 mutex_lock(&power_domains->lock);
2360
2361 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2362 if (!work_wakeref)
2363 goto out_verify;
2364
2365 release_async_put_domains(power_domains,
2366 async_put_domains_mask(power_domains));
2367 cancel_delayed_work(&power_domains->async_put_work);
2368
2369out_verify:
2370 verify_async_put_domains_state(power_domains);
2371
2372 mutex_unlock(&power_domains->lock);
2373
2374 if (work_wakeref)
2375 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2376}
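/*
 * A teardown-ordering sketch based on the kernel-doc above: deferred
 * power-downs queued by the async put path should be flushed before the
 * state they reference is torn down.  The wrapping function is illustrative.
 */
static void example_before_power_well_teardown(struct drm_i915_private *i915)
{
	/* complete any power-downs still pending from put_async() */
	intel_display_power_flush_work(i915);

	/* ... now it is safe to tear down or reprogram the power wells ... */
}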
2377
2378/**
2379 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2380 * @i915: i915 device instance
2381 *
2382 * Like intel_display_power_flush_work(), but also ensure that the work
2383 * handler function is not running any more when this function returns.
2384 */
2385static void
2386intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2387{
2388 struct i915_power_domains *power_domains = &i915->power_domains;
2389
2390 intel_display_power_flush_work(i915);
2391 cancel_delayed_work_sync(&power_domains->async_put_work);
2392
2393 verify_async_put_domains_state(power_domains);
2394
2395 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2396}
2397
2398#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2399/**
2400 * intel_display_power_put - release a power domain reference
2401 * @dev_priv: i915 device instance
2402 * @domain: power domain to reference
2403 * @wakeref: wakeref acquired for the reference that is being released
2404 *
2405 * This function drops the power domain reference obtained by
2406 * intel_display_power_get() and might power down the corresponding hardware
2407 * block right away if this is the last reference.
2408 */
2409void intel_display_power_put(struct drm_i915_private *dev_priv,
2410 enum intel_display_power_domain domain,
2411 intel_wakeref_t wakeref)
2412{
2413 __intel_display_power_put(dev_priv, domain);
2414 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2415}
2416#endif
2417
2418#define I830_PIPES_POWER_DOMAINS ( \
2419 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2420 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2421 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2422 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2423 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2424 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2425 BIT_ULL(POWER_DOMAIN_INIT))
2426
2427#define VLV_DISPLAY_POWER_DOMAINS ( \
2428 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2429 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2430 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2431 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2432 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2433 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2434 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2435 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2436 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2437 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2438 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2439 BIT_ULL(POWER_DOMAIN_VGA) | \
2440 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2441 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2442 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2443 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2444 BIT_ULL(POWER_DOMAIN_INIT))
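/*
 * These masks are plain u64 bitmaps keyed by enum intel_display_power_domain;
 * checking whether a power well covers a domain is a single AND, as in the
 * for_each_power_domain_well() expansion earlier in the file.  A standalone
 * sketch with an illustrative mask value:
 */
#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* does the power well described by 'well_mask' cover 'domain'? */
static bool domain_in_mask(uint64_t well_mask, unsigned int domain)
{
	return (well_mask & BIT_ULL(domain)) != 0;
}

int main(void)
{
	/* illustrative mask with two arbitrary domain bits set */
	uint64_t example_mask = BIT_ULL(0) | BIT_ULL(14);

	return domain_in_mask(example_mask, 14) ? 0 : 1;
}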
2445
2446#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2447 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2448 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2449 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2450 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2451 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2452 BIT_ULL(POWER_DOMAIN_INIT))
2453
2454#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2455 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2456 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2457 BIT_ULL(POWER_DOMAIN_INIT))
2458
2459#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2460 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2461 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2462 BIT_ULL(POWER_DOMAIN_INIT))
2463
2464#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2465 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2466 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2467 BIT_ULL(POWER_DOMAIN_INIT))
2468
2469#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2470 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2471 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2472 BIT_ULL(POWER_DOMAIN_INIT))
2473
2474#define CHV_DISPLAY_POWER_DOMAINS ( \
2475 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2476 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2477 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2478 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2479 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2480 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2481 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2482 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2483 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2484 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2485 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2486 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2487 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2488 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2489 BIT_ULL(POWER_DOMAIN_VGA) | \
2490 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2491 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2492 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2493 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2494 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2495 BIT_ULL(POWER_DOMAIN_INIT))
2496
2497#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2498 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2499 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2500 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2501 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2502 BIT_ULL(POWER_DOMAIN_INIT))
2503
2504#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2505 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2506 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2507 BIT_ULL(POWER_DOMAIN_INIT))
2508
2509#define HSW_DISPLAY_POWER_DOMAINS ( \
2510 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2511 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2512 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2513 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2514 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2515 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2516 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2517 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2518 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2519 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2520 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2521 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2522 BIT_ULL(POWER_DOMAIN_VGA) | \
2523 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2524 BIT_ULL(POWER_DOMAIN_INIT))
2525
2526#define BDW_DISPLAY_POWER_DOMAINS ( \
2527 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2528 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2529 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2530 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2531 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2532 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2533 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2534 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2535 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2536 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2537 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2538 BIT_ULL(POWER_DOMAIN_VGA) | \
2539 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2540 BIT_ULL(POWER_DOMAIN_INIT))
2541
2542#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2543 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2544 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2545 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2546 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2547 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2548 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2549 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2550 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2551 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2552 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2553 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2554 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2555 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2556 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2557 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2558 BIT_ULL(POWER_DOMAIN_VGA) | \
2559 BIT_ULL(POWER_DOMAIN_INIT))
2560#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2561 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2562 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2563 BIT_ULL(POWER_DOMAIN_INIT))
2564#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2565 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2566 BIT_ULL(POWER_DOMAIN_INIT))
2567#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2568 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2569 BIT_ULL(POWER_DOMAIN_INIT))
2570#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2571 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2572 BIT_ULL(POWER_DOMAIN_INIT))
2573#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2574 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2575 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2576 BIT_ULL(POWER_DOMAIN_MODESET) | \
2577 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2578 BIT_ULL(POWER_DOMAIN_INIT))
2579
2580#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2581 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2582 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2583 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2584 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2585 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2586 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2587 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2588 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2589 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2590 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2591 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2592 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2593 BIT_ULL(POWER_DOMAIN_VGA) | \
2594 BIT_ULL(POWER_DOMAIN_INIT))
2595#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2596 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2597 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2598 BIT_ULL(POWER_DOMAIN_MODESET) | \
2599 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2600 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2601 BIT_ULL(POWER_DOMAIN_INIT))
2602#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2603 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2604 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2605 BIT_ULL(POWER_DOMAIN_INIT))
2606#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2607 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2608 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2609 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2610 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2611 BIT_ULL(POWER_DOMAIN_INIT))
2612
2613#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2614 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2615 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2616 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2617 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2618 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2619 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2620 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2623 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2624 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2625 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2626 BIT_ULL(POWER_DOMAIN_VGA) | \
2627 BIT_ULL(POWER_DOMAIN_INIT))
2628#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2629 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2630#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2631 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2632#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2633 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2634#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2635 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2636 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2637 BIT_ULL(POWER_DOMAIN_INIT))
2638#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2639 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2640 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2641 BIT_ULL(POWER_DOMAIN_INIT))
2642#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2643 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2644 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2645 BIT_ULL(POWER_DOMAIN_INIT))
2646#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2647 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2648 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2649 BIT_ULL(POWER_DOMAIN_INIT))
2650#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2651 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2652 BIT_ULL(POWER_DOMAIN_INIT))
2653#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2654 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2655 BIT_ULL(POWER_DOMAIN_INIT))
2656#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2657 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2658 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2659 BIT_ULL(POWER_DOMAIN_MODESET) | \
2660 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2661 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2662 BIT_ULL(POWER_DOMAIN_INIT))
2663
2664#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2665 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2666 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2667 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2668 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2669 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2670 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2671 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2672 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2675 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2676 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2677 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2678 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2679 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2680 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2681 BIT_ULL(POWER_DOMAIN_VGA) | \
2682 BIT_ULL(POWER_DOMAIN_INIT))
2683#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2684 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2685 BIT_ULL(POWER_DOMAIN_INIT))
2686#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2687 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2688 BIT_ULL(POWER_DOMAIN_INIT))
2689#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2690 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2691 BIT_ULL(POWER_DOMAIN_INIT))
2692#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2693 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2694 BIT_ULL(POWER_DOMAIN_INIT))
2695#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2696 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2697 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2698 BIT_ULL(POWER_DOMAIN_INIT))
2699#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2700 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2701 BIT_ULL(POWER_DOMAIN_INIT))
2702#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2703 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2704 BIT_ULL(POWER_DOMAIN_INIT))
2705#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2706 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2707 BIT_ULL(POWER_DOMAIN_INIT))
2708#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2709 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2710 BIT_ULL(POWER_DOMAIN_INIT))
2711#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2712 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2713 BIT_ULL(POWER_DOMAIN_INIT))
2714#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2715 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2716 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2717 BIT_ULL(POWER_DOMAIN_MODESET) | \
2718 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2719 BIT_ULL(POWER_DOMAIN_INIT))
2720
2721/*
2722 * ICL PW_0/PG_0 domains (HW/DMC control):
2723 * - PCI
2724 * - clocks except port PLL
2725 * - central power except FBC
2726 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2727 * ICL PW_1/PG_1 domains (HW/DMC control):
2728 * - DBUF function
2729 * - PIPE_A and its planes, except VGA
2730 * - transcoder EDP + PSR
2731 * - transcoder DSI
2732 * - DDI_A
2733 * - FBC
2734 */
2735#define ICL_PW_4_POWER_DOMAINS ( \
2736 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2737 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2738 BIT_ULL(POWER_DOMAIN_INIT))
2739 /* VDSC/joining */
2740#define ICL_PW_3_POWER_DOMAINS ( \
2741 ICL_PW_4_POWER_DOMAINS | \
2742 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2743 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2744 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2745 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2746 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2747 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2748 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2749 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2750 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2751 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2752 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2753 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2754 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2755 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2756 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2757 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
2758 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2759 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2760 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2761 BIT_ULL(POWER_DOMAIN_VGA) | \
2762 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2763 BIT_ULL(POWER_DOMAIN_INIT))
2764 /*
2765 * - transcoder WD
2766 * - KVMR (HW control)
2767 */
2768#define ICL_PW_2_POWER_DOMAINS ( \
2769 ICL_PW_3_POWER_DOMAINS | \
2770 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2771 BIT_ULL(POWER_DOMAIN_INIT))
2772 /*
2773 * - KVMR (HW control)
2774 */
2775#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2776 ICL_PW_2_POWER_DOMAINS | \
2777 BIT_ULL(POWER_DOMAIN_MODESET) | \
2778 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2779 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
2780 BIT_ULL(POWER_DOMAIN_INIT))
2781
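[Reader note, not part of the source: the ICL masks above nest by construction, each wider well OR-ing in the next narrower well's mask (ICL_PW_2 contains ICL_PW_3, which contains ICL_PW_4). A minimal stand-alone sketch of that composition pattern, using made-up EXAMPLE_* names rather than the driver's real domain enum, is shown below.]

/* Sketch only: EXAMPLE_* identifiers are illustrative, not i915 symbols. */
#include <stdint.h>
#include <stdio.h>

enum { EXAMPLE_PIPE_C, EXAMPLE_PIPE_B, EXAMPLE_INIT };

#define EXAMPLE_BIT_ULL(d)   (1ULL << (d))
#define EXAMPLE_PW_4_DOMAINS (EXAMPLE_BIT_ULL(EXAMPLE_PIPE_C) | \
                              EXAMPLE_BIT_ULL(EXAMPLE_INIT))
#define EXAMPLE_PW_3_DOMAINS (EXAMPLE_PW_4_DOMAINS | \
                              EXAMPLE_BIT_ULL(EXAMPLE_PIPE_B))

int main(void)
{
        /* Every bit of the narrower well is present in the wider one. */
        uint64_t pw3 = EXAMPLE_PW_3_DOMAINS, pw4 = EXAMPLE_PW_4_DOMAINS;

        printf("PW_3 contains PW_4: %d\n", (pw3 & pw4) == pw4);
        return 0;
}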
2782#define ICL_DDI_IO_A_POWER_DOMAINS ( \
2783 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2784#define ICL_DDI_IO_B_POWER_DOMAINS ( \
2785 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2786#define ICL_DDI_IO_C_POWER_DOMAINS ( \
2787 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2788#define ICL_DDI_IO_D_POWER_DOMAINS ( \
2789 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2790#define ICL_DDI_IO_E_POWER_DOMAINS ( \
2791 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2792#define ICL_DDI_IO_F_POWER_DOMAINS ( \
2793 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2794
2795#define ICL_AUX_A_IO_POWER_DOMAINS ( \
2796 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2797 BIT_ULL(POWER_DOMAIN_AUX_A))
2798#define ICL_AUX_B_IO_POWER_DOMAINS ( \
2799 BIT_ULL(POWER_DOMAIN_AUX_B))
2800#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
2801 BIT_ULL(POWER_DOMAIN_AUX_C))
2802#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
2803 BIT_ULL(POWER_DOMAIN_AUX_D))
2804#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
2805 BIT_ULL(POWER_DOMAIN_AUX_E))
2806#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
2807 BIT_ULL(POWER_DOMAIN_AUX_F))
2808#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
2809 BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2810#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
2811 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2812#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
2813 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2814#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
2815 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2816
2817#define TGL_PW_5_POWER_DOMAINS ( \
2818 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
2819 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
2820 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
2821 BIT_ULL(POWER_DOMAIN_INIT))
2822
2823#define TGL_PW_4_POWER_DOMAINS ( \
2824 TGL_PW_5_POWER_DOMAINS | \
2825 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2826 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2827 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2828 BIT_ULL(POWER_DOMAIN_INIT))
2829
2830#define TGL_PW_3_POWER_DOMAINS ( \
2831 TGL_PW_4_POWER_DOMAINS | \
2832 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2833 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2834 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2835 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2836 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2837 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2838 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
2839 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
2840 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
2841 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2842 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2843 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2844 BIT_ULL(POWER_DOMAIN_AUX_G) | \
2845 BIT_ULL(POWER_DOMAIN_AUX_H) | \
2846 BIT_ULL(POWER_DOMAIN_AUX_I) | \
2847 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2848 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2849 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2850 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
2851 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
2852 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
2853 BIT_ULL(POWER_DOMAIN_VGA) | \
2854 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2855 BIT_ULL(POWER_DOMAIN_INIT))
2856
2857#define TGL_PW_2_POWER_DOMAINS ( \
2858 TGL_PW_3_POWER_DOMAINS | \
2859 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2860 BIT_ULL(POWER_DOMAIN_INIT))
2861
2862#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2863 TGL_PW_3_POWER_DOMAINS | \
2864 BIT_ULL(POWER_DOMAIN_MODESET) | \
2865 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2866 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2867 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2868 BIT_ULL(POWER_DOMAIN_INIT))
2869
2870#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
2871 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2872#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
2873 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2874#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
2875 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2876#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
2877 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2878#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
2879 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2880#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
2881 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2882
2883#define TGL_AUX_A_IO_POWER_DOMAINS ( \
2884 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2885 BIT_ULL(POWER_DOMAIN_AUX_A))
2886#define TGL_AUX_B_IO_POWER_DOMAINS ( \
2887 BIT_ULL(POWER_DOMAIN_AUX_B))
2888#define TGL_AUX_C_IO_POWER_DOMAINS ( \
2889 BIT_ULL(POWER_DOMAIN_AUX_C))
2890#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
2891 BIT_ULL(POWER_DOMAIN_AUX_D))
2892#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
2893 BIT_ULL(POWER_DOMAIN_AUX_E))
2894#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
2895 BIT_ULL(POWER_DOMAIN_AUX_F))
2896#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
2897 BIT_ULL(POWER_DOMAIN_AUX_G))
2898#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
2899 BIT_ULL(POWER_DOMAIN_AUX_H))
2900#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
2901 BIT_ULL(POWER_DOMAIN_AUX_I))
2902#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
2903 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2904#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
2905 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2906#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
2907 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2908#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
2909 BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2910#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
2911 BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2912#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
2913 BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2914
2915#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
2916 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2917 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2918 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2919 BIT_ULL(POWER_DOMAIN_AUX_G) | \
2920 BIT_ULL(POWER_DOMAIN_AUX_H) | \
2921 BIT_ULL(POWER_DOMAIN_AUX_I) | \
2922 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2923 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2924 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2925 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
2926 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
2927 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
2928 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2929
2930#define RKL_PW_4_POWER_DOMAINS ( \
2931 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2932 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2933 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2934 BIT_ULL(POWER_DOMAIN_INIT))
2935
2936#define RKL_PW_3_POWER_DOMAINS ( \
2937 RKL_PW_4_POWER_DOMAINS | \
2938 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2939 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2940 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2941 BIT_ULL(POWER_DOMAIN_VGA) | \
2942 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2943 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2944 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2945 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2946 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2947 BIT_ULL(POWER_DOMAIN_INIT))
2948
2949/*
2950 * There is no PW_2/PG_2 on RKL.
2951 *
2952 * RKL PW_1/PG_1 domains (under HW/DMC control):
2953 * - DBUF function (note: registers are in PW0)
2954 * - PIPE_A and its planes and VDSC/joining, except VGA
2955 * - transcoder A
2956 * - DDI_A and DDI_B
2957 * - FBC
2958 *
2959 * RKL PW_0/PG_0 domains (under HW/DMC control):
2960 * - PCI
2961 * - clocks except port PLL
2962 * - shared functions:
2963 * * interrupts except pipe interrupts
2964 * * MBus except PIPE_MBUS_DBOX_CTL
2965 * * DBUF registers
2966 * - central power except FBC
2967 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
2968 */
2969
2970#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2971 RKL_PW_3_POWER_DOMAINS | \
2972 BIT_ULL(POWER_DOMAIN_MODESET) | \
2973 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2974 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2975 BIT_ULL(POWER_DOMAIN_INIT))
2976
2977static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2978 .sync_hw = i9xx_power_well_sync_hw_noop,
2979 .enable = i9xx_always_on_power_well_noop,
2980 .disable = i9xx_always_on_power_well_noop,
2981 .is_enabled = i9xx_always_on_power_well_enabled,
2982};
2983
2984static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2985 .sync_hw = chv_pipe_power_well_sync_hw,
2986 .enable = chv_pipe_power_well_enable,
2987 .disable = chv_pipe_power_well_disable,
2988 .is_enabled = chv_pipe_power_well_enabled,
2989};
2990
2991static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2992 .sync_hw = i9xx_power_well_sync_hw_noop,
2993 .enable = chv_dpio_cmn_power_well_enable,
2994 .disable = chv_dpio_cmn_power_well_disable,
2995 .is_enabled = vlv_power_well_enabled,
2996};
2997
2998static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2999 {
3000 .name = "always-on",
3001 .always_on = true,
3002 .domains = POWER_DOMAIN_MASK,
3003 .ops = &i9xx_always_on_power_well_ops,
3004 .id = DISP_PW_ID_NONE,
3005 },
3006};
3007
3008static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3009 .sync_hw = i830_pipes_power_well_sync_hw,
3010 .enable = i830_pipes_power_well_enable,
3011 .disable = i830_pipes_power_well_disable,
3012 .is_enabled = i830_pipes_power_well_enabled,
3013};
3014
3015static const struct i915_power_well_desc i830_power_wells[] = {
3016 {
3017 .name = "always-on",
3018 .always_on = true,
3019 .domains = POWER_DOMAIN_MASK,
3020 .ops = &i9xx_always_on_power_well_ops,
3021 .id = DISP_PW_ID_NONE,
3022 },
3023 {
3024 .name = "pipes",
3025 .domains = I830_PIPES_POWER_DOMAINS,
3026 .ops = &i830_pipes_power_well_ops,
3027 .id = DISP_PW_ID_NONE,
3028 },
3029};
3030
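[Reader note, not part of the source: each of these tables pairs a 64-bit .domains mask with an ops vtable, and a well is relevant to a domain when the domain's bit is set in the mask. The self-contained sketch below, using simplified stand-in types rather than the driver's i915_power_well_desc, shows how such a table can be scanned for the wells backing one domain.]

/* Sketch with made-up example_* types; mirrors the table layout, not the real API. */
#include <stdint.h>
#include <stdio.h>

enum example_domain { EXAMPLE_DOMAIN_PIPE_A, EXAMPLE_DOMAIN_PIPE_B, EXAMPLE_DOMAIN_INIT };
#define EXAMPLE_BIT_ULL(d) (1ULL << (d))

struct example_well_desc {
        const char *name;
        uint64_t domains;       /* bitmask over enum example_domain */
};

static const struct example_well_desc example_wells[] = {
        { "always-on", ~0ULL },
        { "pipes", EXAMPLE_BIT_ULL(EXAMPLE_DOMAIN_PIPE_A) |
                   EXAMPLE_BIT_ULL(EXAMPLE_DOMAIN_PIPE_B) |
                   EXAMPLE_BIT_ULL(EXAMPLE_DOMAIN_INIT) },
};

int main(void)
{
        enum example_domain d = EXAMPLE_DOMAIN_PIPE_B;

        /* Report every well whose domain mask covers the requested domain. */
        for (size_t i = 0; i < sizeof(example_wells) / sizeof(example_wells[0]); i++)
                if (example_wells[i].domains & EXAMPLE_BIT_ULL(d))
                        printf("domain %d needs well \"%s\"\n", (int)d, example_wells[i].name);
        return 0;
}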
3031static const struct i915_power_well_ops hsw_power_well_ops = {
3032 .sync_hw = hsw_power_well_sync_hw,
3033 .enable = hsw_power_well_enable,
3034 .disable = hsw_power_well_disable,
3035 .is_enabled = hsw_power_well_enabled,
3036};
3037
3038static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3039 .sync_hw = i9xx_power_well_sync_hw_noop,
3040 .enable = gen9_dc_off_power_well_enable,
3041 .disable = gen9_dc_off_power_well_disable,
3042 .is_enabled = gen9_dc_off_power_well_enabled,
3043};
3044
3045static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3046 .sync_hw = i9xx_power_well_sync_hw_noop,
3047 .enable = bxt_dpio_cmn_power_well_enable,
3048 .disable = bxt_dpio_cmn_power_well_disable,
3049 .is_enabled = bxt_dpio_cmn_power_well_enabled,
3050};
3051
3052static const struct i915_power_well_regs hsw_power_well_regs = {
3053 .bios = HSW_PWR_WELL_CTL1,
3054 .driver = HSW_PWR_WELL_CTL2,
3055 .kvmr = HSW_PWR_WELL_CTL3,
3056 .debug = HSW_PWR_WELL_CTL4,
3057};
3058
3059static const struct i915_power_well_desc hsw_power_wells[] = {
3060 {
3061 .name = "always-on",
3062 .always_on = true,
3063 .domains = POWER_DOMAIN_MASK,
3064 .ops = &i9xx_always_on_power_well_ops,
3065 .id = DISP_PW_ID_NONE,
3066 },
3067 {
3068 .name = "display",
3069 .domains = HSW_DISPLAY_POWER_DOMAINS,
3070 .ops = &hsw_power_well_ops,
3071 .id = HSW_DISP_PW_GLOBAL,
3072 {
3073 .hsw.regs = &hsw_power_well_regs,
3074 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3075 .hsw.has_vga = true,
3076 },
3077 },
3078};
3079
3080static const struct i915_power_well_desc bdw_power_wells[] = {
3081 {
3082 .name = "always-on",
3083 .always_on = true,
3084 .domains = POWER_DOMAIN_MASK,
3085 .ops = &i9xx_always_on_power_well_ops,
3086 .id = DISP_PW_ID_NONE,
3087 },
3088 {
3089 .name = "display",
3090 .domains = BDW_DISPLAY_POWER_DOMAINS,
3091 .ops = &hsw_power_well_ops,
3092 .id = HSW_DISP_PW_GLOBAL,
3093 {
3094 .hsw.regs = &hsw_power_well_regs,
3095 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3096 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3097 .hsw.has_vga = true,
3098 },
3099 },
3100};
3101
3102static const struct i915_power_well_ops vlv_display_power_well_ops = {
3103 .sync_hw = i9xx_power_well_sync_hw_noop,
3104 .enable = vlv_display_power_well_enable,
3105 .disable = vlv_display_power_well_disable,
3106 .is_enabled = vlv_power_well_enabled,
3107};
3108
3109static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3110 .sync_hw = i9xx_power_well_sync_hw_noop,
3111 .enable = vlv_dpio_cmn_power_well_enable,
3112 .disable = vlv_dpio_cmn_power_well_disable,
3113 .is_enabled = vlv_power_well_enabled,
3114};
3115
3116static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3117 .sync_hw = i9xx_power_well_sync_hw_noop,
3118 .enable = vlv_power_well_enable,
3119 .disable = vlv_power_well_disable,
3120 .is_enabled = vlv_power_well_enabled,
3121};
3122
3123static const struct i915_power_well_desc vlv_power_wells[] = {
3124 {
3125 .name = "always-on",
3126 .always_on = true,
3127 .domains = POWER_DOMAIN_MASK,
3128 .ops = &i9xx_always_on_power_well_ops,
3129 .id = DISP_PW_ID_NONE,
3130 },
3131 {
3132 .name = "display",
3133 .domains = VLV_DISPLAY_POWER_DOMAINS,
3134 .ops = &vlv_display_power_well_ops,
3135 .id = VLV_DISP_PW_DISP2D,
3136 {
3137 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3138 },
3139 },
3140 {
3141 .name = "dpio-tx-b-01",
3142 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3143 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3144 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3145 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3146 .ops = &vlv_dpio_power_well_ops,
3147 .id = DISP_PW_ID_NONE,
3148 {
3149 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3150 },
3151 },
3152 {
3153 .name = "dpio-tx-b-23",
3154 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3155 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3156 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3157 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3158 .ops = &vlv_dpio_power_well_ops,
3159 .id = DISP_PW_ID_NONE,
3160 {
3161 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3162 },
3163 },
3164 {
3165 .name = "dpio-tx-c-01",
3166 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3167 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3168 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3169 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3170 .ops = &vlv_dpio_power_well_ops,
3171 .id = DISP_PW_ID_NONE,
3172 {
3173 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3174 },
3175 },
3176 {
3177 .name = "dpio-tx-c-23",
3178 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3179 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3180 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3181 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3182 .ops = &vlv_dpio_power_well_ops,
3183 .id = DISP_PW_ID_NONE,
3184 {
3185 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3186 },
3187 },
3188 {
3189 .name = "dpio-common",
3190 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3191 .ops = &vlv_dpio_cmn_power_well_ops,
3192 .id = VLV_DISP_PW_DPIO_CMN_BC,
3193 {
3194 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3195 },
3196 },
3197};
3198
3199static const struct i915_power_well_desc chv_power_wells[] = {
3200 {
3201 .name = "always-on",
3202 .always_on = true,
3203 .domains = POWER_DOMAIN_MASK,
3204 .ops = &i9xx_always_on_power_well_ops,
3205 .id = DISP_PW_ID_NONE,
3206 },
3207 {
3208 .name = "display",
3209 /*
3210 * Pipe A power well is the new disp2d well. Pipe B and C
3211 * power wells don't actually exist. Pipe A power well is
3212 * required for any pipe to work.
3213 */
3214 .domains = CHV_DISPLAY_POWER_DOMAINS,
3215 .ops = &chv_pipe_power_well_ops,
3216 .id = DISP_PW_ID_NONE,
3217 },
3218 {
3219 .name = "dpio-common-bc",
3220 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3221 .ops = &chv_dpio_cmn_power_well_ops,
3222 .id = VLV_DISP_PW_DPIO_CMN_BC,
3223 {
3224 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3225 },
3226 },
3227 {
3228 .name = "dpio-common-d",
3229 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3230 .ops = &chv_dpio_cmn_power_well_ops,
3231 .id = CHV_DISP_PW_DPIO_CMN_D,
3232 {
3233 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3234 },
3235 },
3236};
3237
3238bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3239 enum i915_power_well_id power_well_id)
3240{
3241 struct i915_power_well *power_well;
3242 bool ret;
3243
3244 power_well = lookup_power_well(dev_priv, power_well_id);
3245 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3246
3247 return ret;
3248}
3249
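[Reader note, not part of the source: intel_display_power_well_is_enabled() above is a thin lookup-and-dispatch helper, resolving the well by ID and then calling through the well's ops vtable. A reduced sketch of that dispatch shape, with invented example_* names rather than the driver's types, might be:]

/* Dispatch-through-vtable sketch; example_* names are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct example_well;

struct example_well_ops {
        bool (*is_enabled)(const struct example_well *well);
};

struct example_well {
        int id;
        const struct example_well_ops *ops;
};

static bool example_always_enabled(const struct example_well *well)
{
        (void)well;
        return true;    /* e.g. an "always-on" well */
}

static const struct example_well_ops example_ops = { .is_enabled = example_always_enabled };
static const struct example_well example_wells[] = { { .id = 1, .ops = &example_ops } };

/* Look the well up by id, then ask its ops whether it is enabled. */
static bool example_well_is_enabled(int id)
{
        for (size_t i = 0; i < sizeof(example_wells) / sizeof(example_wells[0]); i++)
                if (example_wells[i].id == id)
                        return example_wells[i].ops->is_enabled(&example_wells[i]);
        return false;
}

int main(void)
{
        printf("%d\n", example_well_is_enabled(1));
        return 0;
}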
3250static const struct i915_power_well_desc skl_power_wells[] = {
3251 {
3252 .name = "always-on",
3253 .always_on = true,
3254 .domains = POWER_DOMAIN_MASK,
3255 .ops = &i9xx_always_on_power_well_ops,
3256 .id = DISP_PW_ID_NONE,
3257 },
3258 {
3259 .name = "power well 1",
3260 /* Handled by the DMC firmware */
3261 .always_on = true,
3262 .domains = 0,
3263 .ops = &hsw_power_well_ops,
3264 .id = SKL_DISP_PW_1,
3265 {
3266 .hsw.regs = &hsw_power_well_regs,
3267 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3268 .hsw.has_fuses = true,
3269 },
3270 },
3271 {
3272 .name = "MISC IO power well",
3273 /* Handled by the DMC firmware */
3274 .always_on = true,
3275 .domains = 0,
3276 .ops = &hsw_power_well_ops,
3277 .id = SKL_DISP_PW_MISC_IO,
3278 {
3279 .hsw.regs = &hsw_power_well_regs,
3280 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3281 },
3282 },
3283 {
3284 .name = "DC off",
3285 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3286 .ops = &gen9_dc_off_power_well_ops,
3287 .id = SKL_DISP_DC_OFF,
3288 },
3289 {
3290 .name = "power well 2",
3291 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3292 .ops = &hsw_power_well_ops,
3293 .id = SKL_DISP_PW_2,
3294 {
3295 .hsw.regs = &hsw_power_well_regs,
3296 .hsw.idx = SKL_PW_CTL_IDX_PW_215,
3297 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
3298 .hsw.has_vga = true1,
3299 .hsw.has_fuses = true1,
3300 },
3301 },
3302 {
3303 .name = "DDI A/E IO power well",
3304 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_E_IO)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3305 .ops = &hsw_power_well_ops,
3306 .id = DISP_PW_ID_NONE,
3307 {
3308 .hsw.regs = &hsw_power_well_regs,
3309 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E1,
3310 },
3311 },
3312 {
3313 .name = "DDI B IO power well",
3314 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3315 .ops = &hsw_power_well_ops,
3316 .id = DISP_PW_ID_NONE,
3317 {
3318 .hsw.regs = &hsw_power_well_regs,
3319 .hsw.idx = SKL_PW_CTL_IDX_DDI_B2,
3320 },
3321 },
3322 {
3323 .name = "DDI C IO power well",
3324 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3325 .ops = &hsw_power_well_ops,
3326 .id = DISP_PW_ID_NONE,
3327 {
3328 .hsw.regs = &hsw_power_well_regs,
3329 .hsw.idx = SKL_PW_CTL_IDX_DDI_C3,
3330 },
3331 },
3332 {
3333 .name = "DDI D IO power well",
3334 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_D_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3335 .ops = &hsw_power_well_ops,
3336 .id = DISP_PW_ID_NONE,
3337 {
3338 .hsw.regs = &hsw_power_well_regs,
3339 .hsw.idx = SKL_PW_CTL_IDX_DDI_D4,
3340 },
3341 },
3342};
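
Platform tables such as skl_power_wells[] above are flat arrays of designated initializers, where the trailing brace group fills an unnamed union holding backend-specific settings (.hsw.*, .bxt.*, .vlv.*), and the domain masks are built from 1ULL << n terms as the expansions show. A small sketch of that pattern with hypothetical field names, not the driver's real layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-backend parameter blocks, in the spirit of .hsw/.bxt. */
struct hsw_params { int idx; bool has_fuses; uint32_t irq_pipe_mask; };
struct bxt_params { int phy; };

struct well_desc {
	const char *name;
	bool always_on;
	uint64_t domains;                    /* bitmask of power domains */
	int id;
	union {                              /* backend-specific settings */
		struct hsw_params hsw;
		struct bxt_params bxt;
	};
};

/* Domains as bit positions, combined with 1ULL << n as in the tables above. */
enum { DOM_PIPE_B, DOM_PIPE_C, DOM_AUX_B, DOM_INIT, DOM_NUM };
#define DOM_MASK ((1ULL << DOM_NUM) - 1)

static const struct well_desc wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = DOM_MASK,
		.id = -1,
	},
	{
		.name = "power well 2",
		.domains = (1ULL << DOM_PIPE_B) | (1ULL << DOM_PIPE_C) |
			   (1ULL << DOM_AUX_B) | (1ULL << DOM_INIT),
		.id = 2,
		/* trailing brace group fills the unnamed union */
		{ .hsw = { .idx = 15, .has_fuses = true, .irq_pipe_mask = 0x6 } },
	},
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(wells) / sizeof(wells[0]); i++)
		printf("%-12s domains=0x%llx\n", wells[i].name,
		       (unsigned long long)wells[i].domains);
	return 0;
}
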
3343
3344static const struct i915_power_well_desc bxt_power_wells[] = {
3345 {
3346 .name = "always-on",
3347 .always_on = true1,
3348 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
3349 .ops = &i9xx_always_on_power_well_ops,
3350 .id = DISP_PW_ID_NONE,
3351 },
3352 {
3353 .name = "power well 1",
3354 /* Handled by the DMC firmware */
3355 .always_on = true1,
3356 .domains = 0,
3357 .ops = &hsw_power_well_ops,
3358 .id = SKL_DISP_PW_1,
3359 {
3360 .hsw.regs = &hsw_power_well_regs,
3361 .hsw.idx = SKL_PW_CTL_IDX_PW_114,
3362 .hsw.has_fuses = true1,
3363 },
3364 },
3365 {
3366 .name = "DC off",
3367 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_GT_IRQ)) | (1ULL << (
POWER_DOMAIN_MODESET)) | (1ULL << (POWER_DOMAIN_AUX_A))
| (1ULL << (POWER_DOMAIN_GMBUS)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3368 .ops = &gen9_dc_off_power_well_ops,
3369 .id = SKL_DISP_DC_OFF,
3370 },
3371 {
3372 .name = "power well 2",
3373 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3374 .ops = &hsw_power_well_ops,
3375 .id = SKL_DISP_PW_2,
3376 {
3377 .hsw.regs = &hsw_power_well_regs,
3378 .hsw.idx = SKL_PW_CTL_IDX_PW_215,
3379 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
3380 .hsw.has_vga = true1,
3381 .hsw.has_fuses = true1,
3382 },
3383 },
3384 {
3385 .name = "dpio-common-a",
3386 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3387 .ops = &bxt_dpio_cmn_power_well_ops,
3388 .id = BXT_DISP_PW_DPIO_CMN_A,
3389 {
3390 .bxt.phy = DPIO_PHY1,
3391 },
3392 },
3393 {
3394 .name = "dpio-common-bc",
3395 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3396 .ops = &bxt_dpio_cmn_power_well_ops,
3397 .id = VLV_DISP_PW_DPIO_CMN_BC,
3398 {
3399 .bxt.phy = DPIO_PHY0,
3400 },
3401 },
3402};
3403
3404static const struct i915_power_well_desc glk_power_wells[] = {
3405 {
3406 .name = "always-on",
3407 .always_on = true1,
3408 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
3409 .ops = &i9xx_always_on_power_well_ops,
3410 .id = DISP_PW_ID_NONE,
3411 },
3412 {
3413 .name = "power well 1",
3414 /* Handled by the DMC firmware */
3415 .always_on = true1,
3416 .domains = 0,
3417 .ops = &hsw_power_well_ops,
3418 .id = SKL_DISP_PW_1,
3419 {
3420 .hsw.regs = &hsw_power_well_regs,
3421 .hsw.idx = SKL_PW_CTL_IDX_PW_114,
3422 .hsw.has_fuses = true1,
3423 },
3424 },
3425 {
3426 .name = "DC off",
3427 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_GT_IRQ)) | (1ULL << (
POWER_DOMAIN_MODESET)) | (1ULL << (POWER_DOMAIN_AUX_A))
| (1ULL << (POWER_DOMAIN_GMBUS)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3428 .ops = &gen9_dc_off_power_well_ops,
3429 .id = SKL_DISP_DC_OFF,
3430 },
3431 {
3432 .name = "power well 2",
3433 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3434 .ops = &hsw_power_well_ops,
3435 .id = SKL_DISP_PW_2,
3436 {
3437 .hsw.regs = &hsw_power_well_regs,
3438 .hsw.idx = SKL_PW_CTL_IDX_PW_215,
3439 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
3440 .hsw.has_vga = true1,
3441 .hsw.has_fuses = true1,
3442 },
3443 },
3444 {
3445 .name = "dpio-common-a",
3446 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3447 .ops = &bxt_dpio_cmn_power_well_ops,
3448 .id = BXT_DISP_PW_DPIO_CMN_A,
3449 {
3450 .bxt.phy = DPIO_PHY1,
3451 },
3452 },
3453 {
3454 .name = "dpio-common-b",
3455 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3456 .ops = &bxt_dpio_cmn_power_well_ops,
3457 .id = VLV_DISP_PW_DPIO_CMN_BC,
3458 {
3459 .bxt.phy = DPIO_PHY0,
3460 },
3461 },
3462 {
3463 .name = "dpio-common-c",
3464 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3465 .ops = &bxt_dpio_cmn_power_well_ops,
3466 .id = GLK_DISP_PW_DPIO_CMN_C,
3467 {
3468 .bxt.phy = DPIO_PHY2,
3469 },
3470 },
3471 {
3472 .name = "AUX A",
3473 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_AUX_IO_A
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3474 .ops = &hsw_power_well_ops,
3475 .id = DISP_PW_ID_NONE,
3476 {
3477 .hsw.regs = &hsw_power_well_regs,
3478 .hsw.idx = GLK_PW_CTL_IDX_AUX_A8,
3479 },
3480 },
3481 {
3482 .name = "AUX B",
3483 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3484 .ops = &hsw_power_well_ops,
3485 .id = DISP_PW_ID_NONE,
3486 {
3487 .hsw.regs = &hsw_power_well_regs,
3488 .hsw.idx = GLK_PW_CTL_IDX_AUX_B9,
3489 },
3490 },
3491 {
3492 .name = "AUX C",
3493 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3494 .ops = &hsw_power_well_ops,
3495 .id = DISP_PW_ID_NONE,
3496 {
3497 .hsw.regs = &hsw_power_well_regs,
3498 .hsw.idx = GLK_PW_CTL_IDX_AUX_C10,
3499 },
3500 },
3501 {
3502 .name = "DDI A IO power well",
3503 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO))),
3504 .ops = &hsw_power_well_ops,
3505 .id = DISP_PW_ID_NONE,
3506 {
3507 .hsw.regs = &hsw_power_well_regs,
3508 .hsw.idx = GLK_PW_CTL_IDX_DDI_A1,
3509 },
3510 },
3511 {
3512 .name = "DDI B IO power well",
3513 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO))),
3514 .ops = &hsw_power_well_ops,
3515 .id = DISP_PW_ID_NONE,
3516 {
3517 .hsw.regs = &hsw_power_well_regs,
3518 .hsw.idx = SKL_PW_CTL_IDX_DDI_B2,
3519 },
3520 },
3521 {
3522 .name = "DDI C IO power well",
3523 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_IO))),
3524 .ops = &hsw_power_well_ops,
3525 .id = DISP_PW_ID_NONE,
3526 {
3527 .hsw.regs = &hsw_power_well_regs,
3528 .hsw.idx = SKL_PW_CTL_IDX_DDI_C3,
3529 },
3530 },
3531};
3532
3533static const struct i915_power_well_desc cnl_power_wells[] = {
3534 {
3535 .name = "always-on",
3536 .always_on = true1,
3537 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
3538 .ops = &i9xx_always_on_power_well_ops,
3539 .id = DISP_PW_ID_NONE,
3540 },
3541 {
3542 .name = "power well 1",
3543 /* Handled by the DMC firmware */
3544 .always_on = true1,
3545 .domains = 0,
3546 .ops = &hsw_power_well_ops,
3547 .id = SKL_DISP_PW_1,
3548 {
3549 .hsw.regs = &hsw_power_well_regs,
3550 .hsw.idx = SKL_PW_CTL_IDX_PW_114,
3551 .hsw.has_fuses = true1,
3552 },
3553 },
3554 {
3555 .name = "AUX A",
3556 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_AUX_IO_A
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3557 .ops = &hsw_power_well_ops,
3558 .id = DISP_PW_ID_NONE,
3559 {
3560 .hsw.regs = &hsw_power_well_regs,
3561 .hsw.idx = GLK_PW_CTL_IDX_AUX_A8,
3562 },
3563 },
3564 {
3565 .name = "AUX B",
3566 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3567 .ops = &hsw_power_well_ops,
3568 .id = DISP_PW_ID_NONE,
3569 {
3570 .hsw.regs = &hsw_power_well_regs,
3571 .hsw.idx = GLK_PW_CTL_IDX_AUX_B9,
3572 },
3573 },
3574 {
3575 .name = "AUX C",
3576 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3577 .ops = &hsw_power_well_ops,
3578 .id = DISP_PW_ID_NONE,
3579 {
3580 .hsw.regs = &hsw_power_well_regs,
3581 .hsw.idx = GLK_PW_CTL_IDX_AUX_C10,
3582 },
3583 },
3584 {
3585 .name = "AUX D",
3586 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3587 .ops = &hsw_power_well_ops,
3588 .id = DISP_PW_ID_NONE,
3589 {
3590 .hsw.regs = &hsw_power_well_regs,
3591 .hsw.idx = CNL_PW_CTL_IDX_AUX_D11,
3592 },
3593 },
3594 {
3595 .name = "DC off",
3596 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_AUX_C))
| (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_F
)) | (1ULL << (POWER_DOMAIN_AUDIO)) | (1ULL << (POWER_DOMAIN_VGA
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_GT_IRQ
)) | (1ULL << (POWER_DOMAIN_MODESET)) | (1ULL << (
POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3597 .ops = &gen9_dc_off_power_well_ops,
3598 .id = SKL_DISP_DC_OFF,
3599 },
3600 {
3601 .name = "power well 2",
3602 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_PIPE_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_C)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_AUX_C))
| (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_F
)) | (1ULL << (POWER_DOMAIN_AUDIO)) | (1ULL << (POWER_DOMAIN_VGA
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3603 .ops = &hsw_power_well_ops,
3604 .id = SKL_DISP_PW_2,
3605 {
3606 .hsw.regs = &hsw_power_well_regs,
3607 .hsw.idx = SKL_PW_CTL_IDX_PW_215,
3608 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
3609 .hsw.has_vga = true1,
3610 .hsw.has_fuses = true1,
3611 },
3612 },
3613 {
3614 .name = "DDI A IO power well",
3615 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3616 .ops = &hsw_power_well_ops,
3617 .id = DISP_PW_ID_NONE,
3618 {
3619 .hsw.regs = &hsw_power_well_regs,
3620 .hsw.idx = GLK_PW_CTL_IDX_DDI_A1,
3621 },
3622 },
3623 {
3624 .name = "DDI B IO power well",
3625 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3626 .ops = &hsw_power_well_ops,
3627 .id = DISP_PW_ID_NONE,
3628 {
3629 .hsw.regs = &hsw_power_well_regs,
3630 .hsw.idx = SKL_PW_CTL_IDX_DDI_B2,
3631 },
3632 },
3633 {
3634 .name = "DDI C IO power well",
3635 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3636 .ops = &hsw_power_well_ops,
3637 .id = DISP_PW_ID_NONE,
3638 {
3639 .hsw.regs = &hsw_power_well_regs,
3640 .hsw.idx = SKL_PW_CTL_IDX_DDI_C3,
3641 },
3642 },
3643 {
3644 .name = "DDI D IO power well",
3645 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_D_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3646 .ops = &hsw_power_well_ops,
3647 .id = DISP_PW_ID_NONE,
3648 {
3649 .hsw.regs = &hsw_power_well_regs,
3650 .hsw.idx = SKL_PW_CTL_IDX_DDI_D4,
3651 },
3652 },
3653 {
3654 .name = "DDI F IO power well",
3655 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_F_IO)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
3656 .ops = &hsw_power_well_ops,
3657 .id = DISP_PW_ID_NONE,
3658 {
3659 .hsw.regs = &hsw_power_well_regs,
3660 .hsw.idx = CNL_PW_CTL_IDX_DDI_F6,
3661 },
3662 },
3663 {
3664 .name = "AUX F",
3665 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
3666 .ops = &hsw_power_well_ops,
3667 .id = DISP_PW_ID_NONE,
3668 {
3669 .hsw.regs = &hsw_power_well_regs,
3670 .hsw.idx = CNL_PW_CTL_IDX_AUX_F12,
3671 },
3672 },
3673};
3674
3675static const struct i915_power_well_ops icl_aux_power_well_ops = {
3676 .sync_hw = hsw_power_well_sync_hw,
3677 .enable = icl_aux_power_well_enable,
3678 .disable = icl_aux_power_well_disable,
3679 .is_enabled = hsw_power_well_enabled,
3680};
3681
3682static const struct i915_power_well_regs icl_aux_power_well_regs = {
3683 .bios = ICL_PWR_WELL_CTL_AUX1((const i915_reg_t){ .reg = (0x45440) }),
3684 .driver = ICL_PWR_WELL_CTL_AUX2((const i915_reg_t){ .reg = (0x45444) }),
3685 .debug = ICL_PWR_WELL_CTL_AUX4((const i915_reg_t){ .reg = (0x4544C) }),
3686};
3687
3688static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3689 .bios = ICL_PWR_WELL_CTL_DDI1((const i915_reg_t){ .reg = (0x45450) }),
3690 .driver = ICL_PWR_WELL_CTL_DDI2((const i915_reg_t){ .reg = (0x45454) }),
3691 .debug = ICL_PWR_WELL_CTL_DDI4((const i915_reg_t){ .reg = (0x4545C) }),
3692};
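
The register tables above are built from compound literals of the form (const i915_reg_t){ .reg = 0x... }, i.e. MMIO offsets are wrapped in a one-member struct rather than passed around as bare integers. A standalone sketch of that wrapper idiom with hypothetical names (reg_t, read_reg), not the driver's header:

#include <stdint.h>
#include <stdio.h>

/* One-member wrapper: register offsets get their own type, so they cannot be
 * mixed up with plain integers at call sites. */
typedef struct { uint32_t reg; } reg_t;

#define REG(off) ((const reg_t){ .reg = (off) })

struct well_regs {
	reg_t bios;
	reg_t driver;
	reg_t debug;
};

/* Passed by value, as the driver's accessors do with i915_reg_t: the callee
 * receives a copy of the one-word struct. */
static uint32_t read_reg(reg_t r)
{
	/* Stub: a real implementation would perform MMIO at offset r.reg. */
	return r.reg;
}

int main(void)
{
	const struct well_regs aux_regs = {
		.bios   = REG(0x45440),
		.driver = REG(0x45444),
		.debug  = REG(0x4544C),
	};

	printf("driver ctl offset: 0x%x\n", (unsigned)read_reg(aux_regs.driver));
	return 0;
}

The wrapper costs nothing at runtime, but a wrapper instance whose .reg member is never assigned is still copied into callees by value, which is the kind of situation a value-tracking checker can flag.
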
3693
3694static const struct i915_power_well_desc icl_power_wells[] = {
3695 {
3696 .name = "always-on",
3697 .always_on = true1,
3698 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
3699 .ops = &i9xx_always_on_power_well_ops,
3700 .id = DISP_PW_ID_NONE,
3701 },
3702 {
3703 .name = "power well 1",
3704 /* Handled by the DMC firmware */
3705 .always_on = true1,
3706 .domains = 0,
3707 .ops = &hsw_power_well_ops,
3708 .id = SKL_DISP_PW_1,
3709 {
3710 .hsw.regs = &hsw_power_well_regs,
3711 .hsw.idx = ICL_PW_CTL_IDX_PW_10,
3712 .hsw.has_fuses = true1,
3713 },
3714 },
3715 {
3716 .name = "DC off",
3717 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS( ( ( ( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_PIPE_B)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_A)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_B_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_PORT_DDI_B_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_E_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_AUX_C))
| (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_C_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_TRANSCODER_VDSC_PW2
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_MODESET
)) | (1ULL << (POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_DPLL_DC_OFF
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3718 .ops = &gen9_dc_off_power_well_ops,
3719 .id = SKL_DISP_DC_OFF,
3720 },
3721 {
3722 .name = "power well 2",
3723 .domains = ICL_PW_2_POWER_DOMAINS( ( ( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_PIPE_B)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_A)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_B_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_PORT_DDI_B_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_E_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES)) | (1ULL <<
(POWER_DOMAIN_AUX_B)) | (1ULL << (POWER_DOMAIN_AUX_C))
| (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_C_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_TRANSCODER_VDSC_PW2
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3724 .ops = &hsw_power_well_ops,
3725 .id = SKL_DISP_PW_2,
3726 {
3727 .hsw.regs = &hsw_power_well_regs,
3728 .hsw.idx = ICL_PW_CTL_IDX_PW_21,
3729 .hsw.has_fuses = true1,
3730 },
3731 },
3732 {
3733 .name = "power well 3",
3734 .domains = ICL_PW_3_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_B
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_A)) | (1ULL <<
(POWER_DOMAIN_TRANSCODER_B)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C
)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_PORT_DDI_B_LANES)) | (1ULL << (
POWER_DOMAIN_PORT_DDI_C_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_E_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_F_LANES)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_AUX_D
)) | (1ULL << (POWER_DOMAIN_AUX_E)) | (1ULL << (POWER_DOMAIN_AUX_F
)) | (1ULL << (POWER_DOMAIN_AUX_C_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_D_TBT)) | (1ULL << (POWER_DOMAIN_AUX_E_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT)) | (1ULL <<
(POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO)) |
(1ULL << (POWER_DOMAIN_INIT)))
,
3735 .ops = &hsw_power_well_ops,
3736 .id = ICL_DISP_PW_3,
3737 {
3738 .hsw.regs = &hsw_power_well_regs,
3739 .hsw.idx = ICL_PW_CTL_IDX_PW_32,
3740 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)),
3741 .hsw.has_vga = true1,
3742 .hsw.has_fuses = true1,
3743 },
3744 },
3745 {
3746 .name = "DDI A IO",
3747 .domains = ICL_DDI_IO_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO))),
3748 .ops = &hsw_power_well_ops,
3749 .id = DISP_PW_ID_NONE,
3750 {
3751 .hsw.regs = &icl_ddi_power_well_regs,
3752 .hsw.idx = ICL_PW_CTL_IDX_DDI_A0,
3753 },
3754 },
3755 {
3756 .name = "DDI B IO",
3757 .domains = ICL_DDI_IO_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO))),
3758 .ops = &hsw_power_well_ops,
3759 .id = DISP_PW_ID_NONE,
3760 {
3761 .hsw.regs = &icl_ddi_power_well_regs,
3762 .hsw.idx = ICL_PW_CTL_IDX_DDI_B1,
3763 },
3764 },
3765 {
3766 .name = "DDI C IO",
3767 .domains = ICL_DDI_IO_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_IO))),
3768 .ops = &hsw_power_well_ops,
3769 .id = DISP_PW_ID_NONE,
3770 {
3771 .hsw.regs = &icl_ddi_power_well_regs,
3772 .hsw.idx = ICL_PW_CTL_IDX_DDI_C2,
3773 },
3774 },
3775 {
3776 .name = "DDI D IO",
3777 .domains = ICL_DDI_IO_D_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_D_IO))),
3778 .ops = &hsw_power_well_ops,
3779 .id = DISP_PW_ID_NONE,
3780 {
3781 .hsw.regs = &icl_ddi_power_well_regs,
3782 .hsw.idx = ICL_PW_CTL_IDX_DDI_D3,
3783 },
3784 },
3785 {
3786 .name = "DDI E IO",
3787 .domains = ICL_DDI_IO_E_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_E_IO))),
3788 .ops = &hsw_power_well_ops,
3789 .id = DISP_PW_ID_NONE,
3790 {
3791 .hsw.regs = &icl_ddi_power_well_regs,
3792 .hsw.idx = ICL_PW_CTL_IDX_DDI_E4,
3793 },
3794 },
3795 {
3796 .name = "DDI F IO",
3797 .domains = ICL_DDI_IO_F_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_F_IO))),
3798 .ops = &hsw_power_well_ops,
3799 .id = DISP_PW_ID_NONE,
3800 {
3801 .hsw.regs = &icl_ddi_power_well_regs,
3802 .hsw.idx = ICL_PW_CTL_IDX_DDI_F5,
3803 },
3804 },
3805 {
3806 .name = "AUX A",
3807 .domains = ICL_AUX_A_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_IO_A)) | (1ULL << (POWER_DOMAIN_AUX_A
)))
,
3808 .ops = &icl_aux_power_well_ops,
3809 .id = DISP_PW_ID_NONE,
3810 {
3811 .hsw.regs = &icl_aux_power_well_regs,
3812 .hsw.idx = ICL_PW_CTL_IDX_AUX_A0,
3813 },
3814 },
3815 {
3816 .name = "AUX B",
3817 .domains = ICL_AUX_B_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_B))),
3818 .ops = &icl_aux_power_well_ops,
3819 .id = DISP_PW_ID_NONE,
3820 {
3821 .hsw.regs = &icl_aux_power_well_regs,
3822 .hsw.idx = ICL_PW_CTL_IDX_AUX_B1,
3823 },
3824 },
3825 {
3826 .name = "AUX C TC1",
3827 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_C))),
3828 .ops = &icl_aux_power_well_ops,
3829 .id = DISP_PW_ID_NONE,
3830 {
3831 .hsw.regs = &icl_aux_power_well_regs,
3832 .hsw.idx = ICL_PW_CTL_IDX_AUX_C2,
3833 .hsw.is_tc_tbt = false0,
3834 },
3835 },
3836 {
3837 .name = "AUX D TC2",
3838 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D))),
3839 .ops = &icl_aux_power_well_ops,
3840 .id = DISP_PW_ID_NONE,
3841 {
3842 .hsw.regs = &icl_aux_power_well_regs,
3843 .hsw.idx = ICL_PW_CTL_IDX_AUX_D3,
3844 .hsw.is_tc_tbt = false0,
3845 },
3846 },
3847 {
3848 .name = "AUX E TC3",
3849 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_E))),
3850 .ops = &icl_aux_power_well_ops,
3851 .id = DISP_PW_ID_NONE,
3852 {
3853 .hsw.regs = &icl_aux_power_well_regs,
3854 .hsw.idx = ICL_PW_CTL_IDX_AUX_E4,
3855 .hsw.is_tc_tbt = false0,
3856 },
3857 },
3858 {
3859 .name = "AUX F TC4",
3860 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_F))),
3861 .ops = &icl_aux_power_well_ops,
3862 .id = DISP_PW_ID_NONE,
3863 {
3864 .hsw.regs = &icl_aux_power_well_regs,
3865 .hsw.idx = ICL_PW_CTL_IDX_AUX_F5,
3866 .hsw.is_tc_tbt = false0,
3867 },
3868 },
3869 {
3870 .name = "AUX C TBT1",
3871 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_C_TBT))),
3872 .ops = &icl_aux_power_well_ops,
3873 .id = DISP_PW_ID_NONE,
3874 {
3875 .hsw.regs = &icl_aux_power_well_regs,
3876 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT18,
3877 .hsw.is_tc_tbt = true1,
3878 },
3879 },
3880 {
3881 .name = "AUX D TBT2",
3882 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D_TBT))),
3883 .ops = &icl_aux_power_well_ops,
3884 .id = DISP_PW_ID_NONE,
3885 {
3886 .hsw.regs = &icl_aux_power_well_regs,
3887 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT29,
3888 .hsw.is_tc_tbt = true1,
3889 },
3890 },
3891 {
3892 .name = "AUX E TBT3",
3893 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_E_TBT))),
3894 .ops = &icl_aux_power_well_ops,
3895 .id = DISP_PW_ID_NONE,
3896 {
3897 .hsw.regs = &icl_aux_power_well_regs,
3898 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT310,
3899 .hsw.is_tc_tbt = true1,
3900 },
3901 },
3902 {
3903 .name = "AUX F TBT4",
3904 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_F_TBT))),
3905 .ops = &icl_aux_power_well_ops,
3906 .id = DISP_PW_ID_NONE,
3907 {
3908 .hsw.regs = &icl_aux_power_well_regs,
3909 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT411,
3910 .hsw.is_tc_tbt = true1,
3911 },
3912 },
3913 {
3914 .name = "power well 4",
3915 .domains = ICL_PW_4_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
3916 .ops = &hsw_power_well_ops,
3917 .id = DISP_PW_ID_NONE,
3918 {
3919 .hsw.regs = &hsw_power_well_regs,
3920 .hsw.idx = ICL_PW_CTL_IDX_PW_43,
3921 .hsw.has_fuses = true1,
3922 .hsw.irq_pipe_mask = BIT(PIPE_C)(1UL << (PIPE_C)),
3923 },
3924 },
3925};
3926
3927static void
3928tgl_tc_cold_request(struct drm_i915_privateinteldrm_softc *i915, bool_Bool block)
3929{
3930 u8 tries = 0;
3931 int ret;
3932
3933 while (1) {
3934 u32 low_val;
3935 u32 high_val = 0;
3936
3937 if (block)
3938 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ0;
3939 else
3940 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ((u32)((1UL << (0)) + 0));
3941
3942 /*
3943 * Spec states that we should time out the request after 200us,
3944 * but the function below will time out after 500us
3945 */
3946 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD0x26, &low_val,
3947 &high_val);
3948 if (ret == 0) {
3949 if (block &&
3950 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED((u32)((1UL << (0)) + 0))))
3951 ret = -EIO5;
3952 else
3953 break;
3954 }
3955
3956 if (++tries == 3)
3957 break;
3958
3959 drm_msleep(1)mdelay(1);
3960 }
3961
3962 if (ret)
3963 drm_err(&i915->drm, "TC cold %sblock failed\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "TC cold %sblock failed\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , block ?
"" : "un")
3964 block ? "" : "un")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "TC cold %sblock failed\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , block ?
"" : "un")
;
3965 else
3966 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",drm_dev_dbg((&i915->drm)->dev, DRM_UT_KMS, "TC cold %sblock succeeded\n"
, block ? "" : "un")
3967 block ? "" : "un")drm_dev_dbg((&i915->drm)->dev, DRM_UT_KMS, "TC cold %sblock succeeded\n"
, block ? "" : "un")
;
3968}
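
tgl_tc_cold_request() above retries the pcode mailbox up to three times with a 1 ms sleep between attempts, and turns a block request that still reports an exit failure into -EIO. A self-contained sketch of that bounded-retry shape, with the mailbox call replaced by a stub and illustrative names throughout:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the firmware mailbox read; returns 0 on success and
 * reports status bits through *low. */
static int pcode_read_stub(bool block, unsigned int *low)
{
	(void)block;
	*low = 0;                            /* pretend the request succeeded */
	return 0;
}

static void msleep_stub(int ms) { (void)ms; /* no-op in this sketch */ }

#define EXIT_FAILED_BIT 0x1u

/* Same control-flow shape as the function above: retry up to three times,
 * map a "block" request that still reports failure to -EIO, and sleep
 * briefly between tries. */
static int tc_cold_request(bool block)
{
	unsigned int tries = 0;
	int ret;

	while (1) {
		unsigned int low = block ? 0 : 1;   /* request payload */

		ret = pcode_read_stub(block, &low);
		if (ret == 0) {
			if (block && (low & EXIT_FAILED_BIT))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		msleep_stub(1);
	}

	return ret;
}

int main(void)
{
	printf("block request: %d\n", tc_cold_request(true));
	return 0;
}
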
3969
3970static void
3971tgl_tc_cold_off_power_well_enable(struct drm_i915_privateinteldrm_softc *i915,
3972 struct i915_power_well *power_well)
3973{
3974 tgl_tc_cold_request(i915, true1);
3975}
3976
3977static void
3978tgl_tc_cold_off_power_well_disable(struct drm_i915_privateinteldrm_softc *i915,
3979 struct i915_power_well *power_well)
3980{
3981 tgl_tc_cold_request(i915, false0);
3982}
3983
3984static void
3985tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_privateinteldrm_softc *i915,
3986 struct i915_power_well *power_well)
3987{
3988 if (power_well->count > 0)
3989 tgl_tc_cold_off_power_well_enable(i915, power_well);
3990 else
3991 tgl_tc_cold_off_power_well_disable(i915, power_well);
3992}
3993
3994static bool_Bool
3995tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_privateinteldrm_softc *dev_priv,
3996 struct i915_power_well *power_well)
3997{
3998 /*
3999 * Not the correct implementation, but there is no way to just read it
4000 * from PCODE, so returning count to avoid state mismatch errors
4001 */
4002 return power_well->count;
4003}
4004
4005static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4006 .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4007 .enable = tgl_tc_cold_off_power_well_enable,
4008 .disable = tgl_tc_cold_off_power_well_disable,
4009 .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4010};
4011
4012static const struct i915_power_well_desc tgl_power_wells[] = {
4013 {
4014 .name = "always-on",
4015 .always_on = true1,
4016 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
4017 .ops = &i9xx_always_on_power_well_ops,
4018 .id = DISP_PW_ID_NONE,
4019 },
4020 {
4021 .name = "power well 1",
4022 /* Handled by the DMC firmware */
4023 .always_on = true1,
4024 .domains = 0,
4025 .ops = &hsw_power_well_ops,
4026 .id = SKL_DISP_PW_1,
4027 {
4028 .hsw.regs = &hsw_power_well_regs,
4029 .hsw.idx = ICL_PW_CTL_IDX_PW_10,
4030 .hsw.has_fuses = true1,
4031 },
4032 },
4033 {
4034 .name = "DC off",
4035 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS( ( ( ( (1ULL << (POWER_DOMAIN_PIPE_D)) | (1ULL <<
(POWER_DOMAIN_TRANSCODER_D)) | (1ULL << (POWER_DOMAIN_PIPE_D_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_C
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_PIPE_B)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_B)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_E_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_G_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_H_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_I_LANES
)) | (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_G
)) | (1ULL << (POWER_DOMAIN_AUX_H)) | (1ULL << (POWER_DOMAIN_AUX_I
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_G_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_H_TBT)) | (1ULL << (POWER_DOMAIN_AUX_I_TBT
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_MODESET
)) | (1ULL << (POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_AUX_C)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
4036 .ops = &gen9_dc_off_power_well_ops,
4037 .id = SKL_DISP_DC_OFF,
4038 },
4039 {
4040 .name = "power well 2",
4041 .domains = TGL_PW_2_POWER_DOMAINS( ( ( ( (1ULL << (POWER_DOMAIN_PIPE_D)) | (1ULL <<
(POWER_DOMAIN_TRANSCODER_D)) | (1ULL << (POWER_DOMAIN_PIPE_D_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_C
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_PIPE_B)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_B)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_E_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_G_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_H_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_I_LANES
)) | (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_G
)) | (1ULL << (POWER_DOMAIN_AUX_H)) | (1ULL << (POWER_DOMAIN_AUX_I
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_G_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_H_TBT)) | (1ULL << (POWER_DOMAIN_AUX_I_TBT
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_TRANSCODER_VDSC_PW2
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
4042 .ops = &hsw_power_well_ops,
4043 .id = SKL_DISP_PW_2,
4044 {
4045 .hsw.regs = &hsw_power_well_regs,
4046 .hsw.idx = ICL_PW_CTL_IDX_PW_21,
4047 .hsw.has_fuses = true1,
4048 },
4049 },
4050 {
4051 .name = "power well 3",
4052 .domains = TGL_PW_3_POWER_DOMAINS( ( ( (1ULL << (POWER_DOMAIN_PIPE_D)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_D)) | (1ULL << (POWER_DOMAIN_PIPE_D_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_C
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
))) | (1ULL << (POWER_DOMAIN_PIPE_B)) | (1ULL << (
POWER_DOMAIN_TRANSCODER_B)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_E_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_F_LANES
)) | (1ULL << (POWER_DOMAIN_PORT_DDI_G_LANES)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_H_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_I_LANES
)) | (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_G
)) | (1ULL << (POWER_DOMAIN_AUX_H)) | (1ULL << (POWER_DOMAIN_AUX_I
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_G_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_H_TBT)) | (1ULL << (POWER_DOMAIN_AUX_I_TBT
)) | (1ULL << (POWER_DOMAIN_VGA)) | (1ULL << (POWER_DOMAIN_AUDIO
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
4053 .ops = &hsw_power_well_ops,
4054 .id = ICL_DISP_PW_3,
4055 {
4056 .hsw.regs = &hsw_power_well_regs,
4057 .hsw.idx = ICL_PW_CTL_IDX_PW_32,
4058 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)),
4059 .hsw.has_vga = true1,
4060 .hsw.has_fuses = true1,
4061 },
4062 },
4063 {
4064 .name = "DDI A IO",
4065 .domains = ICL_DDI_IO_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO))),
4066 .ops = &hsw_power_well_ops,
4067 .id = DISP_PW_ID_NONE,
4068 {
4069 .hsw.regs = &icl_ddi_power_well_regs,
4070 .hsw.idx = ICL_PW_CTL_IDX_DDI_A0,
4071 }
4072 },
4073 {
4074 .name = "DDI B IO",
4075 .domains = ICL_DDI_IO_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO))),
4076 .ops = &hsw_power_well_ops,
4077 .id = DISP_PW_ID_NONE,
4078 {
4079 .hsw.regs = &icl_ddi_power_well_regs,
4080 .hsw.idx = ICL_PW_CTL_IDX_DDI_B1,
4081 }
4082 },
4083 {
4084 .name = "DDI C IO",
4085 .domains = ICL_DDI_IO_C_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_C_IO))),
4086 .ops = &hsw_power_well_ops,
4087 .id = DISP_PW_ID_NONE,
4088 {
4089 .hsw.regs = &icl_ddi_power_well_regs,
4090 .hsw.idx = ICL_PW_CTL_IDX_DDI_C2,
4091 }
4092 },
4093 {
4094 .name = "DDI D TC1 IO",
4095 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_D_IO))),
4096 .ops = &hsw_power_well_ops,
4097 .id = DISP_PW_ID_NONE,
4098 {
4099 .hsw.regs = &icl_ddi_power_well_regs,
4100 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC13,
4101 },
4102 },
4103 {
4104 .name = "DDI E TC2 IO",
4105 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_E_IO))),
4106 .ops = &hsw_power_well_ops,
4107 .id = DISP_PW_ID_NONE,
4108 {
4109 .hsw.regs = &icl_ddi_power_well_regs,
4110 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC24,
4111 },
4112 },
4113 {
4114 .name = "DDI F TC3 IO",
4115 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_F_IO))),
4116 .ops = &hsw_power_well_ops,
4117 .id = DISP_PW_ID_NONE,
4118 {
4119 .hsw.regs = &icl_ddi_power_well_regs,
4120 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC35,
4121 },
4122 },
4123 {
4124 .name = "DDI G TC4 IO",
4125 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_G_IO))),
4126 .ops = &hsw_power_well_ops,
4127 .id = DISP_PW_ID_NONE,
4128 {
4129 .hsw.regs = &icl_ddi_power_well_regs,
4130 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC46,
4131 },
4132 },
4133 {
4134 .name = "DDI H TC5 IO",
4135 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_H_IO))),
4136 .ops = &hsw_power_well_ops,
4137 .id = DISP_PW_ID_NONE,
4138 {
4139 .hsw.regs = &icl_ddi_power_well_regs,
4140 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC57,
4141 },
4142 },
4143 {
4144 .name = "DDI I TC6 IO",
4145 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_I_IO))),
4146 .ops = &hsw_power_well_ops,
4147 .id = DISP_PW_ID_NONE,
4148 {
4149 .hsw.regs = &icl_ddi_power_well_regs,
4150 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC68,
4151 },
4152 },
4153 {
4154 .name = "TC cold off",
4155 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_AUX_F)) | (1ULL << (POWER_DOMAIN_AUX_G
)) | (1ULL << (POWER_DOMAIN_AUX_H)) | (1ULL << (POWER_DOMAIN_AUX_I
)) | (1ULL << (POWER_DOMAIN_AUX_D_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_E_TBT)) | (1ULL << (POWER_DOMAIN_AUX_F_TBT
)) | (1ULL << (POWER_DOMAIN_AUX_G_TBT)) | (1ULL <<
(POWER_DOMAIN_AUX_H_TBT)) | (1ULL << (POWER_DOMAIN_AUX_I_TBT
)) | (1ULL << (POWER_DOMAIN_TC_COLD_OFF)))
,
4156 .ops = &tgl_tc_cold_off_ops,
4157 .id = DISP_PW_ID_NONE,
4158 },
4159 {
4160 .name = "AUX A",
4161 .domains = TGL_AUX_A_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_IO_A)) | (1ULL << (POWER_DOMAIN_AUX_A
)))
,
4162 .ops = &icl_aux_power_well_ops,
4163 .id = DISP_PW_ID_NONE,
4164 {
4165 .hsw.regs = &icl_aux_power_well_regs,
4166 .hsw.idx = ICL_PW_CTL_IDX_AUX_A0,
4167 },
4168 },
4169 {
4170 .name = "AUX B",
4171 .domains = TGL_AUX_B_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_B))),
4172 .ops = &icl_aux_power_well_ops,
4173 .id = DISP_PW_ID_NONE,
4174 {
4175 .hsw.regs = &icl_aux_power_well_regs,
4176 .hsw.idx = ICL_PW_CTL_IDX_AUX_B1,
4177 },
4178 },
4179 {
4180 .name = "AUX C",
4181 .domains = TGL_AUX_C_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_C))),
4182 .ops = &icl_aux_power_well_ops,
4183 .id = DISP_PW_ID_NONE,
4184 {
4185 .hsw.regs = &icl_aux_power_well_regs,
4186 .hsw.idx = ICL_PW_CTL_IDX_AUX_C2,
4187 },
4188 },
4189 {
4190 .name = "AUX D TC1",
4191 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D))),
4192 .ops = &icl_aux_power_well_ops,
4193 .id = DISP_PW_ID_NONE,
4194 {
4195 .hsw.regs = &icl_aux_power_well_regs,
4196 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC13,
4197 .hsw.is_tc_tbt = false0,
4198 },
4199 },
4200 {
4201 .name = "AUX E TC2",
4202 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_E))),
4203 .ops = &icl_aux_power_well_ops,
4204 .id = DISP_PW_ID_NONE,
4205 {
4206 .hsw.regs = &icl_aux_power_well_regs,
4207 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC24,
4208 .hsw.is_tc_tbt = false0,
4209 },
4210 },
4211 {
4212 .name = "AUX F TC3",
4213 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_F))),
4214 .ops = &icl_aux_power_well_ops,
4215 .id = DISP_PW_ID_NONE,
4216 {
4217 .hsw.regs = &icl_aux_power_well_regs,
4218 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC35,
4219 .hsw.is_tc_tbt = false0,
4220 },
4221 },
4222 {
4223 .name = "AUX G TC4",
4224 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_G))),
4225 .ops = &icl_aux_power_well_ops,
4226 .id = DISP_PW_ID_NONE,
4227 {
4228 .hsw.regs = &icl_aux_power_well_regs,
4229 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC46,
4230 .hsw.is_tc_tbt = false0,
4231 },
4232 },
4233 {
4234 .name = "AUX H TC5",
4235 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_H))),
4236 .ops = &icl_aux_power_well_ops,
4237 .id = DISP_PW_ID_NONE,
4238 {
4239 .hsw.regs = &icl_aux_power_well_regs,
4240 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC57,
4241 .hsw.is_tc_tbt = false0,
4242 },
4243 },
4244 {
4245 .name = "AUX I TC6",
4246 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_I))),
4247 .ops = &icl_aux_power_well_ops,
4248 .id = DISP_PW_ID_NONE,
4249 {
4250 .hsw.regs = &icl_aux_power_well_regs,
4251 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC68,
4252 .hsw.is_tc_tbt = false0,
4253 },
4254 },
4255 {
4256 .name = "AUX D TBT1",
4257 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D_TBT))),
4258 .ops = &icl_aux_power_well_ops,
4259 .id = DISP_PW_ID_NONE,
4260 {
4261 .hsw.regs = &icl_aux_power_well_regs,
4262 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT19,
4263 .hsw.is_tc_tbt = true1,
4264 },
4265 },
4266 {
4267 .name = "AUX E TBT2",
4268 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_E_TBT))),
4269 .ops = &icl_aux_power_well_ops,
4270 .id = DISP_PW_ID_NONE,
4271 {
4272 .hsw.regs = &icl_aux_power_well_regs,
4273 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT210,
4274 .hsw.is_tc_tbt = true1,
4275 },
4276 },
4277 {
4278 .name = "AUX F TBT3",
4279 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_F_TBT))),
4280 .ops = &icl_aux_power_well_ops,
4281 .id = DISP_PW_ID_NONE,
4282 {
4283 .hsw.regs = &icl_aux_power_well_regs,
4284 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT311,
4285 .hsw.is_tc_tbt = true1,
4286 },
4287 },
4288 {
4289 .name = "AUX G TBT4",
4290 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_G_TBT))),
4291 .ops = &icl_aux_power_well_ops,
4292 .id = DISP_PW_ID_NONE,
4293 {
4294 .hsw.regs = &icl_aux_power_well_regs,
4295 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT412,
4296 .hsw.is_tc_tbt = true1,
4297 },
4298 },
4299 {
4300 .name = "AUX H TBT5",
4301 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_H_TBT))),
4302 .ops = &icl_aux_power_well_ops,
4303 .id = DISP_PW_ID_NONE,
4304 {
4305 .hsw.regs = &icl_aux_power_well_regs,
4306 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT513,
4307 .hsw.is_tc_tbt = true1,
4308 },
4309 },
4310 {
4311 .name = "AUX I TBT6",
4312 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_I_TBT))),
4313 .ops = &icl_aux_power_well_ops,
4314 .id = DISP_PW_ID_NONE,
4315 {
4316 .hsw.regs = &icl_aux_power_well_regs,
4317 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT614,
4318 .hsw.is_tc_tbt = true1,
4319 },
4320 },
4321 {
4322 .name = "power well 4",
4323 .domains = TGL_PW_4_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_PIPE_D)) | (1ULL << (POWER_DOMAIN_TRANSCODER_D
)) | (1ULL << (POWER_DOMAIN_PIPE_D_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_C
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_INIT
)))
,
4324 .ops = &hsw_power_well_ops,
4325 .id = DISP_PW_ID_NONE,
4326 {
4327 .hsw.regs = &hsw_power_well_regs,
4328 .hsw.idx = ICL_PW_CTL_IDX_PW_43,
4329 .hsw.has_fuses = true1,
4330 .hsw.irq_pipe_mask = BIT(PIPE_C)(1UL << (PIPE_C)),
4331 }
4332 },
4333 {
4334 .name = "power well 5",
4335 .domains = TGL_PW_5_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PIPE_D)) | (1ULL << (POWER_DOMAIN_TRANSCODER_D
)) | (1ULL << (POWER_DOMAIN_PIPE_D_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_INIT)))
,
4336 .ops = &hsw_power_well_ops,
4337 .id = DISP_PW_ID_NONE,
4338 {
4339 .hsw.regs = &hsw_power_well_regs,
4340 .hsw.idx = TGL_PW_CTL_IDX_PW_54,
4341 .hsw.has_fuses = true1,
4342 .hsw.irq_pipe_mask = BIT(PIPE_D)(1UL << (PIPE_D)),
4343 },
4344 },
4345};
4346
4347static const struct i915_power_well_desc rkl_power_wells[] = {
4348 {
4349 .name = "always-on",
4350 .always_on = true1,
4351 .domains = POWER_DOMAIN_MASK((((~0ULL) >> (64 - (POWER_DOMAIN_NUM - 1) - 1)) & (
(~0ULL) << (0))))
,
4352 .ops = &i9xx_always_on_power_well_ops,
4353 .id = DISP_PW_ID_NONE,
4354 },
4355 {
4356 .name = "power well 1",
4357 /* Handled by the DMC firmware */
4358 .always_on = true1,
4359 .domains = 0,
4360 .ops = &hsw_power_well_ops,
4361 .id = SKL_DISP_PW_1,
4362 {
4363 .hsw.regs = &hsw_power_well_regs,
4364 .hsw.idx = ICL_PW_CTL_IDX_PW_10,
4365 .hsw.has_fuses = true1,
4366 },
4367 },
4368 {
4369 .name = "DC off",
4370 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS( ( ( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (
POWER_DOMAIN_PIPE_C_PANEL_FITTER)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_B
)) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_AUDIO)) | (1ULL << (POWER_DOMAIN_VGA
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_E_LANES
)) | (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_MODESET
)) | (1ULL << (POWER_DOMAIN_AUX_A)) | (1ULL << (POWER_DOMAIN_AUX_B
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
4371 .ops = &gen9_dc_off_power_well_ops,
4372 .id = SKL_DISP_DC_OFF,
4373 },
4374 {
4375 .name = "power well 3",
4376 .domains = RKL_PW_3_POWER_DOMAINS( ( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_INIT))) | (1ULL << (POWER_DOMAIN_PIPE_B)
) | (1ULL << (POWER_DOMAIN_PIPE_B_PANEL_FITTER)) | (1ULL
<< (POWER_DOMAIN_AUDIO)) | (1ULL << (POWER_DOMAIN_VGA
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_B)) | (1ULL <<
(POWER_DOMAIN_PORT_DDI_D_LANES)) | (1ULL << (POWER_DOMAIN_PORT_DDI_E_LANES
)) | (1ULL << (POWER_DOMAIN_AUX_D)) | (1ULL << (POWER_DOMAIN_AUX_E
)) | (1ULL << (POWER_DOMAIN_INIT)))
,
4377 .ops = &hsw_power_well_ops,
4378 .id = ICL_DISP_PW_3,
4379 {
4380 .hsw.regs = &hsw_power_well_regs,
4381 .hsw.idx = ICL_PW_CTL_IDX_PW_32,
4382 .hsw.irq_pipe_mask = BIT(PIPE_B)(1UL << (PIPE_B)),
4383 .hsw.has_vga = true1,
4384 .hsw.has_fuses = true1,
4385 },
4386 },
4387 {
4388 .name = "power well 4",
4389 .domains = RKL_PW_4_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PIPE_C)) | (1ULL << (POWER_DOMAIN_PIPE_C_PANEL_FITTER
)) | (1ULL << (POWER_DOMAIN_TRANSCODER_C)) | (1ULL <<
(POWER_DOMAIN_INIT)))
,
4390 .ops = &hsw_power_well_ops,
4391 .id = DISP_PW_ID_NONE,
4392 {
4393 .hsw.regs = &hsw_power_well_regs,
4394 .hsw.idx = ICL_PW_CTL_IDX_PW_43,
4395 .hsw.has_fuses = true1,
4396 .hsw.irq_pipe_mask = BIT(PIPE_C)(1UL << (PIPE_C)),
4397 }
4398 },
4399 {
4400 .name = "DDI A IO",
4401 .domains = ICL_DDI_IO_A_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_A_IO))),
4402 .ops = &hsw_power_well_ops,
4403 .id = DISP_PW_ID_NONE,
4404 {
4405 .hsw.regs = &icl_ddi_power_well_regs,
4406 .hsw.idx = ICL_PW_CTL_IDX_DDI_A0,
4407 }
4408 },
4409 {
4410 .name = "DDI B IO",
4411 .domains = ICL_DDI_IO_B_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_B_IO))),
4412 .ops = &hsw_power_well_ops,
4413 .id = DISP_PW_ID_NONE,
4414 {
4415 .hsw.regs = &icl_ddi_power_well_regs,
4416 .hsw.idx = ICL_PW_CTL_IDX_DDI_B1,
4417 }
4418 },
4419 {
4420 .name = "DDI D TC1 IO",
4421 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_D_IO))),
4422 .ops = &hsw_power_well_ops,
4423 .id = DISP_PW_ID_NONE,
4424 {
4425 .hsw.regs = &icl_ddi_power_well_regs,
4426 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC13,
4427 },
4428 },
4429 {
4430 .name = "DDI E TC2 IO",
4431 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_PORT_DDI_E_IO))),
4432 .ops = &hsw_power_well_ops,
4433 .id = DISP_PW_ID_NONE,
4434 {
4435 .hsw.regs = &icl_ddi_power_well_regs,
4436 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC24,
4437 },
4438 },
4439 {
4440 .name = "AUX A",
4441 .domains = ICL_AUX_A_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_IO_A)) | (1ULL << (POWER_DOMAIN_AUX_A
)))
,
4442 .ops = &icl_aux_power_well_ops,
4443 .id = DISP_PW_ID_NONE,
4444 {
4445 .hsw.regs = &icl_aux_power_well_regs,
4446 .hsw.idx = ICL_PW_CTL_IDX_AUX_A0,
4447 },
4448 },
4449 {
4450 .name = "AUX B",
4451 .domains = ICL_AUX_B_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_B))),
4452 .ops = &icl_aux_power_well_ops,
4453 .id = DISP_PW_ID_NONE,
4454 {
4455 .hsw.regs = &icl_aux_power_well_regs,
4456 .hsw.idx = ICL_PW_CTL_IDX_AUX_B1,
4457 },
4458 },
4459 {
4460 .name = "AUX D TC1",
4461 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_D))),
4462 .ops = &icl_aux_power_well_ops,
4463 .id = DISP_PW_ID_NONE,
4464 {
4465 .hsw.regs = &icl_aux_power_well_regs,
4466 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC13,
4467 },
4468 },
4469 {
4470 .name = "AUX E TC2",
4471 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS( (1ULL << (POWER_DOMAIN_AUX_E))),
4472 .ops = &icl_aux_power_well_ops,
4473 .id = DISP_PW_ID_NONE,
4474 {
4475 .hsw.regs = &icl_aux_power_well_regs,
4476 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC24,
4477 },
4478 },
4479};
4480
4481static int
4482sanitize_disable_power_well_option(const struct drm_i915_privateinteldrm_softc *dev_priv,
4483 int disable_power_well)
4484{
4485 if (disable_power_well >= 0)
4486 return !!disable_power_well;
4487
4488 return 1;
4489}
4490
4491static u32 get_allowed_dc_mask(const struct drm_i915_privateinteldrm_softc *dev_priv,
4492 int enable_dc)
4493{
4494 u32 mask;
4495 int requested_dc;
4496 int max_dc;
4497
4498 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12) {
4499 max_dc = 4;
4500 /*
4501 * DC9 has a separate HW flow from the rest of the DC states,
4502 * not depending on the DMC firmware. It's needed by system
4503 * suspend/resume, so allow it unconditionally.
4504 */
4505 mask = DC_STATE_EN_DC9(1 << 3);
4506 } else if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11))) {
4507 max_dc = 2;
4508 mask = DC_STATE_EN_DC9(1 << 3);
4509 } else if (IS_GEN(dev_priv, 10)(0 + (&(dev_priv)->__info)->gen == (10)) || IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
!((&(dev_priv)->__info)->is_lp))
) {
4510 max_dc = 2;
4511 mask = 0;
4512 } else if (IS_GEN9_LP(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
((&(dev_priv)->__info)->is_lp))
) {
4513 max_dc = 1;
4514 mask = DC_STATE_EN_DC9(1 << 3);
4515 } else {
4516 max_dc = 0;
4517 mask = 0;
4518 }
4519
4520 if (!dev_priv->params.disable_power_well)
4521 max_dc = 0;
4522
4523 if (enable_dc >= 0 && enable_dc <= max_dc) {
4524 requested_dc = enable_dc;
4525 } else if (enable_dc == -1) {
4526 requested_dc = max_dc;
4527 } else if (enable_dc > max_dc && enable_dc <= 4) {
4528 drm_dbg_kms(&dev_priv->drm,drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Adjusting requested max DC state (%d->%d)\n"
, enable_dc, max_dc)
4529 "Adjusting requested max DC state (%d->%d)\n",
4530 enable_dc, max_dc)
;
4531 requested_dc = max_dc;
4532 } else {
4533 drm_err(&dev_priv->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected value for enable_dc (%d)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , enable_dc
)
4534 "Unexpected value for enable_dc (%d)\n", enable_dc)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Unexpected value for enable_dc (%d)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , enable_dc
)
;
4535 requested_dc = max_dc;
4536 }
4537
4538 switch (requested_dc) {
4539 case 4:
4540 mask |= DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)) | DC_STATE_EN_UPTO_DC6(2 << 0);
4541 break;
4542 case 3:
4543 mask |= DC_STATE_EN_DC3CO((u32)((1UL << (30)) + 0)) | DC_STATE_EN_UPTO_DC5(1 << 0);
4544 break;
4545 case 2:
4546 mask |= DC_STATE_EN_UPTO_DC6(2 << 0);
4547 break;
4548 case 1:
4549 mask |= DC_STATE_EN_UPTO_DC5(1 << 0);
4550 break;
4551 }
4552
4553 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Allowed DC state mask %02x\n"
, mask)
;
4554
4555 return mask;
4556}
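
get_allowed_dc_mask() above works in two steps: pick the deepest DC state the platform supports, clamp the requested level to it, then translate the level into enable bits. A compressed sketch of that selection, covering only a couple of the generation branches and using illustrative bit values rather than the real register encodings:

#include <stdio.h>

#define EN_DC9    (1u << 3)
#define EN_DC3CO  (1u << 30)
#define EN_DC5    (1u << 0)
#define EN_DC6    (2u << 0)

/* Clamp the requested level to what the platform supports, then translate
 * the level into enable bits, mirroring the switch above. */
static unsigned int allowed_dc_mask(int gen, int requested)
{
	unsigned int mask = 0;
	int max_dc;

	if (gen >= 12) {
		max_dc = 4;
		mask = EN_DC9;               /* DC9 is always allowed here */
	} else if (gen == 11) {
		max_dc = 2;
		mask = EN_DC9;
	} else {
		max_dc = 0;                  /* older parts omitted in this sketch */
	}

	if (requested < 0 || requested > max_dc)
		requested = max_dc;

	switch (requested) {
	case 4: mask |= EN_DC3CO | EN_DC6; break;
	case 3: mask |= EN_DC3CO | EN_DC5; break;
	case 2: mask |= EN_DC6; break;
	case 1: mask |= EN_DC5; break;
	}

	return mask;
}

int main(void)
{
	printf("gen12, auto:            0x%x\n", allowed_dc_mask(12, -1));
	printf("gen11, dc3 (clamped):   0x%x\n", allowed_dc_mask(11, 3));
	return 0;
}
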
4557
4558static int
4559__set_power_wells(struct i915_power_domains *power_domains,
4560 const struct i915_power_well_desc *power_well_descs,
4561 int power_well_count)
4562{
4563 struct drm_i915_privateinteldrm_softc *i915 = container_of(power_domains,({ const __typeof( ((struct inteldrm_softc *)0)->power_domains
) *__mptr = (power_domains); (struct inteldrm_softc *)( (char
*)__mptr - __builtin_offsetof(struct inteldrm_softc, power_domains
) );})
4564 struct drm_i915_private,({ const __typeof( ((struct inteldrm_softc *)0)->power_domains
) *__mptr = (power_domains); (struct inteldrm_softc *)( (char
*)__mptr - __builtin_offsetof(struct inteldrm_softc, power_domains
) );})
4565 power_domains)({ const __typeof( ((struct inteldrm_softc *)0)->power_domains
) *__mptr = (power_domains); (struct inteldrm_softc *)( (char
*)__mptr - __builtin_offsetof(struct inteldrm_softc, power_domains
) );})
;
4566 u64 power_well_ids = 0;
4567 int i;
4568
4569 power_domains->power_well_count = power_well_count;
4570 power_domains->power_wells =
4571 kcalloc(power_well_count,
4572 sizeof(*power_domains->power_wells),
4573 GFP_KERNEL(0x0001 | 0x0004));
4574 if (!power_domains->power_wells)
4575 return -ENOMEM12;
4576
4577 for (i = 0; i < power_well_count; i++) {
4578 enum i915_power_well_id id = power_well_descs[i].id;
4579
4580 power_domains->power_wells[i].desc = &power_well_descs[i];
4581
4582 if (id == DISP_PW_ID_NONE)
4583 continue;
4584
4585 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8)({ int __ret = !!((id >= sizeof(power_well_ids) * 8)); if (
__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->
drm))->dev), "", "drm_WARN_ON(" "id >= sizeof(power_well_ids) * 8"
")"); __builtin_expect(!!(__ret), 0); })
;
4586 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id))({ int __ret = !!((power_well_ids & (1ULL << (id)))
); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "power_well_ids & (1ULL << (id))"
")"); __builtin_expect(!!(__ret), 0); })
;
4587 power_well_ids |= BIT_ULL(id)(1ULL << (id));
4588 }
4589
4590 return 0;
4591}
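
__set_power_wells() above uses a u64 bitmap to assert that every non-anonymous well ID is unique and below 64. The same bookkeeping in isolation, as a small sketch with plain ints standing in for the enum IDs:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Every non-anonymous well ID must be unique and must fit in a 64-bit
 * bitmap. Returns 0 on success, -1 on a duplicate or out-of-range ID. */
static int check_unique_ids(const int *ids, int count)
{
	uint64_t seen = 0;
	int i;

	for (i = 0; i < count; i++) {
		int id = ids[i];

		if (id < 0)                  /* stand-in for DISP_PW_ID_NONE */
			continue;
		if (id >= 64)
			return -1;
		if (seen & (1ULL << id))     /* ID used twice */
			return -1;
		seen |= 1ULL << id;
	}
	return 0;
}

int main(void)
{
	int ok[]  = { 1, -1, 2, -1, 5 };
	int dup[] = { 1, 2, 2 };

	assert(check_unique_ids(ok, 5) == 0);
	assert(check_unique_ids(dup, 3) == -1);
	printf("ID checks behaved as expected\n");
	return 0;
}
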
4592
4593#define set_power_wells(power_domains, __power_well_descs)__set_power_wells(power_domains, __power_well_descs, (sizeof(
(__power_well_descs)) / sizeof((__power_well_descs)[0])))
\
4594 __set_power_wells(power_domains, __power_well_descs, \
4595 ARRAY_SIZE(__power_well_descs)(sizeof((__power_well_descs)) / sizeof((__power_well_descs)[0
]))
)
4596
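
The set_power_wells() wrapper above exists so callers never pass an element count by hand: ARRAY_SIZE() derives it from the array type itself. A short sketch of the same idea with hypothetical names (register_descs, register_all):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct desc { const char *name; };

static void register_descs(const struct desc *d, int n)
{
	for (int i = 0; i < n; i++)
		printf("registering %s\n", d[i].name);
}

/* Wrapper in the same spirit as set_power_wells(): the element count is
 * computed from the array itself, so it cannot drift out of sync. This only
 * works on true arrays, not on pointers that have decayed. */
#define register_all(descs) register_descs(descs, ARRAY_SIZE(descs))

int main(void)
{
	static const struct desc platform_descs[] = {
		{ "always-on" }, { "power well 1" }, { "power well 2" },
	};

	register_all(platform_descs);
	return 0;
}
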
4597/**
4598 * intel_power_domains_init - initializes the power domain structures
4599 * @dev_priv: i915 device instance
4600 *
4601 * Initializes the power domain structures for @dev_priv depending upon the
4602 * supported platform.
4603 */
4604int intel_power_domains_init(struct drm_i915_privateinteldrm_softc *dev_priv)
4605{
4606 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4607 int err;
4608
4609 dev_priv->params.disable_power_well =
4610 sanitize_disable_power_well_option(dev_priv,
4611 dev_priv->params.disable_power_well);
4612 dev_priv->csr.allowed_dc_mask =
4613 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
4614
4615 dev_priv->csr.target_dc_state =
4616 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6(2 << 0));
4617
4618 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64)extern char _ctassert[(!(POWER_DOMAIN_NUM > 64)) ? 1 : -1 ]
__attribute__((__unused__))
;
4619
4620 rw_init(&power_domains->lock, "ipdl")_rw_init_flags(&power_domains->lock, "ipdl", 0, ((void
*)0))
;
4621
4622 INIT_DELAYED_WORK(&power_domains->async_put_work,
4623 intel_display_power_put_async_work);
4624
4625 /*
4626 * The enabling order will be from lower- to higher-indexed wells;
4627 * the disabling order is reversed.
4628 */
4629 if (IS_ROCKETLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)) {
4630 err = set_power_wells(power_domains, rkl_power_wells)__set_power_wells(power_domains, rkl_power_wells, (sizeof((rkl_power_wells
)) / sizeof((rkl_power_wells)[0])))
;
4631 } else if (IS_GEN(dev_priv, 12)(0 + (&(dev_priv)->__info)->gen == (12))) {
4632 err = set_power_wells(power_domains, tgl_power_wells)__set_power_wells(power_domains, tgl_power_wells, (sizeof((tgl_power_wells
)) / sizeof((tgl_power_wells)[0])))
;
4633 } else if (IS_GEN(dev_priv, 11)(0 + (&(dev_priv)->__info)->gen == (11))) {
4634 err = set_power_wells(power_domains, icl_power_wells)__set_power_wells(power_domains, icl_power_wells, (sizeof((icl_power_wells
)) / sizeof((icl_power_wells)[0])))
;
4635 } else if (IS_CANNONLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)) {
4636 err = set_power_wells(power_domains, cnl_power_wells)__set_power_wells(power_domains, cnl_power_wells, (sizeof((cnl_power_wells
)) / sizeof((cnl_power_wells)[0])))
;
4637
4638 /*
4639 * DDI and Aux IO are getting enabled for all ports
4640 * regardless the presence or use. So, in order to avoid
4641 * timeouts, lets remove them from the list
4642 * for the SKUs without port F.
4643 */
4644 if (!IS_CNL_WITH_PORT_F(dev_priv)IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, (0)))
4645 power_domains->power_well_count -= 2;
4646 } else if (IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)) {
4647 err = set_power_wells(power_domains, glk_power_wells)__set_power_wells(power_domains, glk_power_wells, (sizeof((glk_power_wells
)) / sizeof((glk_power_wells)[0])))
;
4648 } else if (IS_BROXTON(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROXTON)) {
4649 err = set_power_wells(power_domains, bxt_power_wells)__set_power_wells(power_domains, bxt_power_wells, (sizeof((bxt_power_wells
)) / sizeof((bxt_power_wells)[0])))
;
4650 } else if (IS_GEN9_BC(dev_priv)((0 + (&(dev_priv)->__info)->gen == (9)) &&
!((&(dev_priv)->__info)->is_lp))
) {
4651 err = set_power_wells(power_domains, skl_power_wells)__set_power_wells(power_domains, skl_power_wells, (sizeof((skl_power_wells
)) / sizeof((skl_power_wells)[0])))
;
4652 } else if (IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)) {
4653 err = set_power_wells(power_domains, chv_power_wells)__set_power_wells(power_domains, chv_power_wells, (sizeof((chv_power_wells
)) / sizeof((chv_power_wells)[0])))
;
4654 } else if (IS_BROADWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROADWELL)) {
4655 err = set_power_wells(power_domains, bdw_power_wells)__set_power_wells(power_domains, bdw_power_wells, (sizeof((bdw_power_wells
)) / sizeof((bdw_power_wells)[0])))
;
4656 } else if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL)) {
4657 err = set_power_wells(power_domains, hsw_power_wells)__set_power_wells(power_domains, hsw_power_wells, (sizeof((hsw_power_wells
)) / sizeof((hsw_power_wells)[0])))
;
4658 } else if (IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)) {
4659 err = set_power_wells(power_domains, vlv_power_wells)__set_power_wells(power_domains, vlv_power_wells, (sizeof((vlv_power_wells
)) / sizeof((vlv_power_wells)[0])))
;
4660 } else if (IS_I830(dev_priv)IS_PLATFORM(dev_priv, INTEL_I830)) {
4661 err = set_power_wells(power_domains, i830_power_wells)__set_power_wells(power_domains, i830_power_wells, (sizeof((i830_power_wells
)) / sizeof((i830_power_wells)[0])))
;
4662 } else {
4663 err = set_power_wells(power_domains, i9xx_always_on_power_well)__set_power_wells(power_domains, i9xx_always_on_power_well, (
sizeof((i9xx_always_on_power_well)) / sizeof((i9xx_always_on_power_well
)[0])))
;
4664 }
4665
4666 return err;
4667}
4668
4669/**
4670 * intel_power_domains_cleanup - clean up power domains resources
4671 * @dev_priv: i915 device instance
4672 *
4673 * Release any resources acquired by intel_power_domains_init()
4674 */
4675void intel_power_domains_cleanup(struct drm_i915_privateinteldrm_softc *dev_priv)
4676{
4677 kfree(dev_priv->power_domains.power_wells);
4678}
4679
4680static void intel_power_domains_sync_hw(struct drm_i915_privateinteldrm_softc *dev_priv)
4681{
4682 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4683 struct i915_power_well *power_well;
4684
4685 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
4686 for_each_power_well(dev_priv, power_well)for ((power_well) = (dev_priv)->power_domains.power_wells;
(power_well) - (dev_priv)->power_domains.power_wells <
(dev_priv)->power_domains.power_well_count; (power_well)++
)
{
4687 power_well->desc->ops->sync_hw(dev_priv, power_well);
4688 power_well->hw_enabled =
4689 power_well->desc->ops->is_enabled(dev_priv, power_well);
4690 }
4691 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
4692}
4693
4694static void gen9_dbuf_slice_set(struct drm_i915_privateinteldrm_softc *dev_priv,
4695 enum dbuf_slice slice, bool_Bool enable)
4696{
4697 i915_reg_t reg = DBUF_CTL_S(slice)((const i915_reg_t){ .reg = (((0x45008) + (slice) * ((0x44FE8
) - (0x45008)))) })
;
4698 bool_Bool state;
4699 u32 val;
4700
4701 val = intel_de_read(dev_priv, reg);
4702 if (enable)
4703 val |= DBUF_POWER_REQUEST(1 << 31);
4704 else
4705 val &= ~DBUF_POWER_REQUEST(1 << 31);
4706 intel_de_write(dev_priv, reg, val);
4707 intel_de_posting_read(dev_priv, reg);
4708 udelay(10);
4709
4710 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE(1 << 30);
4711 drm_WARN(&dev_priv->drm, enable != state,
4712 "DBuf slice %d power %s timeout!\n",
4713 slice, enable ? "enable" : "disable");
[drm_WARN() expands to: ({ int __ret = !!(enable != state); if (__ret) printf("%s %s: " "DBuf slice %d power %s timeout!\n", dev_driver_string((&dev_priv->drm)->dev), "", slice, enable ? "enable" : "disable"); __builtin_expect(!!(__ret), 0); })]
4714}
4715
4716void gen9_dbuf_slices_update(struct drm_i915_privateinteldrm_softc *dev_priv,
4717 u8 req_slices)
4718{
4719 int num_slices = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->num_supported_dbuf_slices;
4720 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4721 enum dbuf_slice slice;
4722
4723 drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
4724 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
4725 req_slices, num_slices);
[drm_WARN() expands to: ({ int __ret = !!(req_slices & ~((1UL << (num_slices)) - 1)); if (__ret) printf("%s %s: " "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n", dev_driver_string((&dev_priv->drm)->dev), "", req_slices, num_slices); __builtin_expect(!!(__ret), 0); })]
4726
4727 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4728 req_slices);
[drm_dbg_kms() expands to: drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Updating dbuf slices to 0x%x\n", req_slices)]
4729
4730 /*
4731 * Might be running this in parallel to gen9_dc_off_power_well_enable
4732 * being called from intel_dp_detect for instance,
4733 * which causes assertion triggered by race condition,
4734 * as gen9_assert_dbuf_enabled might preempt this when registers
4735 * were already updated, while dev_priv was not.
4736 */
4737 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
4738
4739 for (slice = DBUF_S1; slice < num_slices; slice++)
4740 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)(1UL << (slice)));
4741
4742 dev_priv->dbuf.enabled_slices = req_slices;
4743
4744 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
4745}
4746
4747static void gen9_dbuf_enable(struct drm_i915_privateinteldrm_softc *dev_priv)
4748{
4749 dev_priv->dbuf.enabled_slices =
4750 intel_enabled_dbuf_slices_mask(dev_priv);
4751
4752 /*
4753 * Just power up at least 1 slice, we will
4754 * figure out later which slices we have and what we need.
4755 */
4756 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1)(1UL << (DBUF_S1)) |
4757 dev_priv->dbuf.enabled_slices);
4758}
4759
4760static void gen9_dbuf_disable(struct drm_i915_privateinteldrm_softc *dev_priv)
4761{
4762 gen9_dbuf_slices_update(dev_priv, 0);
4763}
4764
4765static void icl_mbus_init(struct drm_i915_privateinteldrm_softc *dev_priv)
4766{
4767 unsigned long abox_regs = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->abox_mask;
4768 u32 mask, val, i;
4769
4770 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK(0x1F << 0) |
4771 MBUS_ABOX_BT_CREDIT_POOL2_MASK(0x1F << 8) |
4772 MBUS_ABOX_B_CREDIT_MASK(0xF << 16) |
4773 MBUS_ABOX_BW_CREDIT_MASK(3 << 20);
4774 val = MBUS_ABOX_BT_CREDIT_POOL1(16)((16) << 0) |
4775 MBUS_ABOX_BT_CREDIT_POOL2(16)((16) << 8) |
4776 MBUS_ABOX_B_CREDIT(1)((1) << 16) |
4777 MBUS_ABOX_BW_CREDIT(1)((1) << 20);
4778
4779 /*
4780 * gen12 platforms that use abox1 and abox2 for pixel data reads still
4781 * expect us to program the abox_ctl0 register as well, even though
4782 * we don't have to program other instance-0 registers like BW_BUDDY.
4783 */
4784 if (IS_GEN(dev_priv, 12)(0 + (&(dev_priv)->__info)->gen == (12)))
7
Assuming the condition is false
8
Taking false branch
4785 abox_regs |= BIT(0)(1UL << (0));
4786
4787 for_each_set_bit(i, &abox_regs, sizeof(abox_regs))for ((i) = find_first_bit((&abox_regs), (sizeof(abox_regs
))); (i) < (sizeof(abox_regs)); (i) = find_next_bit((&
abox_regs), (sizeof(abox_regs)), (i) + 1))
9
Loop condition is true. Entering loop body
4788 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i)((const i915_reg_t){ .reg = ((((const u32 []){ 0x45038, 0x45048
, 0x4504C })[i])) })
, mask, val)
;
10
Calling 'intel_de_rmw'
4789}
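
The analyzer notes interleaved above (steps 7-10) trace the path behind the warning in the Bug Summary: even with the IS_GEN(dev_priv, 12) branch assumed false, the loop in icl_mbus_init() is bounded by sizeof(abox_regs), which is 8 on LP64, while the compound-literal table inside MBUS_ABOX_CTL(i) has only three entries (0x45038, 0x45048, 0x4504C). One plausible reading is that the checker assumes a set bit at index 3 or higher, so MBUS_ABOX_CTL(i) reads past the end of the table and the i915_reg_t passed by value into intel_de_rmw() carries an indeterminate 'reg' field, which is consistent with the report at intel_uncore.h line 308. The following stripped-down reproduction of that pattern uses invented names (fake_reg_t, FAKE_ABOX_CTL, fake_rmw) and is only a sketch, not the driver's code:

#include <stdio.h>

/* Hypothetical stand-ins for i915_reg_t / MBUS_ABOX_CTL / intel_de_rmw. */
typedef struct { unsigned int reg; } fake_reg_t;

#define FAKE_ABOX_CTL(i) \
	((const fake_reg_t){ .reg = ((const unsigned int []){ 0x45038, 0x45048, 0x4504C })[i] })

static void fake_rmw(fake_reg_t r)	/* struct passed by value, like intel_de_rmw() */
{
	printf("rmw on reg 0x%x\n", r.reg);
}

int main(void)
{
	unsigned long abox_mask = 0x9;	/* bit 3 set: no matching table entry */
	unsigned int i;

	/* Bounding the loop by sizeof(abox_mask) (bytes, 8 on LP64) rather than
	 * by the table size lets i reach 3..7; FAKE_ABOX_CTL(i) then reads past
	 * the three-element compound literal, so the struct handed to fake_rmw()
	 * carries an indeterminate 'reg' value, which is the pattern the checker
	 * flags. */
	for (i = 0; i < sizeof(abox_mask); i++)
		if (abox_mask & (1UL << i))
			fake_rmw(FAKE_ABOX_CTL(i));

	return 0;
}
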
4790
4791static void hsw_assert_cdclk(struct drm_i915_privateinteldrm_softc *dev_priv)
4792{
4793 u32 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4794
4795 /*
4796 * The LCPLL register should be turned on by the BIOS. For now
4797 * let's just check its state and print errors in case
4798 * something is wrong. Don't even try to turn it on.
4799 */
4800
4801 if (val & LCPLL_CD_SOURCE_FCLK(1 << 21))
4802 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "CDCLK source is not LCPLL\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4803
4804 if (val & LCPLL_PLL_DISABLE(1 << 31))
4805 drm_err(&dev_priv->drm, "LCPLL is disabled\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "LCPLL is disabled\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4806
4807 if ((val & LCPLL_REF_MASK(3 << 28)) != LCPLL_REF_NON_SSC(0 << 28))
4808 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "LCPLL not using non-SSC reference\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4809}
4810
4811static void assert_can_disable_lcpll(struct drm_i915_privateinteldrm_softc *dev_priv)
4812{
4813 struct drm_device *dev = &dev_priv->drm;
4814 struct intel_crtc *crtc;
4815
4816 for_each_intel_crtc(dev, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base
.head ) *__mptr = ((&(dev)->mode_config.crtc_list)->
next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*crtc), base.head) );}); &crtc->base.head !=
(&(dev)->mode_config.crtc_list); crtc = ({ const __typeof
( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->
base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*crtc), base.head) );}))
4817 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4818 pipe_name(crtc->pipe));
[each I915_STATE_WARN(cond, fmt, ...) in this function expands to: ({ int __ret_warn_on = !!(cond); if (__builtin_expect(!!(__ret_warn_on), 0)) if (!({ int __ret = !!(i915_modparams.verbose_state_checks); if (__ret) printf(fmt, ...); __builtin_expect(!!(__ret), 0); })) __drm_err(fmt, ...); __builtin_expect(!!(__ret_warn_on), 0); }); the register macros expand to ((const i915_reg_t){ .reg = ... }) with HSW_PWR_WELL_CTL2 = 0x45404, SPLL_CTL = 0x46020, WRPLL_CTL(n) = 0x46040 + n * 0x20, PP_STATUS(0) = dev_priv->pps_mmio_base, BLC_PWM_CPU_CTL2 = 0x48250, HSW_BLC_PWM2_CTL = 0x48350, BLC_PWM_PCH_CTL1 = 0xc8250, UTIL_PIN_CTL = 0x48400, PCH_GTC_CTL = 0xe7000; the *_ENABLE and PP_ON bits are all bit 31]
4819 
4820 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4821 "Display power well on\n");
4822 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4823 "SPLL enabled\n");
4824 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4825 "WRPLL1 enabled\n");
4826 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4827 "WRPLL2 enabled\n");
4828 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4829 "Panel power on\n");
4830 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4831 "CPU PWM1 enabled\n");
4832 if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL))
4833 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4834 "CPU PWM2 enabled\n");
4835 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4836 "PCH PWM1 enabled\n");
4837 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4838 "Utility pin enabled\n");
4839 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4840 "PCH GTC enabled\n");
4841
4842 /*
4843 * In theory we can still leave IRQs enabled, as long as only the HPD
4844 * interrupts remain enabled. We used to check for that, but since it's
4845 * gen-specific and since we only disable LCPLL after we fully disable
4846 * the interrupts, the check below should be enough.
4847 */
4848 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n")({ int __ret_warn_on = !!(intel_irqs_enabled(dev_priv)); if (
__builtin_expect(!!(__ret_warn_on), 0)) if (!({ int __ret = !
!(i915_modparams.verbose_state_checks); if (__ret) printf("IRQs enabled\n"
); __builtin_expect(!!(__ret), 0); })) __drm_err("IRQs enabled\n"
); __builtin_expect(!!(__ret_warn_on), 0); })
;
4849}
4850
4851static u32 hsw_read_dcomp(struct drm_i915_privateinteldrm_softc *dev_priv)
4852{
4853 if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL))
4854 return intel_de_read(dev_priv, D_COMP_HSW((const i915_reg_t){ .reg = (0x140000 + 0x5F0C) }));
4855 else
4856 return intel_de_read(dev_priv, D_COMP_BDW((const i915_reg_t){ .reg = (0x138144) }));
4857}
4858
4859static void hsw_write_dcomp(struct drm_i915_privateinteldrm_softc *dev_priv, u32 val)
4860{
4861 if (IS_HASWELL(dev_priv)IS_PLATFORM(dev_priv, INTEL_HASWELL)) {
4862 if (sandybridge_pcode_write(dev_priv,
4863 GEN6_PCODE_WRITE_D_COMP, val))
4864 drm_dbg_kms(&dev_priv->drm,
4865 "Failed to write to D_COMP\n");
[sandybridge_pcode_write() expands to sandybridge_pcode_write_timeout(dev_priv, 0x11, val, 500, 0); drm_dbg_kms() expands to drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Failed to write to D_COMP\n")]
4866 } else {
4867 intel_de_write(dev_priv, D_COMP_BDW((const i915_reg_t){ .reg = (0x138144) }), val);
4868 intel_de_posting_read(dev_priv, D_COMP_BDW((const i915_reg_t){ .reg = (0x138144) }));
4869 }
4870}
4871
4872/*
4873 * This function implements pieces of two sequences from BSpec:
4874 * - Sequence for display software to disable LCPLL
4875 * - Sequence for display software to allow package C8+
4876 * The steps implemented here are just the steps that actually touch the LCPLL
4877 * register. Callers should take care of disabling all the display engine
4878 * functions, doing the mode unset, fixing interrupts, etc.
4879 */
4880static void hsw_disable_lcpll(struct drm_i915_privateinteldrm_softc *dev_priv,
4881 bool_Bool switch_to_fclk, bool_Bool allow_power_down)
4882{
4883 u32 val;
4884
4885 assert_can_disable_lcpll(dev_priv);
4886
4887 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4888
4889 if (switch_to_fclk) {
4890 val |= LCPLL_CD_SOURCE_FCLK(1 << 21);
4891 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4892
4893 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4894 LCPLL_CD_SOURCE_FCLK_DONE, 1))
[wait_for_us(cond, 1) expands to a ktime/local_clock-based polling loop that re-reads LCPLL_CTL (0x130040) and tests bit 19 (LCPLL_CD_SOURCE_FCLK_DONE), returning 0 on success or -60 (-ETIMEDOUT) after roughly 1 us]
4895 drm_err(&dev_priv->drm, "Switching to FCLK failed\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Switching to FCLK failed\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4896
4897 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4898 }
4899
4900 val |= LCPLL_PLL_DISABLE(1 << 31);
4901 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4902 intel_de_posting_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4903
4904 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), LCPLL_PLL_LOCK(1 << 30), 1))
4905 drm_err(&dev_priv->drm, "LCPLL still locked\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "LCPLL still locked\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4906
4907 val = hsw_read_dcomp(dev_priv);
4908 val |= D_COMP_COMP_DISABLE(1 << 0);
4909 hsw_write_dcomp(dev_priv, val);
4910 ndelay(100);
4911
4912 if (wait_for((hsw_read_dcomp(dev_priv) &
4913 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
[wait_for(cond, 1) expands to a ktime-based polling loop with exponential back-off that re-evaluates hsw_read_dcomp(dev_priv) & (1 << 9), returning 0 once the bit clears or -60 (-ETIMEDOUT) after roughly 1 ms]
4914 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "D_COMP RCOMP still in progress\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4915
4916 if (allow_power_down) {
4917 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4918 val |= LCPLL_POWER_DOWN_ALLOW(1 << 22);
4919 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4920 intel_de_posting_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4921 }
4922}
4923
4924/*
4925 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4926 * source.
4927 */
4928static void hsw_restore_lcpll(struct drm_i915_privateinteldrm_softc *dev_priv)
4929{
4930 u32 val;
4931
4932 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4933
4934 if ((val & (LCPLL_PLL_LOCK(1 << 30) | LCPLL_PLL_DISABLE(1 << 31) | LCPLL_CD_SOURCE_FCLK(1 << 21) |
4935 LCPLL_POWER_DOWN_ALLOW(1 << 22))) == LCPLL_PLL_LOCK(1 << 30))
4936 return;
4937
4938 /*
4939 * Make sure we're not on PC8 state before disabling PC8, otherwise
4940 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4941 */
4942 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4943
4944 if (val & LCPLL_POWER_DOWN_ALLOW(1 << 22)) {
4945 val &= ~LCPLL_POWER_DOWN_ALLOW(1 << 22);
4946 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4947 intel_de_posting_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4948 }
4949
4950 val = hsw_read_dcomp(dev_priv);
4951 val |= D_COMP_COMP_FORCE(1 << 8);
4952 val &= ~D_COMP_COMP_DISABLE(1 << 0);
4953 hsw_write_dcomp(dev_priv, val);
4954
4955 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4956 val &= ~LCPLL_PLL_DISABLE(1 << 31);
4957 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4958
4959 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), LCPLL_PLL_LOCK(1 << 30), 5))
4960 drm_err(&dev_priv->drm, "LCPLL not locked yet\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "LCPLL not locked yet\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4961
4962 if (val & LCPLL_CD_SOURCE_FCLK(1 << 21)) {
4963 val = intel_de_read(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }));
4964 val &= ~LCPLL_CD_SOURCE_FCLK(1 << 21);
4965 intel_de_write(dev_priv, LCPLL_CTL((const i915_reg_t){ .reg = (0x130040) }), val);
4966
4967 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4968 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
[wait_for_us(cond, 1) expands to the same polling loop as above, here waiting for bit 19 of LCPLL_CTL (0x130040) to clear, returning 0 on success or -60 (-ETIMEDOUT) after roughly 1 us]
4969 drm_err(&dev_priv->drm,
4970 "Switching back to LCPLL failed\n");
[drm_err() expands to: printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Switching back to LCPLL failed\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__)]
4971 }
4972
4973 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4974
4975 intel_update_cdclk(dev_priv);
4976 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4977}
4978
4979/*
4980 * Package states C8 and deeper are really deep PC states that can only be
4981 * reached when all the devices on the system allow it, so even if the graphics
4982 * device allows PC8+, it doesn't mean the system will actually get to these
4983 * states. Our driver only allows PC8+ when going into runtime PM.
4984 *
4985 * The requirements for PC8+ are that all the outputs are disabled, the power
4986 * well is disabled and most interrupts are disabled, and these are also
4987 * requirements for runtime PM. When these conditions are met, we manually do
4988 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4989 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4990 * hang the machine.
4991 *
4992 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4993 * the state of some registers, so when we come back from PC8+ we need to
4994 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4995 * need to take care of the registers kept by RC6. Notice that this happens even
4996 * if we don't put the device in PCI D3 state (which is what currently happens
4997 * because of the runtime PM support).
4998 *
4999 * For more, read "Display Sequences for Package C8" on the hardware
5000 * documentation.
5001 */
5002static void hsw_enable_pc8(struct drm_i915_privateinteldrm_softc *dev_priv)
5003{
5004 u32 val;
5005
5006 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Enabling package C8+\n"
)
;
5007
5008 if (HAS_PCH_LPT_LP(dev_priv)(((dev_priv)->pch_id) == 0x9c00 || ((dev_priv)->pch_id)
== 0x9c80)
) {
5009 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }));
5010 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE(1 << 12);
5011 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }), val);
5012 }
5013
5014 lpt_disable_clkout_dp(dev_priv);
5015 hsw_disable_lcpll(dev_priv, true1, true1);
5016}
5017
5018static void hsw_disable_pc8(struct drm_i915_privateinteldrm_softc *dev_priv)
5019{
5020 u32 val;
5021
5022 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Disabling package C8+\n"
)
;
5023
5024 hsw_restore_lcpll(dev_priv);
5025 intel_init_pch_refclk(dev_priv);
5026
5027 if (HAS_PCH_LPT_LP(dev_priv)(((dev_priv)->pch_id) == 0x9c00 || ((dev_priv)->pch_id)
== 0x9c80)
) {
5028 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }));
5029 val |= PCH_LP_PARTITION_LEVEL_DISABLE(1 << 12);
5030 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }), val);
5031 }
5032}
5033
5034static void intel_pch_reset_handshake(struct drm_i915_privateinteldrm_softc *dev_priv,
5035 bool_Bool enable)
5036{
5037 i915_reg_t reg;
5038 u32 reset_bits, val;
5039
5040 if (IS_IVYBRIDGE(dev_priv)IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)) {
5041 reg = GEN7_MSG_CTL((const i915_reg_t){ .reg = (0x45010) });
5042 reset_bits = WAIT_FOR_PCH_FLR_ACK(1 << 0) | WAIT_FOR_PCH_RESET_ACK(1 << 1);
5043 } else {
5044 reg = HSW_NDE_RSTWRN_OPT((const i915_reg_t){ .reg = (0x46408) });
5045 reset_bits = RESET_PCH_HANDSHAKE_ENABLE(1 << 4);
5046 }
5047
5048 val = intel_de_read(dev_priv, reg);
5049
5050 if (enable)
5051 val |= reset_bits;
5052 else
5053 val &= ~reset_bits;
5054
5055 intel_de_write(dev_priv, reg, val);
5056}
5057
5058static void skl_display_core_init(struct drm_i915_privateinteldrm_softc *dev_priv,
5059 bool_Bool resume)
5060{
5061 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5062 struct i915_power_well *well;
5063
5064 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
5065
5066 /* enable PCH reset handshake */
5067 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP));
5068
5069 /* enable PG1 and Misc I/O */
5070 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5071
5072 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5073 intel_power_well_enable(dev_priv, well);
5074
5075 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5076 intel_power_well_enable(dev_priv, well);
5077
5078 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5079
5080 intel_cdclk_init_hw(dev_priv);
5081
5082 gen9_dbuf_enable(dev_priv);
5083
5084 if (resume && dev_priv->csr.dmc_payload)
5085 intel_csr_load_program(dev_priv);
5086}
5087
5088static void skl_display_core_uninit(struct drm_i915_privateinteldrm_softc *dev_priv)
5089{
5090 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5091 struct i915_power_well *well;
5092
5093 gen9_disable_dc_states(dev_priv);
5094
5095 gen9_dbuf_disable(dev_priv);
5096
5097 intel_cdclk_uninit_hw(dev_priv);
5098
5099 /* The spec doesn't call for removing the reset handshake flag */
5100 /* disable PG1 and Misc I/O */
5101
5102 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5103
5104 /*
5105 * BSpec says to keep the MISC IO power well enabled here, only
5106 * remove our request for power well 1.
5107 * Note that even though the driver's request is removed power well 1
5108 * may stay enabled after this due to DMC's own request on it.
5109 */
5110 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5111 intel_power_well_disable(dev_priv, well);
5112
5113 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5114
5115 usleep_range(10, 30); /* 10 us delay per Bspec */
5116}
5117
5118static void bxt_display_core_init(struct drm_i915_privateinteldrm_softc *dev_priv, bool_Bool resume)
5119{
5120 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5121 struct i915_power_well *well;
5122
5123 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
5124
5125 /*
5126 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5127 * or else the reset will hang because there is no PCH to respond.
5128 * Move the handshake programming to initialization sequence.
5129 * Previously was left up to BIOS.
5130 */
5131 intel_pch_reset_handshake(dev_priv, false0);
5132
5133 /* Enable PG1 */
5134 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5135
5136 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5137 intel_power_well_enable(dev_priv, well);
5138
5139 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5140
5141 intel_cdclk_init_hw(dev_priv);
5142
5143 gen9_dbuf_enable(dev_priv);
5144
5145 if (resume && dev_priv->csr.dmc_payload)
5146 intel_csr_load_program(dev_priv);
5147}
5148
5149static void bxt_display_core_uninit(struct drm_i915_privateinteldrm_softc *dev_priv)
5150{
5151 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5152 struct i915_power_well *well;
5153
5154 gen9_disable_dc_states(dev_priv);
5155
5156 gen9_dbuf_disable(dev_priv);
5157
5158 intel_cdclk_uninit_hw(dev_priv);
5159
5160 /* The spec doesn't call for removing the reset handshake flag */
5161
5162 /*
5163 * Disable PW1 (PG1).
5164 * Note that even though the driver's request is removed power well 1
5165 * may stay enabled after this due to DMC's own request on it.
5166 */
5167 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5168
5169 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5170 intel_power_well_disable(dev_priv, well);
5171
5172 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5173
5174 usleep_range(10, 30); /* 10 us delay per Bspec */
5175}
5176
5177static void cnl_display_core_init(struct drm_i915_privateinteldrm_softc *dev_priv, bool_Bool resume)
5178{
5179 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5180 struct i915_power_well *well;
5181
5182 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
5183
5184 /* 1. Enable PCH Reset Handshake */
5185 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP));
5186
5187 /* 2-3. */
5188 intel_combo_phy_init(dev_priv);
5189
5190 /*
5191 * 4. Enable Power Well 1 (PG1).
5192 * The AUX IO power wells will be enabled on demand.
5193 */
5194 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5195 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5196 intel_power_well_enable(dev_priv, well);
5197 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5198
5199 /* 5. Enable CD clock */
5200 intel_cdclk_init_hw(dev_priv);
5201
5202 /* 6. Enable DBUF */
5203 gen9_dbuf_enable(dev_priv);
5204
5205 if (resume && dev_priv->csr.dmc_payload)
5206 intel_csr_load_program(dev_priv);
5207}
5208
5209static void cnl_display_core_uninit(struct drm_i915_privateinteldrm_softc *dev_priv)
5210{
5211 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5212 struct i915_power_well *well;
5213
5214 gen9_disable_dc_states(dev_priv);
5215
5216 /* 1. Disable all display engine functions -> already done */
5217
5218 /* 2. Disable DBUF */
5219 gen9_dbuf_disable(dev_priv);
5220
5221 /* 3. Disable CD clock */
5222 intel_cdclk_uninit_hw(dev_priv);
5223
5224 /*
5225 * 4. Disable Power Well 1 (PG1).
5226 * The AUX IO power wells are toggled on demand, so they are already
5227 * disabled at this point.
5228 */
5229 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5230 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5231 intel_power_well_disable(dev_priv, well);
5232 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5233
5234 usleep_range(10, 30); /* 10 us delay per Bspec */
5235
5236 /* 5. */
5237 intel_combo_phy_uninit(dev_priv);
5238}
5239
5240struct buddy_page_mask {
5241 u32 page_mask;
5242 u8 type;
5243 u8 num_channels;
5244};
5245
5246static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5247 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
5248 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5249 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
5250 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5251 {}
5252};
5253
5254static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5255 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5256 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
5257 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5258 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
5259 {}
5260};
5261
5262static void tgl_bw_buddy_init(struct drm_i915_privateinteldrm_softc *dev_priv)
5263{
5264 enum intel_dram_type type = dev_priv->dram_info.type;
5265 u8 num_channels = dev_priv->dram_info.num_channels;
5266 const struct buddy_page_mask *table;
5267 unsigned long abox_mask = INTEL_INFO(dev_priv)(&(dev_priv)->__info)->abox_mask;
5268 int config, i;
5269
5270 if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)(IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) && tgl_revids_get
(dev_priv)->disp_stepping >= (TGL_REVID_A0) && tgl_revids_get
(dev_priv)->disp_stepping <= (TGL_REVID_B0))
)
5271 /* Wa_1409767108: tgl */
5272 table = wa_1409767108_buddy_page_masks;
5273 else
5274 table = tgl_buddy_page_masks;
5275
5276 for (config = 0; table[config].page_mask != 0; config++)
5277 if (table[config].num_channels == num_channels &&
5278 table[config].type == type)
5279 break;
5280
5281 if (table[config].page_mask == 0) {
5282 drm_dbg(&dev_priv->drm,
5283 "Unknown memory configuration; disabling address buddy logic.\n");
[drm_dbg() expands to: drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_DRIVER, "Unknown memory configuration; disabling address buddy logic.\n")]
5284 for_each_set_bit(i, &abox_mask, sizeof(abox_mask))for ((i) = find_first_bit((&abox_mask), (sizeof(abox_mask
))); (i) < (sizeof(abox_mask)); (i) = find_next_bit((&
abox_mask), (sizeof(abox_mask)), (i) + 1))
5285 intel_de_write(dev_priv, BW_BUDDY_CTL(i)((const i915_reg_t){ .reg = (((0x45130) + (i) * ((0x45140) - (
0x45130)))) })
,
5286 BW_BUDDY_DISABLE((u32)((1UL << (31)) + 0)));
5287 } else {
5288 for_each_set_bit(i, &abox_mask, sizeof(abox_mask))for ((i) = find_first_bit((&abox_mask), (sizeof(abox_mask
))); (i) < (sizeof(abox_mask)); (i) = find_next_bit((&
abox_mask), (sizeof(abox_mask)), (i) + 1))
{
5289 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i)((const i915_reg_t){ .reg = (((0x45134) + (i) * ((0x45144) - (
0x45134)))) })
,
5290 table[config].page_mask);
5291
5292 /* Wa_22010178259:tgl,rkl */
5293 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i)((const i915_reg_t){ .reg = (((0x45130) + (i) * ((0x45140) - (
0x45130)))) })
,
5294 BW_BUDDY_TLB_REQ_TIMER_MASK((u32)((((~0UL) >> (64 - (21) - 1)) & ((~0UL) <<
(16))) + 0))
,
5295 BW_BUDDY_TLB_REQ_TIMER(0x8)((u32)((((typeof(((u32)((((~0UL) >> (64 - (21) - 1)) &
((~0UL) << (16))) + 0))))(0x8) << (__builtin_ffsll
(((u32)((((~0UL) >> (64 - (21) - 1)) & ((~0UL) <<
(16))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (21
) - 1)) & ((~0UL) << (16))) + 0)))) + 0 + 0 + 0 + 0
))
);
5296 }
5297 }
5298}
5299
5300static void icl_display_core_init(struct drm_i915_privateinteldrm_softc *dev_priv,
5301 bool_Bool resume)
5302{
5303 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5304 struct i915_power_well *well;
5305 u32 val;
5306
5307 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE0);
5308
5309 /* Wa_14011294188:ehl,jsl,tgl,rkl */
5310 if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) >= PCH_JSP &&
4
Assuming field 'pch_type' is < PCH_JSP
5311 INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) < PCH_DG1)
5312 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D((const i915_reg_t){ .reg = (0xc2020) }), 0,
5313 PCH_DPMGUNIT_CLOCK_GATE_DISABLE(1 << 15));
5314
5315 /* 1. Enable PCH reset handshake. */
5316 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP));
5
Assuming field 'pch_type' is not equal to PCH_NOP
5317
5318 /* 2. Initialize all combo phys */
5319 intel_combo_phy_init(dev_priv);
5320
5321 /*
5322 * 3. Enable Power Well 1 (PG1).
5323 * The AUX IO power wells will be enabled on demand.
5324 */
5325 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5326 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5327 intel_power_well_enable(dev_priv, well);
5328 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5329
5330 /* 4. Enable CDCLK. */
5331 intel_cdclk_init_hw(dev_priv);
5332
5333 /* 5. Enable DBUF. */
5334 gen9_dbuf_enable(dev_priv);
5335
5336 /* 6. Setup MBUS. */
5337 icl_mbus_init(dev_priv);
6
Calling 'icl_mbus_init'
5338
5339 /* 7. Program arbiter BW_BUDDY registers */
5340 if (INTEL_GEN(dev_priv)((&(dev_priv)->__info)->gen) >= 12)
5341 tgl_bw_buddy_init(dev_priv);
5342
5343 if (resume && dev_priv->csr.dmc_payload)
5344 intel_csr_load_program(dev_priv);
5345
5346 /* Wa_14011508470 */
5347 if (IS_GEN(dev_priv, 12)(0 + (&(dev_priv)->__info)->gen == (12))) {
5348 val = DCPR_CLEAR_MEMSTAT_DIS((u32)((1UL << (24)) + 0)) | DCPR_SEND_RESP_IMM((u32)((1UL << (25)) + 0)) |
5349 DCPR_MASK_LPMODE((u32)((1UL << (26)) + 0)) | DCPR_MASK_MAXLATENCY_MEMUP_CLR((u32)((1UL << (27)) + 0));
5350 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2((const i915_reg_t){ .reg = (0x46434) }), 0, val);
5351 }
5352}
5353
5354static void icl_display_core_uninit(struct drm_i915_privateinteldrm_softc *dev_priv)
5355{
5356 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5357 struct i915_power_well *well;
5358
5359 gen9_disable_dc_states(dev_priv);
5360
5361 /* 1. Disable all display engine functions -> already done */
5362
5363 /* 2. Disable DBUF */
5364 gen9_dbuf_disable(dev_priv);
5365
5366 /* 3. Disable CD clock */
5367 intel_cdclk_uninit_hw(dev_priv);
5368
5369 /*
5370 * 4. Disable Power Well 1 (PG1).
5371 * The AUX IO power wells are toggled on demand, so they are already
5372 * disabled at this point.
5373 */
5374 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5375 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5376 intel_power_well_disable(dev_priv, well);
5377 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5378
5379 /* 5. */
5380 intel_combo_phy_uninit(dev_priv);
5381}
5382
5383static void chv_phy_control_init(struct drm_i915_privateinteldrm_softc *dev_priv)
5384{
5385 struct i915_power_well *cmn_bc =
5386 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5387 struct i915_power_well *cmn_d =
5388 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5389
5390 /*
5391 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5392 * workaround never ever read DISPLAY_PHY_CONTROL, and
5393 * instead maintain a shadow copy ourselves. Use the actual
5394 * power well state and lane status to reconstruct the
5395 * expected initial value.
5396 */
5397 dev_priv->chv_phy_control =
5398 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0)((0x2) << (2 * (DPIO_PHY0) + 23)) |
5399 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1)((0x2) << (2 * (DPIO_PHY1) + 23)) |
5400 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0)((0x7) << (6 * (DPIO_PHY0) + 3 * (DPIO_CH0) + 2)) |
5401 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1)((0x7) << (6 * (DPIO_PHY0) + 3 * (DPIO_CH1) + 2)) |
5402 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0)((0x7) << (6 * (DPIO_PHY1) + 3 * (DPIO_CH0) + 2));
5403
5404 /*
5405 * If all lanes are disabled we leave the override disabled
5406 * with all power down bits cleared to match the state we
5407 * would use after disabling the port. Otherwise enable the
5408 * override and set the lane powerdown bits according to the
5409 * current lane status.
5410 */
5411 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5412 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)((const i915_reg_t){ .reg = ((((const u32 []){ (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6014), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6018), (((&(dev_priv
)->__info)->display_mmio_offset) + 0x6030) })[(PIPE_A)]
)) })
);
5413 unsigned int mask;
5414
5415 mask = status & DPLL_PORTB_READY_MASK(0xf);
5416 if (mask == 0xf)
5417 mask = 0x0;
5418 else
5419 dev_priv->chv_phy_control |=
5420 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)(1 << (2 * (DPIO_PHY0) + (DPIO_CH0) + 27));
5421
5422 dev_priv->chv_phy_control |=
5423 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0)((mask) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH0) + 11));
5424
5425 mask = (status & DPLL_PORTC_READY_MASK(0xf << 4)) >> 4;
5426 if (mask == 0xf)
5427 mask = 0x0;
5428 else
5429 dev_priv->chv_phy_control |=
5430 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)(1 << (2 * (DPIO_PHY0) + (DPIO_CH1) + 27));
5431
5432 dev_priv->chv_phy_control |=
5433 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1)((mask) << (8 * (DPIO_PHY0) + 4 * (DPIO_CH1) + 11));
5434
5435 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0)(1 << (DPIO_PHY0));
5436
5437 dev_priv->chv_phy_assert[DPIO_PHY0] = false0;
5438 } else {
5439 dev_priv->chv_phy_assert[DPIO_PHY0] = true1;
5440 }
5441
5442 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5443 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS((const i915_reg_t){ .reg = (0x180000 + 0x6240) }));
5444 unsigned int mask;
5445
5446 mask = status & DPLL_PORTD_READY_MASK(0xf);
5447
5448 if (mask == 0xf)
5449 mask = 0x0;
5450 else
5451 dev_priv->chv_phy_control |=
5452 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)(1 << (2 * (DPIO_PHY1) + (DPIO_CH0) + 27));
5453
5454 dev_priv->chv_phy_control |=
5455 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0)((mask) << (8 * (DPIO_PHY1) + 4 * (DPIO_CH0) + 11));
5456
5457 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1)(1 << (DPIO_PHY1));
5458
5459 dev_priv->chv_phy_assert[DPIO_PHY1] = false0;
5460 } else {
5461 dev_priv->chv_phy_assert[DPIO_PHY1] = true1;
5462 }
5463
5464 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial PHY_CONTROL=0x%08x\n"
, dev_priv->chv_phy_control)
5465 dev_priv->chv_phy_control)drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "Initial PHY_CONTROL=0x%08x\n"
, dev_priv->chv_phy_control)
;
5466
5467 /* Defer application of initial phy_control to enabling the powerwell */
5468}
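
The comment at the top of chv_phy_control_init() describes a write-only shadow-copy workaround: the register is never read back, and every update is derived from a software copy. A minimal stand-alone sketch of that pattern, with generic names that are not the driver's code:

#include <stdint.h>

/* Cached copy of a register that must never be read back. */
struct shadow_reg {
        uint32_t value;                 /* last value written out */
        void (*write_hw)(uint32_t);     /* hypothetical MMIO write hook */
};

static void shadow_reg_update(struct shadow_reg *sr, uint32_t clear, uint32_t set)
{
        /* Modify the software copy instead of doing a read-modify-write. */
        sr->value = (sr->value & ~clear) | set;
        sr->write_hw(sr->value);
}
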
5469
5470static void vlv_cmnlane_wa(struct drm_i915_privateinteldrm_softc *dev_priv)
5471{
5472 struct i915_power_well *cmn =
5473 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5474 struct i915_power_well *disp2d =
5475 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5476
5477 /* If the display might already be active, skip this */
5478 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5479 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5480 intel_de_read(dev_priv, DPIO_CTL((const i915_reg_t){ .reg = (0x180000 + 0x2110) })) & DPIO_CMNRST(1 << 0))
5481 return;
5482
5483 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n")drm_dev_dbg((&dev_priv->drm)->dev, DRM_UT_KMS, "toggling display PHY side reset\n"
)
;
5484
5485 /* cmnlane needs DPLL registers */
5486 disp2d->desc->ops->enable(dev_priv, disp2d);
5487
5488 /*
5489 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5490 * Need to assert and de-assert PHY SB reset by gating the
5491 * common lane power, then un-gating it.
5492 * Simply ungating isn't enough of a reset to get the PHY's
5493 * ports and lanes running.
5494 */
5495 cmn->desc->ops->disable(dev_priv, cmn);
5496}
5497
5498static bool_Bool vlv_punit_is_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv, u32 reg0)
5499{
5500 bool_Bool ret;
5501
5502 vlv_punit_get(dev_priv);
5503 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK((0x3) << 0)) == SSPM0_SSC_PWR_GATE((0x3) << 0);
5504 vlv_punit_put(dev_priv);
5505
5506 return ret;
5507}
5508
5509static void assert_ved_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv)
5510{
5511 drm_WARN(&dev_priv->drm,({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32));
if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret
), 0); })
5512 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32));
if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret
), 0); })
5513 "VED not power gated\n")({ int __ret = !!(!vlv_punit_is_power_gated(dev_priv, 0x32));
if (__ret) printf("%s %s: " "VED not power gated\n", dev_driver_string
((&dev_priv->drm)->dev), ""); __builtin_expect(!!(__ret
), 0); })
;
5514}
5515
5516static void assert_isp_power_gated(struct drm_i915_privateinteldrm_softc *dev_priv)
5517{
5518#ifdef notyet
5519 static const struct pci_device_id isp_ids[] = {
5520 {PCI_DEVICE(PCI_VENDOR_ID_INTEL0x8086, 0x0f38)},
5521 {PCI_DEVICE(PCI_VENDOR_ID_INTEL0x8086, 0x22b8)},
5522 {}
5523 };
5524
5525 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated
(dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect
(!!(__ret), 0); })
5526 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated
(dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect
(!!(__ret), 0); })
5527 "ISP not power gated\n")({ int __ret = !!(!pci_dev_present(isp_ids) && !vlv_punit_is_power_gated
(dev_priv, 0x39)); if (__ret) printf("%s %s: " "ISP not power gated\n"
, dev_driver_string((&dev_priv->drm)->dev), ""); __builtin_expect
(!!(__ret), 0); })
;
5528#endif
5529}
5530
5531static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *dev_priv);
5532
5533/**
5534 * intel_power_domains_init_hw - initialize hardware power domain state
5535 * @i915: i915 device instance
5536 * @resume: Called from resume code paths or not
5537 *
5538 * This function initializes the hardware power domain state and enables all
5539 * power wells belonging to the INIT power domain. Power wells in other
5540 * domains (and not in the INIT domain) are referenced or disabled by
5541 * intel_modeset_readout_hw_state(). After that the reference count of each
5542 * power well must match its HW enabled state, see
5543 * intel_power_domains_verify_state().
5544 *
5545 * It will return with power domains disabled (to be enabled later by
5546 * intel_power_domains_enable()) and must be paired with
5547 * intel_power_domains_driver_remove().
5548 */
5549void intel_power_domains_init_hw(struct drm_i915_privateinteldrm_softc *i915, bool_Bool resume)
5550{
5551 struct i915_power_domains *power_domains = &i915->power_domains;
5552
5553 power_domains->initializing = true1;
5554
5555 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11) {
5556 icl_display_core_init(i915, resume);
5557 } else if (IS_CANNONLAKE(i915)IS_PLATFORM(i915, INTEL_CANNONLAKE)) {
5558 cnl_display_core_init(i915, resume);
5559 } else if (IS_GEN9_BC(i915)((0 + (&(i915)->__info)->gen == (9)) && !((
&(i915)->__info)->is_lp))
) {
5560 skl_display_core_init(i915, resume);
5561 } else if (IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
) {
5562 bxt_display_core_init(i915, resume);
5563 } else if (IS_CHERRYVIEW(i915)IS_PLATFORM(i915, INTEL_CHERRYVIEW)) {
5564 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5565 chv_phy_control_init(i915);
5566 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5567 assert_isp_power_gated(i915);
5568 } else if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW)) {
5569 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5570 vlv_cmnlane_wa(i915);
5571 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5572 assert_ved_power_gated(i915);
5573 assert_isp_power_gated(i915);
5574 } else if (IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL) || IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL)) {
5575 hsw_assert_cdclk(i915);
5576 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)(((i915)->pch_type) == PCH_NOP));
5577 } else if (IS_IVYBRIDGE(i915)IS_PLATFORM(i915, INTEL_IVYBRIDGE)) {
5578 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)(((i915)->pch_type) == PCH_NOP));
5579 }
5580
5581 /*
5582 * Keep all power wells enabled for any dependent HW access during
5583 * initialization and to make sure we keep BIOS enabled display HW
5584 * resources powered until display HW readout is complete. We drop
5585 * this reference in intel_power_domains_enable().
5586 */
5587 power_domains->wakeref =
5588 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5589
5590 /* Disable power support if the user asked so. */
5591 if (!i915->params.disable_power_well)
5592 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5593 intel_power_domains_sync_hw(i915);
5594
5595 power_domains->initializing = false0;
5596}
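
A hedged sketch of the pairing the kernel-doc above describes; the actual call sites live elsewhere in the driver and are assumed here, not shown in this report:

/* Hypothetical driver load/unload sequence (assumed call sites). */
static void example_power_domains_load(struct inteldrm_softc *i915)
{
        /* Power everything on and hold the INIT reference. */
        intel_power_domains_init_hw(i915, false);

        /* ... display HW state readout takes its own references ... */

        /* Drop the INIT reference; wells may now be toggled on demand. */
        intel_power_domains_enable(i915);
}

static void example_power_domains_unload(struct inteldrm_softc *i915)
{
        /* Re-take the INIT reference so wells stop toggling ... */
        intel_power_domains_disable(i915);
        /* ... then tear down, keeping the device powered for reload. */
        intel_power_domains_driver_remove(i915);
}
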
5597
5598/**
5599 * intel_power_domains_driver_remove - deinitialize hw power domain state
5600 * @i915: i915 device instance
5601 *
5602 * De-initializes the display power domain HW state. It also ensures that the
5603 * device stays powered up so that the driver can be reloaded.
5604 *
5605 * It must be called with power domains already disabled (after a call to
5606 * intel_power_domains_disable()) and must be paired with
5607 * intel_power_domains_init_hw().
5608 */
5609void intel_power_domains_driver_remove(struct drm_i915_privateinteldrm_softc *i915)
5610{
5611 intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) =
5612 fetch_and_zero(&i915->power_domains.wakeref)({ typeof(*&i915->power_domains.wakeref) __T = *(&
i915->power_domains.wakeref); *(&i915->power_domains
.wakeref) = (typeof(*&i915->power_domains.wakeref))0; __T
; })
;
5613
5614 /* Remove the refcount we took to keep power well support disabled. */
5615 if (!i915->params.disable_power_well)
5616 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5617
5618 intel_display_power_flush_work_sync(i915);
5619
5620 intel_power_domains_verify_state(i915);
5621
5622 /* Keep the power well enabled, but cancel its rpm wakeref. */
5623 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5624}
5625
5626/**
5627 * intel_power_domains_enable - enable toggling of display power wells
5628 * @i915: i915 device instance
5629 *
5630 * Enable the on-demand enabling/disabling of the display power wells. Note that
5631 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5632 * only at specific points of the display modeset sequence, thus they are not
5633 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5634 * of these function is to keep the rest of power wells enabled until the end
5635 * of display HW readout (which will acquire the power references reflecting
5636 * the current HW state).
5637 */
5638void intel_power_domains_enable(struct drm_i915_privateinteldrm_softc *i915)
5639{
5640 intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) =
5641 fetch_and_zero(&i915->power_domains.wakeref)({ typeof(*&i915->power_domains.wakeref) __T = *(&
i915->power_domains.wakeref); *(&i915->power_domains
.wakeref) = (typeof(*&i915->power_domains.wakeref))0; __T
; })
;
5642
5643 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5644 intel_power_domains_verify_state(i915);
5645}
5646
5647/**
5648 * intel_power_domains_disable - disable toggling of display power wells
5649 * @i915: i915 device instance
5650 *
5651 * Disable the on-demand enabling/disabling of the display power wells. See
5652 * intel_power_domains_enable() for which power wells this call controls.
5653 */
5654void intel_power_domains_disable(struct drm_i915_privateinteldrm_softc *i915)
5655{
5656 struct i915_power_domains *power_domains = &i915->power_domains;
5657
5658 drm_WARN_ON(&i915->drm, power_domains->wakeref)({ int __ret = !!((power_domains->wakeref)); if (__ret) printf
("%s %s: " "%s", dev_driver_string(((&i915->drm))->
dev), "", "drm_WARN_ON(" "power_domains->wakeref" ")"); __builtin_expect
(!!(__ret), 0); })
;
5659 power_domains->wakeref =
5660 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5661
5662 intel_power_domains_verify_state(i915);
5663}
5664
5665/**
5666 * intel_power_domains_suspend - suspend power domain state
5667 * @i915: i915 device instance
5668 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5669 *
5670 * This function prepares the hardware power domain state before entering
5671 * system suspend.
5672 *
5673 * It must be called with power domains already disabled (after a call to
5674 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5675 */
5676void intel_power_domains_suspend(struct drm_i915_privateinteldrm_softc *i915,
5677 enum i915_drm_suspend_mode suspend_mode)
5678{
5679 struct i915_power_domains *power_domains = &i915->power_domains;
5680 intel_wakeref_t wakeref __maybe_unused__attribute__((__unused__)) =
5681 fetch_and_zero(&power_domains->wakeref)({ typeof(*&power_domains->wakeref) __T = *(&power_domains
->wakeref); *(&power_domains->wakeref) = (typeof(*&
power_domains->wakeref))0; __T; })
;
5682
5683 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5684
5685 /*
5686 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5687 * support don't manually deinit the power domains. This also means the
5688 * CSR/DMC firmware will stay active; it will power down any HW
5689 * resources as required and also enable deeper system power states
5690 * that would be blocked if the firmware was inactive.
5691 */
5692 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9(1 << 3)) &&
5693 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5694 i915->csr.dmc_payload) {
5695 intel_display_power_flush_work(i915);
5696 intel_power_domains_verify_state(i915);
5697 return;
5698 }
5699
5700 /*
5701 * Even if power well support was disabled we still want to disable
5702 * power wells if power domains must be deinitialized for suspend.
5703 */
5704 if (!i915->params.disable_power_well)
5705 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5706
5707 intel_display_power_flush_work(i915);
5708 intel_power_domains_verify_state(i915);
5709
5710 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11)
5711 icl_display_core_uninit(i915);
5712 else if (IS_CANNONLAKE(i915)IS_PLATFORM(i915, INTEL_CANNONLAKE))
5713 cnl_display_core_uninit(i915);
5714 else if (IS_GEN9_BC(i915)((0 + (&(i915)->__info)->gen == (9)) && !((
&(i915)->__info)->is_lp))
)
5715 skl_display_core_uninit(i915);
5716 else if (IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
)
5717 bxt_display_core_uninit(i915);
5718
5719 power_domains->display_core_suspended = true1;
5720}
5721
5722/**
5723 * intel_power_domains_resume - resume power domain state
5724 * @i915: i915 device instance
5725 *
5726 * This function resumes the hardware power domain state during system resume.
5727 *
5728 * It will return with power domain support disabled (to be enabled later by
5729 * intel_power_domains_enable()) and must be paired with
5730 * intel_power_domains_suspend().
5731 */
5732void intel_power_domains_resume(struct drm_i915_privateinteldrm_softc *i915)
5733{
5734 struct i915_power_domains *power_domains = &i915->power_domains;
5735
5736 if (power_domains->display_core_suspended) {
5737 intel_power_domains_init_hw(i915, true1);
5738 power_domains->display_core_suspended = false0;
5739 } else {
5740 drm_WARN_ON(&i915->drm, power_domains->wakeref)({ int __ret = !!((power_domains->wakeref)); if (__ret) printf
("%s %s: " "%s", dev_driver_string(((&i915->drm))->
dev), "", "drm_WARN_ON(" "power_domains->wakeref" ")"); __builtin_expect
(!!(__ret), 0); })
;
5741 power_domains->wakeref =
5742 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5743 }
5744
5745 intel_power_domains_verify_state(i915);
5746}
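
Similarly, a hedged sketch of the suspend/resume pairing documented above (assumed call sites, simplified):

/* Hypothetical suspend-to-idle sequence (assumed call sites). */
static void example_power_domains_s0ix(struct inteldrm_softc *i915)
{
        /* Entering suspend: stop on-demand toggling, then save/deinit HW state. */
        intel_power_domains_disable(i915);
        intel_power_domains_suspend(i915, I915_DRM_SUSPEND_IDLE);

        /* ... system sleeps ... */

        /* Leaving suspend: restore HW state, then allow toggling again. */
        intel_power_domains_resume(i915);
        intel_power_domains_enable(i915);
}
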
5747
5748#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)0
5749
5750static void intel_power_domains_dump_info(struct drm_i915_privateinteldrm_softc *i915)
5751{
5752 struct i915_power_domains *power_domains = &i915->power_domains;
5753 struct i915_power_well *power_well;
5754
5755 for_each_power_well(i915, power_well)for ((power_well) = (i915)->power_domains.power_wells; (power_well
) - (i915)->power_domains.power_wells < (i915)->power_domains
.power_well_count; (power_well)++)
{
5756 enum intel_display_power_domain domain;
5757
5758 drm_dbg(&i915->drm, "%-25s %d\n",drm_dev_dbg((&i915->drm)->dev, DRM_UT_DRIVER, "%-25s %d\n"
, power_well->desc->name, power_well->count)
5759 power_well->desc->name, power_well->count)drm_dev_dbg((&i915->drm)->dev, DRM_UT_DRIVER, "%-25s %d\n"
, power_well->desc->name, power_well->count)
;
5760
5761 for_each_power_domain(domain, power_well->desc->domains)for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++
) if (!((1ULL << (domain)) & (power_well->desc->
domains))) {} else
5762 drm_dbg(&i915->drm, " %-23s %d\n",drm_dev_dbg((&i915->drm)->dev, DRM_UT_DRIVER, " %-23s %d\n"
, intel_display_power_domain_str(domain), power_domains->domain_use_count
[domain])
5763 intel_display_power_domain_str(domain),drm_dev_dbg((&i915->drm)->dev, DRM_UT_DRIVER, " %-23s %d\n"
, intel_display_power_domain_str(domain), power_domains->domain_use_count
[domain])
5764 power_domains->domain_use_count[domain])drm_dev_dbg((&i915->drm)->dev, DRM_UT_DRIVER, " %-23s %d\n"
, intel_display_power_domain_str(domain), power_domains->domain_use_count
[domain])
;
5765 }
5766}
5767
5768/**
5769 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5770 * @i915: i915 device instance
5771 *
5772 * Verify if the reference count of each power well matches its HW enabled
5773 * state and the total refcount of the domains it belongs to. This must be
5774 * called after modeset HW state sanitization, which is responsible for
5775 * acquiring reference counts for any power wells in use and disabling the
5776 * ones left on by BIOS but not required by any active output.
5777 */
5778static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *i915)
5779{
5780 struct i915_power_domains *power_domains = &i915->power_domains;
5781 struct i915_power_well *power_well;
5782 bool_Bool dump_domain_info;
5783
5784 mutex_lock(&power_domains->lock)rw_enter_write(&power_domains->lock);
5785
5786 verify_async_put_domains_state(power_domains);
5787
5788 dump_domain_info = false0;
5789 for_each_power_well(i915, power_well)for ((power_well) = (i915)->power_domains.power_wells; (power_well
) - (i915)->power_domains.power_wells < (i915)->power_domains
.power_well_count; (power_well)++)
{
5790 enum intel_display_power_domain domain;
5791 int domains_count;
5792 bool_Bool enabled;
5793
5794 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5795 if ((power_well->count || power_well->desc->always_on) !=
5796 enabled)
5797 drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , power_well
->desc->name, power_well->count, enabled)
5798 "power well %s state mismatch (refcount %d/enabled %d)",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , power_well
->desc->name, power_well->count, enabled)
5799 power_well->desc->name,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , power_well
->desc->name, power_well->count, enabled)
5800 power_well->count, enabled)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s state mismatch (refcount %d/enabled %d)"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , power_well
->desc->name, power_well->count, enabled)
;
5801
5802 domains_count = 0;
5803 for_each_power_domain(domain, power_well->desc->domains)for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++
) if (!((1ULL << (domain)) & (power_well->desc->
domains))) {} else
5804 domains_count += power_domains->domain_use_count[domain];
5805
5806 if (power_well->count != domains_count) {
5807 drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__ , power_well->desc->name, power_well->
count, domains_count)
5808 "power well %s refcount/domain refcount mismatch "printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__ , power_well->desc->name, power_well->
count, domains_count)
5809 "(refcount %d/domains refcount %d)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__ , power_well->desc->name, power_well->
count, domains_count)
5810 power_well->desc->name, power_well->count,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__ , power_well->desc->name, power_well->
count, domains_count)
5811 domains_count)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n", ({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__ , power_well->desc->name, power_well->
count, domains_count)
;
5812 dump_domain_info = true1;
5813 }
5814 }
5815
5816 if (dump_domain_info) {
5817 static bool_Bool dumped;
5818
5819 if (!dumped) {
5820 intel_power_domains_dump_info(i915);
5821 dumped = true1;
5822 }
5823 }
5824
5825 mutex_unlock(&power_domains->lock)rw_exit_write(&power_domains->lock);
5826}
5827
5828#else
5829
5830static void intel_power_domains_verify_state(struct drm_i915_privateinteldrm_softc *i915)
5831{
5832}
5833
5834#endif
5835
5836void intel_display_power_suspend_late(struct drm_i915_privateinteldrm_softc *i915)
5837{
5838 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11 || IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
)
5839 bxt_enable_dc9(i915);
5840 else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL))
5841 hsw_enable_pc8(i915);
5842}
5843
5844void intel_display_power_resume_early(struct drm_i915_privateinteldrm_softc *i915)
5845{
5846 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11 || IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
) {
5847 gen9_sanitize_dc_state(i915);
5848 bxt_disable_dc9(i915);
5849 } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) {
5850 hsw_disable_pc8(i915);
5851 }
5852}
5853
5854void intel_display_power_suspend(struct drm_i915_privateinteldrm_softc *i915)
5855{
5856 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11) {
5857 icl_display_core_uninit(i915);
5858 bxt_enable_dc9(i915);
5859 } else if (IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
) {
5860 bxt_display_core_uninit(i915);
5861 bxt_enable_dc9(i915);
5862 } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) {
5863 hsw_enable_pc8(i915);
5864 }
5865}
5866
5867void intel_display_power_resume(struct drm_i915_privateinteldrm_softc *i915)
5868{
5869 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11) {
1. Assuming field 'gen' is >= 11
2. Taking true branch
5870 bxt_disable_dc9(i915);
5871 icl_display_core_init(i915, true1);
3. Calling 'icl_display_core_init'
5872 if (i915->csr.dmc_payload) {
5873 if (i915->csr.allowed_dc_mask &
5874 DC_STATE_EN_UPTO_DC6(2 << 0))
5875 skl_enable_dc6(i915);
5876 else if (i915->csr.allowed_dc_mask &
5877 DC_STATE_EN_UPTO_DC5(1 << 0))
5878 gen9_enable_dc5(i915);
5879 }
5880 } else if (IS_GEN9_LP(i915)((0 + (&(i915)->__info)->gen == (9)) && ((&
(i915)->__info)->is_lp))
) {
5881 bxt_disable_dc9(i915);
5882 bxt_display_core_init(i915, true1);
5883 if (i915->csr.dmc_payload &&
5884 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5(1 << 0)))
5885 gen9_enable_dc5(i915);
5886 } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) {
5887 hsw_disable_pc8(i915);
5888 }
5889}

/usr/src/sys/dev/pci/drm/i915/display/intel_de.h

1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#ifndef __INTEL_DE_H__
7#define __INTEL_DE_H__
8
9#include "i915_drv.h"
10#include "i915_reg.h"
11#include "intel_uncore.h"
12
13static inline u32
14intel_de_read(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
15{
16 return intel_uncore_read(&i915->uncore, reg);
17}
18
19static inline void
20intel_de_posting_read(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
21{
22 intel_uncore_posting_read(&i915->uncore, reg)((void)intel_uncore_read_notrace(&i915->uncore, reg));
23}
24
25/* Note: read the warnings for intel_uncore_*_fw() functions! */
26static inline u32
27intel_de_read_fw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg)
28{
29 return intel_uncore_read_fw(&i915->uncore, reg)__raw_uncore_read32(&i915->uncore, reg);
30}
31
32static inline void
33intel_de_write(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 val)
34{
35 intel_uncore_write(&i915->uncore, reg, val);
36}
37
38/* Note: read the warnings for intel_uncore_*_fw() functions! */
39static inline void
40intel_de_write_fw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 val)
41{
42 intel_uncore_write_fw(&i915->uncore, reg, val)__raw_uncore_write32(&i915->uncore, reg, val);
43}
44
45static inline void
46intel_de_rmw(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg, u32 clear, u32 set)
47{
48 intel_uncore_rmw(&i915->uncore, reg, clear, set);
11. Calling 'intel_uncore_rmw'
49}
50
51static inline int
52intel_de_wait_for_register(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
53 u32 mask, u32 value, unsigned int timeout)
54{
55 return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
56}
57
58static inline int
59intel_de_wait_for_set(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
60 u32 mask, unsigned int timeout)
61{
62 return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
63}
64
65static inline int
66intel_de_wait_for_clear(struct drm_i915_privateinteldrm_softc *i915, i915_reg_t reg,
67 u32 mask, unsigned int timeout)
68{
69 return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
70}
71
72#endif /* __INTEL_DE_H__ */

/usr/src/sys/dev/pci/drm/i915/intel_uncore.h

1/*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __INTEL_UNCORE_H__
26#define __INTEL_UNCORE_H__
27
28#include <linux/spinlock.h>
29#include <linux/notifier.h>
30#include <linux/hrtimer.h>
31#include <linux/io-64-nonatomic-lo-hi.h>
32
33#include "i915_reg.h"
34
35struct drm_i915_privateinteldrm_softc;
36struct intel_runtime_pm;
37struct intel_uncore;
38struct intel_gt;
39
40struct intel_uncore_mmio_debug {
41 spinlock_t lock; /** lock is also taken in irq contexts. */
42 int unclaimed_mmio_check;
43 int saved_mmio_check;
44 u32 suspend_count;
45};
46
47enum forcewake_domain_id {
48 FW_DOMAIN_ID_RENDER = 0,
49 FW_DOMAIN_ID_BLITTER,
50 FW_DOMAIN_ID_MEDIA,
51 FW_DOMAIN_ID_MEDIA_VDBOX0,
52 FW_DOMAIN_ID_MEDIA_VDBOX1,
53 FW_DOMAIN_ID_MEDIA_VDBOX2,
54 FW_DOMAIN_ID_MEDIA_VDBOX3,
55 FW_DOMAIN_ID_MEDIA_VEBOX0,
56 FW_DOMAIN_ID_MEDIA_VEBOX1,
57
58 FW_DOMAIN_ID_COUNT
59};
60
61enum forcewake_domains {
62 FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER)(1UL << (FW_DOMAIN_ID_RENDER)),
63 FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER)(1UL << (FW_DOMAIN_ID_BLITTER)),
64 FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA)(1UL << (FW_DOMAIN_ID_MEDIA)),
65 FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX0)),
66 FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX1)),
67 FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX2)),
68 FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3)(1UL << (FW_DOMAIN_ID_MEDIA_VDBOX3)),
69 FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0)(1UL << (FW_DOMAIN_ID_MEDIA_VEBOX0)),
70 FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1)(1UL << (FW_DOMAIN_ID_MEDIA_VEBOX1)),
71
72 FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT)(1UL << (FW_DOMAIN_ID_COUNT)) - 1
73};
74
75struct intel_uncore_funcs {
76 void (*force_wake_get)(struct intel_uncore *uncore,
77 enum forcewake_domains domains);
78 void (*force_wake_put)(struct intel_uncore *uncore,
79 enum forcewake_domains domains);
80
81 enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
82 i915_reg_t r);
83 enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
84 i915_reg_t r);
85
86 u8 (*mmio_readb)(struct intel_uncore *uncore,
87 i915_reg_t r, bool_Bool trace);
88 u16 (*mmio_readw)(struct intel_uncore *uncore,
89 i915_reg_t r, bool_Bool trace);
90 u32 (*mmio_readl)(struct intel_uncore *uncore,
91 i915_reg_t r, bool_Bool trace);
92 u64 (*mmio_readq)(struct intel_uncore *uncore,
93 i915_reg_t r, bool_Bool trace);
94
95 void (*mmio_writeb)(struct intel_uncore *uncore,
96 i915_reg_t r, u8 val, bool_Bool trace);
97 void (*mmio_writew)(struct intel_uncore *uncore,
98 i915_reg_t r, u16 val, bool_Bool trace);
99 void (*mmio_writel)(struct intel_uncore *uncore,
100 i915_reg_t r, u32 val, bool_Bool trace);
101};
102
103struct intel_forcewake_range {
104 u32 start;
105 u32 end;
106
107 enum forcewake_domains domains;
108};
109
110struct intel_uncore {
111 void __iomem *regs;
112
113 struct drm_i915_privateinteldrm_softc *i915;
114 struct intel_runtime_pm *rpm;
115
116 spinlock_t lock; /** lock is also taken in irq contexts. */
117
118 unsigned int flags;
119#define UNCORE_HAS_FORCEWAKE(1UL << (0)) BIT(0)(1UL << (0))
120#define UNCORE_HAS_FPGA_DBG_UNCLAIMED(1UL << (1)) BIT(1)(1UL << (1))
121#define UNCORE_HAS_DBG_UNCLAIMED(1UL << (2)) BIT(2)(1UL << (2))
122#define UNCORE_HAS_FIFO(1UL << (3)) BIT(3)(1UL << (3))
123
124 const struct intel_forcewake_range *fw_domains_table;
125 unsigned int fw_domains_table_entries;
126
127 struct notifier_block pmic_bus_access_nb;
128 struct intel_uncore_funcs funcs;
129
130 unsigned int fifo_count;
131
132 enum forcewake_domains fw_domains;
133 enum forcewake_domains fw_domains_active;
134 enum forcewake_domains fw_domains_timer;
135 enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
136
137 struct intel_uncore_forcewake_domain {
138 struct intel_uncore *uncore;
139 enum forcewake_domain_id id;
140 enum forcewake_domains mask;
141 unsigned int wake_count;
142 bool_Bool active;
143 struct timeout timer;
144 u32 __iomem *reg_set;
145 u32 __iomem *reg_ack;
146 } *fw_domain[FW_DOMAIN_ID_COUNT];
147
148 unsigned int user_forcewake_count;
149
150 struct intel_uncore_mmio_debug *debug;
151};
152
153/* Iterate over initialised fw domains */
154#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__)for (tmp__ = (mask__); tmp__ ;) if (!(domain__ = (uncore__)->
fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__ &= ~(1UL <<
(__idx)); __idx; })])) {} else
\
155 for (tmp__ = (mask__); tmp__ ;) \
156 for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])if (!(domain__ = (uncore__)->fw_domain[({ int __idx = ffs(
tmp__) - 1; tmp__ &= ~(1UL << (__idx)); __idx; })])
) {} else
157
158#define for_each_fw_domain(domain__, uncore__, tmp__)for (tmp__ = ((uncore__)->fw_domains); tmp__ ;) if (!(domain__
= (uncore__)->fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__
&= ~(1UL << (__idx)); __idx; })])) {} else
\
159 for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)for (tmp__ = ((uncore__)->fw_domains); tmp__ ;) if (!(domain__
= (uncore__)->fw_domain[({ int __idx = ffs(tmp__) - 1; tmp__
&= ~(1UL << (__idx)); __idx; })])) {} else
160
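
The statement expression inside the two macros above is the usual "peel off the lowest set bit" idiom. A stand-alone, user-space illustration of the same loop shape (not driver code):

#include <stdio.h>
#include <strings.h>                    /* ffs() */

int main(void)
{
        unsigned int mask = 0x15;       /* e.g. RENDER | MEDIA | VDBOX1 set */

        while (mask) {
                int idx = ffs(mask) - 1;        /* index of the lowest set bit */

                mask &= ~(1U << idx);           /* clear it for the next pass */
                printf("visiting domain index %d\n", idx);
        }
        return 0;
}
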
161static inline bool_Bool
162intel_uncore_has_forcewake(const struct intel_uncore *uncore)
163{
164 return uncore->flags & UNCORE_HAS_FORCEWAKE(1UL << (0));
165}
166
167static inline bool_Bool
168intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
169{
170 return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED(1UL << (1));
171}
172
173static inline bool_Bool
174intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
175{
176 return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED(1UL << (2));
177}
178
179static inline bool_Bool
180intel_uncore_has_fifo(const struct intel_uncore *uncore)
181{
182 return uncore->flags & UNCORE_HAS_FIFO(1UL << (3));
183}
184
185void
186intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
187void intel_uncore_init_early(struct intel_uncore *uncore,
188 struct drm_i915_privateinteldrm_softc *i915);
189int intel_uncore_init_mmio(struct intel_uncore *uncore);
190void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
191 struct intel_gt *gt);
192bool_Bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
193bool_Bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
194void intel_uncore_fini_mmio(struct intel_uncore *uncore);
195void intel_uncore_suspend(struct intel_uncore *uncore);
196void intel_uncore_resume_early(struct intel_uncore *uncore);
197void intel_uncore_runtime_resume(struct intel_uncore *uncore);
198
199void assert_forcewakes_inactive(struct intel_uncore *uncore);
200void assert_forcewakes_active(struct intel_uncore *uncore,
201 enum forcewake_domains fw_domains);
202const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
203
204enum forcewake_domains
205intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
206 i915_reg_t reg, unsigned int op);
207#define FW_REG_READ(1) (1)
208#define FW_REG_WRITE(2) (2)
209
210void intel_uncore_forcewake_get(struct intel_uncore *uncore,
211 enum forcewake_domains domains);
212void intel_uncore_forcewake_put(struct intel_uncore *uncore,
213 enum forcewake_domains domains);
214void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
215 enum forcewake_domains fw_domains);
216
217/*
218 * Like above but the caller must manage the uncore.lock itself.
219 * Must be used with I915_READ_FW and friends.
220 */
221void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
222 enum forcewake_domains domains);
223void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
224 enum forcewake_domains domains);
225
226void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
227void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
228
229int __intel_wait_for_register(struct intel_uncore *uncore,
230 i915_reg_t reg,
231 u32 mask,
232 u32 value,
233 unsigned int fast_timeout_us,
234 unsigned int slow_timeout_ms,
235 u32 *out_value);
236static inline int
237intel_wait_for_register(struct intel_uncore *uncore,
238 i915_reg_t reg,
239 u32 mask,
240 u32 value,
241 unsigned int timeout_ms)
242{
243 return __intel_wait_for_register(uncore, reg, mask, value, 2,
244 timeout_ms, NULL((void *)0));
245}
246
247int __intel_wait_for_register_fw(struct intel_uncore *uncore,
248 i915_reg_t reg,
249 u32 mask,
250 u32 value,
251 unsigned int fast_timeout_us,
252 unsigned int slow_timeout_ms,
253 u32 *out_value);
254static inline int
255intel_wait_for_register_fw(struct intel_uncore *uncore,
256 i915_reg_t reg,
257 u32 mask,
258 u32 value,
259 unsigned int timeout_ms)
260{
261 return __intel_wait_for_register_fw(uncore, reg, mask, value,
262 2, timeout_ms, NULL((void *)0));
263}
264
265/* register access functions */
266#define __raw_read(x__, s__) \
267static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
268 i915_reg_t reg) \
269{ \
270 return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
271}
272
273#define __raw_write(x__, s__) \
274static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
275 i915_reg_t reg, u##x__ val) \
276{ \
277 write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
278}
279__raw_read(8, b)
280__raw_read(16, w)
281__raw_read(32, l)
282__raw_read(64, q)
283
284__raw_write(8, b)
285__raw_write(16, w)
286__raw_write(32, l)
287__raw_write(64, q)
288
289#undef __raw_read
290#undef __raw_write
291
292#define __uncore_read(name__, x__, s__, trace__) \
293static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
294 i915_reg_t reg) \
295{ \
296 return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
297}
298
299#define __uncore_write(name__, x__, s__, trace__) \
300static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
301 i915_reg_t reg, u##x__ val) \
302{ \
303 uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
304}
305
306__uncore_read(read8, 8, b, true1)
307__uncore_read(read16, 16, w, true1)
308__uncore_read(read, 32, l, true1)
13. Passed-by-value struct argument contains uninitialized data (e.g., field: 'reg')
309__uncore_read(read16_notrace, 16, w, false0)
310__uncore_read(read_notrace, 32, l, false0)
311
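
The warning reported at line 308 (note 13 above) concerns i915_reg_t being passed by value while its 'reg' field may be uninitialized along the analyzed path. A minimal stand-alone reproduction of that pattern, unrelated to the driver itself:

#include <stdint.h>

typedef struct { uint32_t reg; } reg_t; /* stand-in for i915_reg_t */

static uint32_t read_reg(reg_t r)       /* struct passed by value */
{
        return r.reg;
}

uint32_t example(int have_offset)
{
        reg_t r;                        /* 'reg' starts out uninitialized */

        if (have_offset)
                r.reg = 0x6014;         /* only initialized on this branch */

        /*
         * If have_offset == 0, the copy passed here carries garbage,
         * which is exactly what the checker flags.
         */
        return read_reg(r);
}
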
312__uncore_write(write8, 8, b, true1)
313__uncore_write(write16, 16, w, true1)
314__uncore_write(write, 32, l, true1)
315__uncore_write(write_notrace, 32, l, false0)
316
317/* Be very careful with read/write 64-bit values. On 32-bit machines, they
318 * will be implemented using 2 32-bit writes in an arbitrary order with
319 * an arbitrary delay between them. This can cause the hardware to
320 * act upon the intermediate value, possibly leading to corruption and
321 * machine death. For this reason we do not support I915_WRITE64, or
322 * uncore->funcs.mmio_writeq.
323 *
324 * When reading a 64-bit value as two 32-bit values, the delay may cause
325 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
326 * occasionally a 64-bit register does not actually support a full readq
327 * and must be read using two 32-bit reads.
328 *
329 * You have been warned.
330 */
331__uncore_read(read64, 64, q, true1)
332
333static inline u64
334intel_uncore_read64_2x32(struct intel_uncore *uncore,
335 i915_reg_t lower_reg, i915_reg_t upper_reg)
336{
337 u32 upper, lower, old_upper, loop = 0;
338 upper = intel_uncore_read(uncore, upper_reg);
339 do {
340 old_upper = upper;
341 lower = intel_uncore_read(uncore, lower_reg);
342 upper = intel_uncore_read(uncore, upper_reg);
343 } while (upper != old_upper && loop++ < 2);
344 return (u64)upper << 32 | lower;
345}
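
A worked example (made-up values) of the torn read the retry loop above guards against:

/*
 *   1st read of upper_reg  -> 0x00000001
 *   counter wraps: the true 64-bit value is now 0x0000000200000003
 *   read of lower_reg      -> 0x00000003
 *   2nd read of upper_reg  -> 0x00000002   (differs from old_upper)
 *
 * Combining the stale upper word with the fresh lower word would yield
 * 0x0000000100000003, off by 2^32. Because upper != old_upper, the loop
 * re-reads both halves and returns a consistent 0x00000002xxxxxxxx value.
 */
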
346
347#define intel_uncore_posting_read(...)((void)intel_uncore_read_notrace(...)) ((void)intel_uncore_read_notrace(__VA_ARGS__))
348#define intel_uncore_posting_read16(...)((void)intel_uncore_read16_notrace(...)) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
349
350#undef __uncore_read
351#undef __uncore_write
352
353/* These are untraced mmio-accessors that are only valid to be used inside
354 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
355 * controlled.
356 *
357 * Think twice, and think again, before using these.
358 *
359 * As an example, these accessors can possibly be used between:
360 *
361 * spin_lock_irq(&uncore->lock);
362 * intel_uncore_forcewake_get__locked();
363 *
364 * and
365 *
366 * intel_uncore_forcewake_put__locked();
367 * spin_unlock_irq(&uncore->lock);
368 *
369 *
370 * Note: some registers may not need forcewake held, so
371 * intel_uncore_forcewake_{get,put} can be omitted, see
372 * intel_uncore_forcewake_for_reg().
373 *
374 * Certain architectures will die if the same cacheline is concurrently accessed
375 * by different clients (e.g. on Ivybridge). Access to registers should
376 * therefore generally be serialised, by either the dev_priv->uncore.lock or
377 * a more localised lock guarding all access to that bank of registers.
378 */
379#define intel_uncore_read_fw(...)__raw_uncore_read32(...) __raw_uncore_read32(__VA_ARGS__)
380#define intel_uncore_write_fw(...)__raw_uncore_write32(...) __raw_uncore_write32(__VA_ARGS__)
381#define intel_uncore_write64_fw(...)__raw_uncore_write64(...) __raw_uncore_write64(__VA_ARGS__)
382#define intel_uncore_posting_read_fw(...)((void)__raw_uncore_read32(...)) ((void)intel_uncore_read_fw(__VA_ARGS__)__raw_uncore_read32(__VA_ARGS__))
383
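
A hedged sketch of the locked usage pattern the comment above outlines, filled in as a concrete function; the surrounding context and the choice of FORCEWAKE_ALL are assumptions, not taken from this report:

static inline u32 example_read_under_uncore_lock(struct intel_uncore *uncore,
                                                 i915_reg_t reg)
{
        u32 val;

        spin_lock_irq(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

        val = intel_uncore_read_fw(uncore, reg);        /* untraced read, lock held */

        intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
        spin_unlock_irq(&uncore->lock);

        return val;
}
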
384static inline void intel_uncore_rmw(struct intel_uncore *uncore,
385 i915_reg_t reg, u32 clear, u32 set)
386{
387 u32 old, val;
388
389 old = intel_uncore_read(uncore, reg);
12. Calling 'intel_uncore_read'
390 val = (old & ~clear) | set;
391 if (val != old)
392 intel_uncore_write(uncore, reg, val);
393}
394
395static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
396 i915_reg_t reg, u32 clear, u32 set)
397{
398 u32 old, val;
399
400 old = intel_uncore_read_fw(uncore, reg)__raw_uncore_read32(uncore, reg);
401 val = (old & ~clear) | set;
402 if (val != old)
403 intel_uncore_write_fw(uncore, reg, val)__raw_uncore_write32(uncore, reg, val);
404}
405
406static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
407 i915_reg_t reg, u32 val,
408 u32 mask, u32 expected_val)
409{
410 u32 reg_val;
411
412 intel_uncore_write(uncore, reg, val);
413 reg_val = intel_uncore_read(uncore, reg);
414
415 return (reg_val & mask) != expected_val ? -EINVAL22 : 0;
416}
417
418#define raw_reg_read(base, reg)ioread32(base + i915_mmio_reg_offset(reg)) \
419 readl(base + i915_mmio_reg_offset(reg))ioread32(base + i915_mmio_reg_offset(reg))
420#define raw_reg_write(base, reg, value)iowrite32(value, base + i915_mmio_reg_offset(reg)) \
421 writel(value, base + i915_mmio_reg_offset(reg))iowrite32(value, base + i915_mmio_reg_offset(reg))
422
423#endif /* !__INTEL_UNCORE_H__ */