File: dev/pci/drm/i915/display/intel_psr.c
Warning: line 1756, column 2: Value stored to 'intel_connector' is never read
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_hdmi.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it completely eliminates
 * display refresh requests to DDR memory as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 gets out of deep sleep state (if it was in
 * it), DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run after
 * 6 frames; if no other flip occurs and that work runs, DC3CO is disabled and
 * PSR2 is configured to enter deep sleep, resetting again in case of another
 * flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

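/*
 * Effective global PSR enable: the PSR debug mode, when set, overrides the
 * enable_psr module parameter.
 */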
static bool psr_global_enabled(struct drm_i915_private *i915)
{
	switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
		    crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

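/*
 * Program the PSR interrupt mask: the error bit is always left unmasked,
 * while the pre-entry/post-exit events are only unmasked when
 * I915_PSR_DEBUG_IRQ is set.
 */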
static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
			psr_event_print(dev_priv, val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so
		 * fast that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again,
		 * so we don't care about unmasking the interrupt or
		 * clearing irq_aux_error.
		 */
		val = intel_de_read(dev_priv, imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		intel_de_write(dev_priv, imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

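/* Read the sink's ALPM capability from DPCD; PSR2 support depends on it. */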
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u16 val;
	ssize_t r;

	/*
	 * Returning the default X granularity if granularity not required or
	 * if DPCD read fails
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		drm_warn(&dev_priv->drm,
			 "More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

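/*
 * HSW/BDW only: pre-program the AUX message (a native write of
 * DP_SET_POWER = D0) that the PSR hardware sends by itself when waking the
 * sink, along with the timing/size fields of SRD_AUX_CTL.
 */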
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
		       aux_ctl);
}

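/*
 * Configure the sink over DPCD: ALPM plus PSR2 (with CRC-error IRQs) or PSR1
 * (with optional link standby and CRC verification), then power state D0.
 */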
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

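/*
 * Translate the VBT TP1/TP2/TP3 wake-up times into EDP_PSR_CTL
 * training-pattern time fields, selecting TP1+TP3 when HBR2 and TPS3 are
 * supported.
 */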
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

check_tp3_sel:
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

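/*
 * Build and write EDP_PSR2_CTL: idle frames, selective-update tracking, wake
 * line counts and, where available, the selective-fetch manual tracking mode.
 */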
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
	val |= intel_psr2_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. For optimal power consumption,
		 * modes below 4k resolution need to decrease IO_BUFFER_WAKE
		 * and FAST_WAKE, and modes above 4k resolution need to
		 * increase them.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

	if (dev_priv->psr.psr2_sel_fetch_enabled) {
		/* WA 1408330847 */
		if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
			       PSR2_MAN_TRK_CTL_ENABLE);
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

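/* PSR2 is tied to a single transcoder: EDP on GEN9-11, transcoder A on GEN12+. */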
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     u32 idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}

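/*
 * Delayed work armed on a flip: if no new flip re-armed it in the meantime,
 * leave DC3CO and let PSR2 use deep sleep again.
 */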
static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.dc3co_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(dev_priv);
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	/* B.Specs:49196 DC3CO only works with pipe A and DDI A. */
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
	    dig_port->base.port != PORT_A)
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (!dev_priv->params.enable_psr2_sel_fetch) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

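/*
 * Validate every PSR2 constraint for this CRTC state: sink support,
 * transcoder, DSC and CRC conflicts, bpp/resolution limits and SU
 * granularity.
 */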
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			    crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	if (!psr_global_enabled(dev_priv))
		return;
	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but for now only one instance of PSR is supported, so let's keep it
	 * hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		drm_WARN_ON(&dev_priv->drm,
			    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

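/*
 * Program the source side: AUX setup on HSW/BDW, gen9 chicken bits, the PSR
 * debug mask, interrupt masks and, when used, the DC3CO exit line.
 */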
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup;
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = intel_de_read(dev_priv, reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		intel_de_write(dev_priv, reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues, now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
		       mask);

	psr_irq_control(dev_priv);

	if (crtc_state->dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms support DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
		val &= ~EXITLINE_MASK;
		val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     dev_priv->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);
}

951 | static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
952 | const struct intel_crtc_state *crtc_state, |
953 | const struct drm_connector_state *conn_state) |
954 | { |
955 | struct intel_dp *intel_dp = dev_priv->psr.dp; |
956 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
957 | struct intel_encoder *encoder = &dig_port->base; |
958 | u32 val; |
959 | |
960 | drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
961 |
962 | dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
963 | dev_priv->psr.busy_frontbuffer_bits = 0;
964 | dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
965 | dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
966 | dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
967 | /* DC5/DC6 requires at least 6 idle frames */
968 | val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
969 | dev_priv->psr.dc3co_exit_delay = val;
970 | dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
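/*
 * A worked example: at a 60 Hz refresh rate intel_get_frame_time_us()
 * comes to roughly 16667 us, so the DC3CO exit delay converts to about
 * 100 ms (six idle frames) worth of jiffies.
 */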
971 | |
972 | /* |
973 | * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR |
974 | * will still keep the error set even after the reset done in the |
975 | * irq_preinstall and irq_uninstall hooks. |
976 | * Enabling PSR in this situation causes the screen to freeze the
977 | * first time the PSR HW tries to activate, so let's keep PSR disabled
978 | * to avoid any rendering problems.
979 | */ |
980 | if (INTEL_GEN(dev_priv) >= 12) {
981 | val = intel_de_read(dev_priv,
982 | TRANS_PSR_IIR(dev_priv->psr.transcoder));
983 | val &= EDP_PSR_ERROR(0);
984 | } else {
985 | val = intel_de_read(dev_priv, EDP_PSR_IIR);
986 | val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
987 | }
988 | if (val) {
989 | dev_priv->psr.sink_not_reliable = true;
990 | drm_dbg_kms(&dev_priv->drm,
991 | "PSR interruption error set, not enabling PSR\n");
992 | return; |
993 | } |
994 | |
995 | drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
996 | dev_priv->psr.psr2_enabled ? "2" : "1");
997 | intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
998 | &dev_priv->psr.vsc);
999 | intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
1000 | intel_psr_enable_sink(intel_dp);
1001 | intel_psr_enable_source(intel_dp, crtc_state);
1002 | dev_priv->psr.enabled = true;
1003 | |
1004 | intel_psr_activate(intel_dp); |
1005 | } |
1006 | |
1007 | /** |
1008 | * intel_psr_enable - Enable PSR |
1009 | * @intel_dp: Intel DP |
1010 | * @crtc_state: new CRTC state |
1011 | * @conn_state: new CONNECTOR state |
1012 | * |
1013 | * This function can only be called after the pipe is fully trained and enabled. |
1014 | */ |
1015 | void intel_psr_enable(struct intel_dp *intel_dp, |
1016 | const struct intel_crtc_state *crtc_state, |
1017 | const struct drm_connector_state *conn_state) |
1018 | { |
1019 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1020 |
1021 | if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
1022 | return;
1023 |
1024 | dev_priv->psr.force_mode_changed = false;
1025 |
1026 | if (!crtc_state->has_psr)
1027 | return;
1028 |
1029 | drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1030 |
1031 | mutex_lock(&dev_priv->psr.lock);
1032 |
1033 | if (!psr_global_enabled(dev_priv)) {
1034 | drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1035 | goto unlock;
1036 | }
1037 |
1038 | intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
1039 |
1040 | unlock:
1041 | mutex_unlock(&dev_priv->psr.lock);
1042 | } |
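The kernel-doc above and the one for intel_psr_disable() further down imply an ordering contract with the pipe. A minimal caller sketch, assuming hypothetical enable_pipe()/disable_pipe() placeholders that stand in for the real modeset sequence:

static void example_modeset_cycle(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
{
        enable_pipe(crtc_state);        /* pipe fully trained and enabled first */
        intel_psr_enable(intel_dp, crtc_state, conn_state);

        /* ... later, on teardown ... */

        intel_psr_disable(intel_dp, crtc_state);        /* PSR off before the pipe */
        disable_pipe(crtc_state);
}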
1043 | |
1044 | static void intel_psr_exit(struct drm_i915_private *dev_priv)
1045 | { |
1046 | u32 val; |
1047 | |
1048 | if (!dev_priv->psr.active) { |
1049 | if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { |
1050 | val = intel_de_read(dev_priv, |
1051 | EDP_PSR2_CTL(dev_priv->psr.transcoder));
1052 | drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1053 | }
1054 |
1055 | val = intel_de_read(dev_priv,
1056 | EDP_PSR_CTL(dev_priv->psr.transcoder));
1057 | drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1058 | |
1059 | return; |
1060 | } |
1061 | |
1062 | if (dev_priv->psr.psr2_enabled) { |
1063 | tgl_disallow_dc3co_on_psr2_exit(dev_priv); |
1064 | val = intel_de_read(dev_priv, |
1065 | EDP_PSR2_CTL(dev_priv->psr.transcoder));
1066 | drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1067 | val &= ~EDP_PSR2_ENABLE;
1068 | intel_de_write(dev_priv,
1069 | EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
1070 | } else {
1071 | val = intel_de_read(dev_priv,
1072 | EDP_PSR_CTL(dev_priv->psr.transcoder));
1073 | drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1074 | val &= ~EDP_PSR_ENABLE;
1075 | intel_de_write(dev_priv,
1076 | EDP_PSR_CTL(dev_priv->psr.transcoder), val);
1077 | }
1078 | dev_priv->psr.active = false;
1079 | } |
1080 | |
1081 | static void intel_psr_disable_locked(struct intel_dp *intel_dp) |
1082 | { |
1083 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1084 | i915_reg_t psr_status;
1085 | u32 psr_status_mask;
1086 |
1087 | lockdep_assert_held(&dev_priv->psr.lock);
1088 | |
1089 | if (!dev_priv->psr.enabled) |
1090 | return; |
1091 | |
1092 | drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1093 | dev_priv->psr.psr2_enabled ? "2" : "1");
1094 | |
1095 | intel_psr_exit(dev_priv); |
1096 | |
1097 | if (dev_priv->psr.psr2_enabled) { |
1098 | psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
1099 | psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1100 | } else {
1101 | psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
1102 | psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1103 | } |
1104 | |
1105 | /* Wait till PSR is idle */ |
1106 | if (intel_de_wait_for_clear(dev_priv, psr_status, |
1107 | psr_status_mask, 2000)) |
1108 | drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1109 | |
1110 | /* WA 1408330847 */ |
1111 | if (dev_priv->psr.psr2_sel_fetch_enabled &&
1112 | (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
1113 | IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
1114 | intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1115 | DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1116 | |
1117 | /* Disable PSR on Sink */ |
1118 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1119 |
1120 | if (dev_priv->psr.psr2_enabled)
1121 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1122 |
1123 | dev_priv->psr.enabled = false;
1124 | } |
1125 | |
1126 | /** |
1127 | * intel_psr_disable - Disable PSR |
1128 | * @intel_dp: Intel DP |
1129 | * @old_crtc_state: old CRTC state |
1130 | * |
1131 | * This function needs to be called before disabling the pipe.
1132 | */ |
1133 | void intel_psr_disable(struct intel_dp *intel_dp, |
1134 | const struct intel_crtc_state *old_crtc_state) |
1135 | { |
1136 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1137 | |
1138 | if (!old_crtc_state->has_psr) |
1139 | return; |
1140 | |
1141 | if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
1142 | return; |
1143 | |
1144 | mutex_lock(&dev_priv->psr.lock);
1145 |
1146 | intel_psr_disable_locked(intel_dp);
1147 |
1148 | mutex_unlock(&dev_priv->psr.lock);
1149 | cancel_work_sync(&dev_priv->psr.work); |
1150 | cancel_delayed_work_sync(&dev_priv->psr.dc3co_work); |
1151 | } |
1152 | |
1153 | static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
1154 | {
1155 | if (INTEL_GEN(dev_priv) >= 9)
1156 | /* |
1157 | * Display WA #0884: skl+ |
1158 | * This documented WA for bxt can be safely applied |
1159 | * broadly so we can force HW tracking to exit PSR |
1160 | * instead of disabling and re-enabling. |
1161 | * Workaround tells us to write 0 to CUR_SURFLIVE_A, |
1162 | * but it makes more sense to write to the currently
1163 | * active pipe.
1164 | */ |
1165 | intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
1166 | else |
1167 | /* |
1168 | * A write to CURSURFLIVE does not cause HW tracking to exit PSR
1169 | * on older gens, so do the manual exit instead.
1170 | */ |
1171 | intel_psr_exit(dev_priv); |
1172 | } |
1173 | |
1174 | void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) |
1175 | { |
1176 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1177 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1178 | struct i915_psr *psr = &dev_priv->psr; |
1179 | |
1180 | if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
1181 | !crtc_state->enable_psr2_sel_fetch) |
1182 | return; |
1183 | |
1184 | intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
1185 | crtc_state->psr2_man_track_ctl); |
1186 | } |
1187 | |
1188 | void intel_psr2_sel_fetch_update(struct intel_atomic_state *state, |
1189 | struct intel_crtc *crtc) |
1190 | { |
1191 | struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); |
1192 | |
1193 | if (!crtc_state->enable_psr2_sel_fetch) |
1194 | return; |
1195 | |
1196 | crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
1197 | PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1198 | } |
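/*
 * With the bit positions shown in the expansion above (ENABLE = bit 31,
 * SF_SINGLE_FULL_FRAME = bit 3), psr2_man_track_ctl composes to
 * 0x80000008, i.e. each update requests one full-frame selective fetch.
 */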
1199 | |
1200 | /** |
1201 | * intel_psr_update - Update PSR state |
1202 | * @intel_dp: Intel DP |
1203 | * @crtc_state: new CRTC state |
1204 | * @conn_state: new CONNECTOR state |
1205 | * |
1206 | * This function will update the PSR state, disabling, enabling or switching the
1207 | * PSR version when executing fastsets. For a full modeset, intel_psr_disable() and
1208 | * intel_psr_enable() should be called instead. |
1209 | */ |
1210 | void intel_psr_update(struct intel_dp *intel_dp, |
1211 | const struct intel_crtc_state *crtc_state, |
1212 | const struct drm_connector_state *conn_state) |
1213 | { |
1214 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1215 | struct i915_psr *psr = &dev_priv->psr;
1216 | bool enable, psr2_enable;
1217 |
1218 | if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
1219 | return; |
1220 | |
1221 | dev_priv->psr.force_mode_changed = false;
1222 |
1223 | mutex_lock(&dev_priv->psr.lock);
1224 | |
1225 | enable = crtc_state->has_psr && psr_global_enabled(dev_priv); |
1226 | psr2_enable = intel_psr2_enabled(dev_priv, crtc_state); |
1227 | |
1228 | if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { |
1229 | /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ |
1230 | if (crtc_state->crc_enabled && psr->enabled) |
1231 | psr_force_hw_tracking_exit(dev_priv); |
1232 | else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
1233 | /* |
1234 | * Activate PSR again after a force exit when enabling |
1235 | * CRC in older gens |
1236 | */ |
1237 | if (!dev_priv->psr.active && |
1238 | !dev_priv->psr.busy_frontbuffer_bits) |
1239 | schedule_work(&dev_priv->psr.work); |
1240 | } |
1241 | |
1242 | goto unlock; |
1243 | } |
1244 | |
1245 | if (psr->enabled) |
1246 | intel_psr_disable_locked(intel_dp); |
1247 | |
1248 | if (enable) |
1249 | intel_psr_enable_locked(dev_priv, crtc_state, conn_state); |
1250 | |
1251 | unlock: |
1252 | mutex_unlock(&dev_priv->psr.lock);
1253 | } |
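A minimal sketch of the fastset/modeset split described in the kernel-doc above, assuming a hypothetical is_fastset flag and an example_full_modeset() placeholder; the real decision lives in the atomic commit code:

static void example_commit(struct intel_dp *intel_dp, bool is_fastset,
                           const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state)
{
        if (is_fastset)
                /* may enable, disable or switch PSR1 <-> PSR2 in place */
                intel_psr_update(intel_dp, crtc_state, conn_state);
        else
                /* full modesets bracket the pipe with disable()/enable() */
                example_full_modeset(intel_dp, crtc_state, conn_state);
}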
1254 | |
1255 | /** |
1256 | * intel_psr_wait_for_idle - wait for PSR1 to idle |
1257 | * @new_crtc_state: new CRTC state |
1258 | * @out_value: PSR status in case of failure |
1259 | * |
1260 | * This function is expected to be called from pipe_update_start() where it is |
1261 | * not expected to race with PSR enable or disable. |
1262 | * |
1263 | * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
1264 | */ |
1265 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, |
1266 | u32 *out_value) |
1267 | { |
1268 | struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1269 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1270 | |
1271 | if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) |
1272 | return 0; |
1273 | |
1274 | /* FIXME: Update this for PSR2 if we need to wait for idle */ |
1275 | if (READ_ONCE(dev_priv->psr.psr2_enabled))
1276 | return 0; |
1277 | |
1278 | /* |
1279 | * From bspec: Panel Self Refresh (BDW+) |
1280 | * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of |
1281 | * exit training time + 1.5 ms of aux channel handshake. 50 ms is |
1282 | * defensive enough to cover everything. |
1283 | */ |
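/*
 * A worked check: at 60 Hz the bound above is about 16.7 ms (one
 * refresh) + 6 ms (exit training) + 1.5 ms (AUX handshake) = roughly
 * 24 ms, so the 50 ms timeout used below leaves about 2x headroom.
 */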
1284 | |
1285 | return __intel_wait_for_register(&dev_priv->uncore, |
1286 | EDP_PSR_STATUS(dev_priv->psr.transcoder),
1287 | EDP_PSR_STATUS_STATE_MASK,
1288 | EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1289 | out_value); |
1290 | } |
1291 | |
1292 | static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
1293 | { |
1294 | i915_reg_t reg; |
1295 | u32 mask; |
1296 | int err; |
1297 | |
1298 | if (!dev_priv->psr.enabled) |
1299 | return false;
1300 | |
1301 | if (dev_priv->psr.psr2_enabled) { |
1302 | reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
1303 | mask = EDP_PSR2_STATUS_STATE_MASK;
1304 | } else {
1305 | reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
1306 | mask = EDP_PSR_STATUS_STATE_MASK;
1307 | } |
1308 | |
1309 | mutex_unlock(&dev_priv->psr.lock);
1310 | |
1311 | err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); |
1312 | if (err) |
1313 | drm_err(&dev_priv->drm,
1314 | "Timed out waiting for PSR Idle for re-enable\n");
1315 | |
1316 | /* After the unlocked wait, verify that PSR is still wanted! */ |
1317 | mutex_lock(&dev_priv->psr.lock);
1318 | return err == 0 && dev_priv->psr.enabled; |
1319 | } |
1320 | |
1321 | static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1322 | { |
1323 | struct drm_device *dev = &dev_priv->drm; |
1324 | struct drm_modeset_acquire_ctx ctx; |
1325 | struct drm_atomic_state *state; |
1326 | struct intel_crtc *crtc; |
1327 | int err; |
1328 | |
1329 | state = drm_atomic_state_alloc(dev); |
1330 | if (!state) |
1331 | return -ENOMEM;
1332 |
1333 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1334 | state->acquire_ctx = &ctx;
1335 | |
1336 | retry: |
1337 | for_each_intel_crtc(dev, crtc) {
1338 | struct intel_crtc_state *crtc_state = |
1339 | intel_atomic_get_crtc_state(state, crtc); |
1340 | |
1341 | if (IS_ERR(crtc_state)) { |
1342 | err = PTR_ERR(crtc_state); |
1343 | goto error; |
1344 | } |
1345 | |
1346 | if (crtc_state->hw.active && crtc_state->has_psr) { |
1347 | /* Mark mode as changed to trigger a pipe->update() */ |
1348 | crtc_state->uapi.mode_changed = true;
1349 | break; |
1350 | } |
1351 | } |
1352 | |
1353 | err = drm_atomic_commit(state); |
1354 | |
1355 | error: |
1356 | if (err == -EDEADLK) {
1357 | drm_atomic_state_clear(state); |
1358 | err = drm_modeset_backoff(&ctx); |
1359 | if (!err) |
1360 | goto retry; |
1361 | } |
1362 | |
1363 | drm_modeset_drop_locks(&ctx); |
1364 | drm_modeset_acquire_fini(&ctx); |
1365 | drm_atomic_state_put(state); |
1366 | |
1367 | return err; |
1368 | } |
1369 | |
1370 | int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
1371 | {
1372 | const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1373 | u32 old_mode; |
1374 | int ret; |
1375 | |
1376 | if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1377 | mode > I915_PSR_DEBUG_FORCE_PSR1) {
1378 | drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
1379 | return -EINVAL;
1380 | } |
1381 | |
1382 | ret = mutex_lock_interruptible(&dev_priv->psr.lock); |
1383 | if (ret) |
1384 | return ret; |
1385 | |
1386 | old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1387 | dev_priv->psr.debug = val; |
1388 | |
1389 | /* |
1390 | * Do it right away if it's already enabled, otherwise it will be done |
1391 | * when enabling the source. |
1392 | */ |
1393 | if (dev_priv->psr.enabled) |
1394 | psr_irq_control(dev_priv); |
1395 | |
1396 | mutex_unlock(&dev_priv->psr.lock);
1397 | |
1398 | if (old_mode != mode) |
1399 | ret = intel_psr_fastset_force(dev_priv); |
1400 | |
1401 | return ret; |
1402 | } |
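/*
 * A worked example, using the values visible in the expansions above
 * (I915_PSR_DEBUG_IRQ = 0x10, I915_PSR_DEBUG_MODE_MASK = 0x0f,
 * I915_PSR_DEBUG_FORCE_PSR1 = 0x03): val = 0x13 means "enable PSR IRQ
 * debugging and force PSR1", while val = 0x14 is rejected with -EINVAL
 * because mode 0x04 exceeds I915_PSR_DEBUG_FORCE_PSR1.
 */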
1403 | |
1404 | static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
1405 | { |
1406 | struct i915_psr *psr = &dev_priv->psr; |
1407 | |
1408 | intel_psr_disable_locked(psr->dp); |
1409 | psr->sink_not_reliable = true;
1410 | /* let's make sure that the sink is awake */
1411 | drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1412 | } |
1413 | |
1414 | static void intel_psr_work(struct work_struct *work) |
1415 | { |
1416 | struct drm_i915_private *dev_priv =
1417 | container_of(work, typeof(*dev_priv), psr.work);
1418 |
1419 | mutex_lock(&dev_priv->psr.lock);
1420 | |
1421 | if (!dev_priv->psr.enabled) |
1422 | goto unlock; |
1423 | |
1424 | if (READ_ONCE(dev_priv->psr.irq_aux_error))
1425 | intel_psr_handle_irq(dev_priv); |
1426 | |
1427 | /* |
1428 | * We have to make sure PSR is ready for re-enable,
1429 | * otherwise it stays disabled until the next full enable/disable cycle.
1430 | * PSR might take some time to get fully disabled
1431 | * and be ready for re-enable.
1432 | */ |
1433 | if (!__psr_wait_for_idle_locked(dev_priv)) |
1434 | goto unlock; |
1435 | |
1436 | /* |
1437 | * The delayed work can race with an invalidate hence we need to |
1438 | * recheck. Since psr_flush first clears this and then reschedules we |
1439 | * won't ever miss a flush when bailing out here. |
1440 | */ |
1441 | if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) |
1442 | goto unlock; |
1443 | |
1444 | intel_psr_activate(dev_priv->psr.dp); |
1445 | unlock: |
1446 | mutex_unlock(&dev_priv->psr.lock);
1447 | } |
1448 | |
1449 | /** |
1450 | * intel_psr_invalidate - Invalidate PSR
1451 | * @dev_priv: i915 device |
1452 | * @frontbuffer_bits: frontbuffer plane tracking bits |
1453 | * @origin: which operation caused the invalidate |
1454 | * |
1455 | * Since the hardware frontbuffer tracking has gaps we need to integrate |
1456 | * with the software frontbuffer tracking. This function gets called every |
1457 | * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be |
1458 | * disabled if the frontbuffer mask contains a buffer relevant to PSR. |
1459 | * |
1460 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1461 | */ |
1462 | void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1463 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
1464 | { |
1465 | if (!CAN_PSR(dev_priv))
1466 | return; |
1467 | |
1468 | if (origin == ORIGIN_FLIP) |
1469 | return; |
1470 | |
1471 | mutex_lock(&dev_priv->psr.lock);
1472 | if (!dev_priv->psr.enabled) {
1473 | mutex_unlock(&dev_priv->psr.lock);
1474 | return; |
1475 | } |
1476 | |
1477 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1478 | dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; |
1479 | |
1480 | if (frontbuffer_bits) |
1481 | intel_psr_exit(dev_priv); |
1482 | |
1483 | mutex_unlock(&dev_priv->psr.lock);
1484 | } |
1485 | |
1486 | /*
1487 | * Once we rely completely on PSR2 S/W tracking in the future,
1488 | * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
1489 | * events as well, so tgl_dc3co_flush() will need to be changed
1490 | * accordingly.
1491 | */
1492 | static void |
1493 | tgl_dc3co_flush(struct drm_i915_private *dev_priv,
1494 | unsigned int frontbuffer_bits, enum fb_op_origin origin) |
1495 | { |
1496 | mutex_lock(&dev_priv->psr.lock);
1497 | |
1498 | if (!dev_priv->psr.dc3co_enabled) |
1499 | goto unlock; |
1500 | |
1501 | if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active) |
1502 | goto unlock; |
1503 | |
1504 | /*
1505 | * Every frontbuffer flush/flip event pushes back the delayed work;
1506 | * when the delayed work finally runs, the display has been idle.
1507 | */
1508 | if (!(frontbuffer_bits &
1509 | INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
1510 | goto unlock; |
1511 | |
1512 | tgl_psr2_enable_dc3co(dev_priv); |
1513 | mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work, |
1514 | dev_priv->psr.dc3co_exit_delay); |
1515 | |
1516 | unlock: |
1517 | mutex_unlock(&dev_priv->psr.lock);
1518 | } |
1519 | |
1520 | /** |
1521 | * intel_psr_flush - Flush PSR |
1522 | * @dev_priv: i915 device |
1523 | * @frontbuffer_bits: frontbuffer plane tracking bits |
1524 | * @origin: which operation caused the flush |
1525 | * |
1526 | * Since the hardware frontbuffer tracking has gaps we need to integrate |
1527 | * with the software frontbuffer tracking. This function gets called every |
1528 | * time frontbuffer rendering has completed and flushed out to memory. PSR |
1529 | * can be enabled again if no other frontbuffer relevant to PSR is dirty. |
1530 | * |
1531 | * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. |
1532 | */ |
1533 | void intel_psr_flush(struct drm_i915_private *dev_priv,
1534 | unsigned frontbuffer_bits, enum fb_op_origin origin) |
1535 | { |
1536 | if (!CAN_PSR(dev_priv))
1537 | return; |
1538 | |
1539 | if (origin == ORIGIN_FLIP) { |
1540 | tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin); |
1541 | return; |
1542 | } |
1543 | |
1544 | mutex_lock(&dev_priv->psr.lock);
1545 | if (!dev_priv->psr.enabled) {
1546 | mutex_unlock(&dev_priv->psr.lock);
1547 | return; |
1548 | } |
1549 | |
1550 | frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1551 | dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; |
1552 | |
1553 | /* By definition flush = invalidate + flush */ |
1554 | if (frontbuffer_bits) |
1555 | psr_force_hw_tracking_exit(dev_priv); |
1556 | |
1557 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
1558 | schedule_work(&dev_priv->psr.work); |
1559 | mutex_unlock(&dev_priv->psr.lock);
1560 | } |
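intel_psr_invalidate() and intel_psr_flush() above are the driver's bridge to the software frontbuffer tracking. A minimal sketch of the expected call order from a hypothetical caller, assuming ORIGIN_CPU is the fb_op_origin used for CPU rendering; the real tracking code lives elsewhere in the driver:

static void example_frontbuffer_write(struct drm_i915_private *i915,
                                      unsigned int frontbuffer_bits)
{
        /* rendering is about to dirty the frontbuffer: force PSR to exit */
        intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);

        /* ... CPU writes land in the framebuffer here ... */

        /* writes flushed to memory: PSR may be re-armed via psr.work */
        intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
}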
1561 | |
1562 | /** |
1563 | * intel_psr_init - Init basic PSR work and mutex. |
1564 | * @dev_priv: i915 device private |
1565 | * |
1566 | * This function is called only once at driver load to initialize basic |
1567 | * PSR stuff. |
1568 | */ |
1569 | void intel_psr_init(struct drm_i915_private *dev_priv)
1570 | {
1571 | if (!HAS_PSR(dev_priv))
1572 | return; |
1573 | |
1574 | if (!dev_priv->psr.sink_support) |
1575 | return; |
1576 | |
1577 | if (IS_HASWELL(dev_priv))
1578 | /*
1579 | * HSW doesn't have its PSR registers in the transcoder space,
1580 | * so set this to a value that, when subtracted from a register
1581 | * offset computed in transcoder space, yields the right HSW offset.
1582 | */
1583 | dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
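/*
 * A worked check, using the constants visible in the expansion above:
 * the adjustment is 0x6f800 - 0x64800 = 0xb000. Assuming the usual
 * 0x6f000 transcoder offset for TRANSCODER_EDP, the generic formula
 * would place EDP_PSR_CTL at 0x6f800, and subtracting 0xb000 lands on
 * 0x64800, where HSW actually keeps SRD_CTL.
 */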
1584 | |
1585 | if (dev_priv->params.enable_psr == -1) |
1586 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1587 | dev_priv->params.enable_psr = 0; |
1588 | |
1589 | /* Set link_standby x link_off defaults */ |
1590 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1591 | /* HSW and BDW require workarounds that we don't implement. */
1592 | dev_priv->psr.link_standby = false;
1593 | else if (INTEL_GEN(dev_priv) < 12)
1594 | /* For new platforms up to TGL let's respect VBT back again */ |
1595 | dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; |
1596 | |
1597 | INIT_WORK(&dev_priv->psr.work, intel_psr_work); |
1598 | INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work); |
1599 | rw_init(&dev_priv->psr.lock, "psrlk");
1600 | } |
1601 | |
1602 | static int psr_get_status_and_error_status(struct intel_dp *intel_dp, |
1603 | u8 *status, u8 *error_status) |
1604 | { |
1605 | struct drm_dp_aux *aux = &intel_dp->aux; |
1606 | int ret; |
1607 | |
1608 | ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
1609 | if (ret != 1)
1610 | return ret;
1611 |
1612 | ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
1613 | if (ret != 1)
1614 | return ret;
1615 |
1616 | *status = *status & DP_PSR_SINK_STATE_MASK;
1617 | |
1618 | return 0; |
1619 | } |
1620 | |
1621 | static void psr_alpm_check(struct intel_dp *intel_dp) |
1622 | { |
1623 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1624 | struct drm_dp_aux *aux = &intel_dp->aux; |
1625 | struct i915_psr *psr = &dev_priv->psr; |
1626 | u8 val; |
1627 | int r; |
1628 | |
1629 | if (!psr->psr2_enabled) |
1630 | return; |
1631 | |
1632 | r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
1633 | if (r != 1) {
1634 | drm_err(&dev_priv->drm, "Error reading ALPM status\n");
1635 | return; |
1636 | } |
1637 | |
1638 | if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
1639 | intel_psr_disable_locked(intel_dp);
1640 | psr->sink_not_reliable = true;
1641 | drm_dbg_kms(&dev_priv->drm,
1642 | "ALPM lock timeout error, disabling PSR\n");
1643 |
1644 | /* Clearing error */
1645 | drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
1646 | } |
1647 | } |
1648 | |
1649 | static void psr_capability_changed_check(struct intel_dp *intel_dp) |
1650 | { |
1651 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1652 | struct i915_psr *psr = &dev_priv->psr; |
1653 | u8 val; |
1654 | int r; |
1655 | |
1656 | r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
1657 | if (r != 1) {
1658 | drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
1659 | return; |
1660 | } |
1661 | |
1662 | if (val & DP_PSR_CAPS_CHANGE) {
1663 | intel_psr_disable_locked(intel_dp);
1664 | psr->sink_not_reliable = true;
1665 | drm_dbg_kms(&dev_priv->drm,
1666 | "Sink PSR capability changed, disabling PSR\n");
1667 |
1668 | /* Clearing it */
1669 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
1670 | } |
1671 | } |
1672 | |
1673 | void intel_psr_short_pulse(struct intel_dp *intel_dp) |
1674 | { |
1675 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1676 | struct i915_psr *psr = &dev_priv->psr;
1677 | u8 status, error_status;
1678 | const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
1679 | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
1680 | DP_PSR_LINK_CRC_ERROR;
1681 |
1682 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1683 | return;
1684 |
1685 | mutex_lock(&psr->lock);
1686 | |
1687 | if (!psr->enabled || psr->dp != intel_dp) |
1688 | goto exit; |
1689 | |
1690 | if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { |
1691 | drm_err(&dev_priv->drm,
1692 | "Error reading PSR status or error status\n");
1693 | goto exit; |
1694 | } |
1695 | |
1696 | if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
1697 | intel_psr_disable_locked(intel_dp);
1698 | psr->sink_not_reliable = true;
1699 | } |
1700 | |
1701 | if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
1702 | drm_dbg_kms(&dev_priv->drm,
1703 | "PSR sink internal error, disabling PSR\n");
1704 | if (error_status & DP_PSR_RFB_STORAGE_ERROR)
1705 | drm_dbg_kms(&dev_priv->drm,
1706 | "PSR RFB storage error, disabling PSR\n");
1707 | if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
1708 | drm_dbg_kms(&dev_priv->drm,
1709 | "PSR VSC SDP uncorrectable error, disabling PSR\n");
1710 | if (error_status & DP_PSR_LINK_CRC_ERROR)
1711 | drm_dbg_kms(&dev_priv->drm,
1712 | "PSR Link CRC error, disabling PSR\n");
1713 | |
1714 | if (error_status & ~errors) |
1715 | drm_err(&dev_priv->drm,
1716 | "PSR_ERROR_STATUS unhandled errors %x\n",
1717 | error_status & ~errors);
1718 | /* clear status register */
1719 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
1720 | |
1721 | psr_alpm_check(intel_dp); |
1722 | psr_capability_changed_check(intel_dp); |
1723 | |
1724 | exit: |
1725 | mutex_unlock(&psr->lock);
1726 | } |
1727 | |
1728 | bool intel_psr_enabled(struct intel_dp *intel_dp)
1729 | {
1730 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1731 | bool ret;
1732 |
1733 | if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1734 | return false;
1735 |
1736 | mutex_lock(&dev_priv->psr.lock);
1737 | ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
1738 | mutex_unlock(&dev_priv->psr.lock);
1739 | |
1740 | return ret; |
1741 | } |
1742 | |
1743 | void intel_psr_atomic_check(struct drm_connector *connector, |
1744 | struct drm_connector_state *old_state, |
1745 | struct drm_connector_state *new_state) |
1746 | { |
1747 | struct drm_i915_private *dev_priv = to_i915(connector->dev);
1748 | struct intel_connector *intel_connector; |
1749 | struct intel_digital_port *dig_port; |
1750 | struct drm_crtc_state *crtc_state; |
1751 | |
1752 | if (!CAN_PSR(dev_priv) || !new_state->crtc ||
1753 | !dev_priv->psr.force_mode_changed) |
1754 | return; |
1755 | |
1756 | intel_connector = to_intel_connector(connector);
Value stored to 'intel_connector' is never read | |
1757 | dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
1758 | if (dev_priv->psr.dp != &dig_port->dp) |
1759 | return; |
1760 | |
1761 | crtc_state = drm_atomic_get_new_crtc_state(new_state->state, |
1762 | new_state->crtc); |
1763 | crtc_state->mode_changed = true;
1764 | } |
1765 | |
1766 | void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp) |
1767 | { |
1768 | struct drm_i915_private *dev_priv;
1769 | |
1770 | if (!intel_dp) |
1771 | return; |
1772 | |
1773 | dev_priv = dp_to_i915(intel_dp); |
1774 | if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
1775 | return;
1776 |
1777 | dev_priv->psr.force_mode_changed = true;
1778 | } |