Bug Summary

File: dev/pci/drm/i915/display/intel_psr.c
Warning: line 132, column 3
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'u32'
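
The shift count in question comes from the per-transcoder bit macros used in the listing below: for a non-EDP transcoder the shift is ((trans) - TRANSCODER_A + 1) * 8. Assuming the transcoder enum layout from the i915 headers (TRANSCODER_A through TRANSCODER_D consecutive, TRANSCODER_EDP after them), TRANSCODER_D produces a shift count of (3 + 1) * 8 = 32, and C11 6.5.7p3 makes a shift undefined when the count is greater than or equal to the width of the promoted left operand. A minimal, self-contained reduction of the flagged pattern (hypothetical names, not driver code):

    /*
     * Reduction of the flagged pattern: the shift count can reach 32,
     * the full width of u32, which is undefined behavior in C.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    enum transcoder {
            TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D, TRANSCODER_EDP
    };

    /* Mirrors the EDP_PSR_POST_EXIT() shift: 8 bits per transcoder. */
    #define PSR_TRANS_SHIFT(trans) \
            ((trans) == TRANSCODER_EDP ? 0 : ((trans) - TRANSCODER_A + 1) * 8)

    int main(void)
    {
            enum transcoder trans = TRANSCODER_D;

            /* PSR_TRANS_SHIFT(TRANSCODER_D) == 32: undefined for a u32. */
            u32 bit = (u32)(1UL << 1) << PSR_TRANS_SHIFT(trans);

            printf("shift = %d, bit = %#x\n", PSR_TRANS_SHIFT(trans), bit);
            return 0;
    }

Note that the analyzer only reaches the flagged expression after assuming DISPLAY_VER < 12 (step 1 below), while transcoder D only exists on display version 12+ hardware, where the TGL_* bits are taken instead; the checker does not prove that this combination can occur at runtime, so the report may be a false positive in practice, even though the macro itself is not shift-safe for every enum value.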

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name intel_psr.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/display/intel_psr.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <drm/drm_atomic_helper.h>
25 #include <drm/drm_damage_helper.h>
26 
27 #include "display/intel_dp.h"
28 
29 #include "i915_drv.h"
30 #include "intel_atomic.h"
31 #include "intel_crtc.h"
32 #include "intel_de.h"
33 #include "intel_display_types.h"
34 #include "intel_dp_aux.h"
35 #include "intel_hdmi.h"
36 #include "intel_psr.h"
37 #include "intel_snps_phy.h"
38 #include "skl_universal_plane.h"
39
40 /**
41  * DOC: Panel Self Refresh (PSR/SRD)
42  *
43  * Since Haswell, the display controller supports Panel Self-Refresh on display
44  * panels which have a remote frame buffer (RFB) implemented according to the
45  * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
46  * standby states when the system is idle but the display is on, as it
47  * completely eliminates display refresh requests to DDR memory as long as the
48  * frame buffer for that display is unchanged.
49  *
50  * Panel Self Refresh must be supported by both Hardware (source) and
51  * Panel (sink).
52  *
53  * PSR saves power by caching the framebuffer in the panel RFB, which allows us
54  * to power down the link and memory controller. For DSI panels the same idea
55  * is called "manual mode".
56  *
57  * The implementation uses the hardware-based PSR support which automatically
58  * enters/exits self-refresh mode. The hardware takes care of sending the
59  * required DP aux message and could even retrain the link (that part isn't
60  * enabled yet though). The hardware also keeps track of any frontbuffer
61  * changes to know when to exit self-refresh mode again. Unfortunately that
62  * part doesn't work too well, hence why the i915 PSR support uses software
63  * frontbuffer tracking to make sure it doesn't miss a screen
64  * update. For this integration intel_psr_invalidate() and intel_psr_flush()
65  * get called by the frontbuffer tracking code. Note that because of locking
66  * issues the self-refresh re-enable code is done from a work queue, which
67  * must be correctly synchronized/cancelled when shutting down the pipe.
68  *
69  * DC3CO (DC3 clock off)
70  *
71  * On top of PSR2, GEN12 adds an intermediate power-savings state that turns
72  * the clock off automatically during PSR2 idle state.
73  * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
74  * entry/exit allows the HW to enter a low-power state even when page flipping
75  * periodically (for instance in a 30fps video playback scenario).
76  *
77  * Every time a flip occurs, PSR2 gets out of deep sleep state (if it was in
78  * it), DC3CO is enabled, and tgl_dc3co_disable_work is scheduled to run after
79  * 6 frames; if no other flip occurs and that function executes, DC3CO is
80  * disabled and PSR2 is configured to enter deep sleep, resetting again in case
81  * of another flip.
82  * Front buffer modifications do not trigger DC3CO activation on purpose as it
83  * would bring a lot of complexity and most modern systems will only
84  * use page flips.
85  */
86
87 static bool psr_global_enabled(struct intel_dp *intel_dp)
88 {
89         struct intel_connector *connector = intel_dp->attached_connector;
90         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
91 
92         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
93         case I915_PSR_DEBUG_DEFAULT:
94                 if (i915->params.enable_psr == -1)
95                         return connector->panel.vbt.psr.enable;
96                 return i915->params.enable_psr;
97         case I915_PSR_DEBUG_DISABLE:
98                 return false;
99         default:
100                 return true;
101         }
102 }
103
104 static bool psr2_global_enabled(struct intel_dp *intel_dp)
105 {
106         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
107 
108         switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
109         case I915_PSR_DEBUG_DISABLE:
110         case I915_PSR_DEBUG_FORCE_PSR1:
111                 return false;
112         default:
113                 if (i915->params.enable_psr == 1)
114                         return false;
115                 return true;
116         }
117 }
118
119 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
120 {
121         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
122 
123         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
124                 EDP_PSR_ERROR(intel_dp->psr.transcoder);
125 }
126
127 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
128 {
129         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
130 
131         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
    [Step 7] '?' condition is false
132                 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
    [Step 8] '?' condition is false
    [Step 9] The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'u32'
133 }
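
One possible hardening of these helpers (a sketch under the assumptions stated in the summary above, not the upstream fix) is to perform the shift on a 64-bit value, where a count of 32 is still defined, and truncate afterwards:

    /* Hypothetical shift-safe variant of the EDP_PSR_* bit helpers. */
    #define EDP_PSR_TRANS_SHIFT(trans) \
            ((trans) == TRANSCODER_EDP ? 0 : ((trans) - TRANSCODER_A + 1) * 8)
    #define EDP_PSR_POST_EXIT_SAFE(trans) \
            ((u32)((1ULL << 1) << EDP_PSR_TRANS_SHIFT(trans)))

With a 1ULL base the intermediate is 64 bits wide, so a shift of 32 is well-defined; the u32 cast then yields 0 for a transcoder whose bit would not fit in the 32-bit register anyway. Whether that silent-zero semantics is acceptable, versus guaranteeing this branch is never taken for such transcoders, is a driver design decision.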
134
135 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
136 {
137         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
138 
139         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
140                 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
141 }
142
143 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
144 {
145         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
146 
147         return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
148                 EDP_PSR_MASK(intel_dp->psr.transcoder);
149 }
150
151 static void psr_irq_control(struct intel_dp *intel_dp)
152 {
153         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
154         i915_reg_t imr_reg;
155         u32 mask, val;
156 
157         if (DISPLAY_VER(dev_priv) >= 12)
158                 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
159         else
160                 imr_reg = EDP_PSR_IMR;
161 
162         mask = psr_irq_psr_error_bit_get(intel_dp);
163         if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
164                 mask |= psr_irq_post_exit_bit_get(intel_dp) |
165                         psr_irq_pre_entry_bit_get(intel_dp);
166 
167         val = intel_de_read(dev_priv, imr_reg);
168         val &= ~psr_irq_mask_get(intel_dp);
169         val |= ~mask;
170         intel_de_write(dev_priv, imr_reg, val);
171 }
172
173 static void psr_event_print(struct drm_i915_private *i915,
174                             u32 val, bool psr2_enabled)
175 {
176         drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
177         if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
178                 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
179         if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
180                 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
181         if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
182                 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
183         if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
184                 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
185         if (val & PSR_EVENT_GRAPHICS_RESET)
186                 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
187         if (val & PSR_EVENT_PCH_INTERRUPT)
188                 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
189         if (val & PSR_EVENT_MEMORY_UP)
190                 drm_dbg_kms(&i915->drm, "\tMemory up\n");
191         if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
192                 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
193         if (val & PSR_EVENT_WD_TIMER_EXPIRE)
194                 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
195         if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
196                 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
197         if (val & PSR_EVENT_REGISTER_UPDATE)
198                 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
199         if (val & PSR_EVENT_HDCP_ENABLE)
200                 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
201         if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
202                 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
203         if (val & PSR_EVENT_VBI_ENABLE)
204                 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
205         if (val & PSR_EVENT_LPSP_MODE_EXIT)
206                 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
207         if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
208                 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
209 }
210
211 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
212 {
213         enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
214         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
215         ktime_t time_ns = ktime_get();
216         i915_reg_t imr_reg;
217 
218         if (DISPLAY_VER(dev_priv) >= 12)
    [Step 1] Assuming field 'ver' is < 12
    [Step 2] Taking false branch
219                 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
220         else
221                 imr_reg = EDP_PSR_IMR;
222 
223         if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
    [Step 3] Assuming the condition is true
    [Step 4] Taking true branch
224                 intel_dp->psr.last_entry_attempt = time_ns;
225                 drm_dbg_kms(&dev_priv->drm,
    [Step 5] '?' condition is true
226                             "[transcoder %s] PSR entry attempt in 2 vblanks\n",
227                             transcoder_name(cpu_transcoder));
228         }
229 
230         if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
    [Step 6] Calling 'psr_irq_post_exit_bit_get'
231                 intel_dp->psr.last_exit = time_ns;
232                 drm_dbg_kms(&dev_priv->drm,
233                             "[transcoder %s] PSR exit completed\n",
234                             transcoder_name(cpu_transcoder));
235 
236                 if (DISPLAY_VER(dev_priv) >= 9) {
237                         u32 val = intel_de_read(dev_priv,
238                                                 PSR_EVENT(cpu_transcoder));
239                         bool psr2_enabled = intel_dp->psr.psr2_enabled;
240 
241                         intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
242                                        val);
243                         psr_event_print(dev_priv, val, psr2_enabled);
244                 }
245         }
246 
247         if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
248                 u32 val;
249 
250                 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
251                          transcoder_name(cpu_transcoder));
252 
253                 intel_dp->psr.irq_aux_error = true;
254 
255                 /*
256                  * If this interrupt is not masked it will keep
257                  * interrupting so fast that it prevents the scheduled
258                  * work from running.
259                  * Also, after a PSR error we don't want to arm PSR
260                  * again, so we don't care about unmasking the interrupt
261                  * or unsetting irq_aux_error.
262                  */
263                 val = intel_de_read(dev_priv, imr_reg);
264                 val |= psr_irq_psr_error_bit_get(intel_dp);
265                 intel_de_write(dev_priv, imr_reg, val);
266 
267                 schedule_work(&intel_dp->psr.work);
268         }
269 }
270
271 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
272 {
273         u8 alpm_caps = 0;
274 
275         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
276                               &alpm_caps) != 1)
277                 return false;
278         return alpm_caps & DP_ALPM_CAP;
279 }
280
281 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
282 {
283         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
284         u8 val = 8; /* assume the worst if we can't read the value */
285 
286         if (drm_dp_dpcd_readb(&intel_dp->aux,
287                               DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
288                 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
289         else
290                 drm_dbg_kms(&i915->drm,
291                             "Unable to get sink synchronization latency, assuming 8 frames\n");
292         return val;
293 }
294
295 static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
296 {
297         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
298         ssize_t r;
299         u16 w;
300         u8 y;
301 
302         /* If the sink doesn't have specific granularity requirements, set legacy ones */
303         if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
304                 /* As PSR2 HW sends full lines, we do not care about x granularity */
305                 w = 4;
306                 y = 4;
307                 goto exit;
308         }
309 
310         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
311         if (r != 2)
312                 drm_dbg_kms(&i915->drm,
313                             "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
314         /*
315          * Spec says that if the value read is 0 the default granularity should
316          * be used instead.
317          */
318         if (r != 2 || w == 0)
319                 w = 4;
320 
321         r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
322         if (r != 1) {
323                 drm_dbg_kms(&i915->drm,
324                             "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
325                 y = 4;
326         }
327         if (y == 0)
328                 y = 1;
329 
330 exit:
331         intel_dp->psr.su_w_granularity = w;
332         intel_dp->psr.su_y_granularity = y;
333 }
334
335 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
336 {
337         struct drm_i915_private *dev_priv =
338                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
339 
340         drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
341                          sizeof(intel_dp->psr_dpcd));
342 
343         if (!intel_dp->psr_dpcd[0])
344                 return;
345         drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
346                     intel_dp->psr_dpcd[0]);
347 
348         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
349                 drm_dbg_kms(&dev_priv->drm,
350                             "PSR support not currently available for this panel\n");
351                 return;
352         }
353 
354         if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
355                 drm_dbg_kms(&dev_priv->drm,
356                             "Panel lacks power state control, PSR cannot be enabled\n");
357                 return;
358         }
359 
360         intel_dp->psr.sink_support = true;
361         intel_dp->psr.sink_sync_latency =
362                 intel_dp_get_sink_sync_latency(intel_dp);
363 
364         if (DISPLAY_VER(dev_priv) >= 9 &&
365             (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
366                 bool y_req = intel_dp->psr_dpcd[1] &
367                              DP_PSR2_SU_Y_COORDINATE_REQUIRED;
368                 bool alpm = intel_dp_get_alpm_status(intel_dp);
369 
370                 /*
371                  * All panels that support PSR version 03h (PSR2 +
372                  * Y-coordinate) can handle Y-coordinates in VSC, but we are
373                  * only sure that it is going to be used when required by the
374                  * panel. This way the panel is capable of doing selective
375                  * updates without an aux frame sync.
376                  *
377                  * To support PSR version 02h and PSR version 03h without
378                  * Y-coordinate requirement panels we would need to enable
379                  * GTC first.
380                  */
381                 intel_dp->psr.sink_psr2_support = y_req && alpm;
382                 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
383                             intel_dp->psr.sink_psr2_support ? "" : "not ");
384 
385                 if (intel_dp->psr.sink_psr2_support) {
386                         intel_dp->psr.colorimetry_support =
387                                 intel_dp_get_colorimetry_status(intel_dp);
388                         intel_dp_get_su_granularity(intel_dp);
389                 }
390         }
391 }
392
393 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
394 {
395         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
396         u8 dpcd_val = DP_PSR_ENABLE;
397 
398         /* Enable ALPM at sink for psr2 */
399         if (intel_dp->psr.psr2_enabled) {
400                 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
401                                    DP_ALPM_ENABLE |
402                                    DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
403 
404                 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
405         } else {
406                 if (intel_dp->psr.link_standby)
407                         dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
408 
409                 if (DISPLAY_VER(dev_priv) >= 8)
410                         dpcd_val |= DP_PSR_CRC_VERIFICATION;
411         }
412 
413         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
414                 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
415 
416         drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
417 
418         drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
419 }
420
421 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
422 {
423         struct intel_connector *connector = intel_dp->attached_connector;
424         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
425         u32 val = 0;
426 
427         if (DISPLAY_VER(dev_priv) >= 11)
428                 val |= EDP_PSR_TP4_TIME_0US;
429 
430         if (dev_priv->params.psr_safest_params) {
431                 val |= EDP_PSR_TP1_TIME_2500us;
432                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
433                 goto check_tp3_sel;
434         }
435 
436         if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
437                 val |= EDP_PSR_TP1_TIME_0us;
438         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
439                 val |= EDP_PSR_TP1_TIME_100us;
440         else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
441                 val |= EDP_PSR_TP1_TIME_500us;
442         else
443                 val |= EDP_PSR_TP1_TIME_2500us;
444 
445         if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
446                 val |= EDP_PSR_TP2_TP3_TIME_0us;
447         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
448                 val |= EDP_PSR_TP2_TP3_TIME_100us;
449         else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
450                 val |= EDP_PSR_TP2_TP3_TIME_500us;
451         else
452                 val |= EDP_PSR_TP2_TP3_TIME_2500us;
453 
454 check_tp3_sel:
455         if (intel_dp_source_supports_tps3(dev_priv) &&
456             drm_dp_tps3_supported(intel_dp->dpcd))
457                 val |= EDP_PSR_TP1_TP3_SEL;
458         else
459                 val |= EDP_PSR_TP1_TP2_SEL;
460 
461         return val;
462 }
463
464 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
465 {
466         struct intel_connector *connector = intel_dp->attached_connector;
467         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
468         int idle_frames;
469 
470         /* Let's use 6 as the minimum to cover all known cases including the
471          * off-by-one issue that HW has in some cases.
472          */
473         idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
474         idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
475 
476         if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
477                 idle_frames = 0xf;
478 
479         return idle_frames;
480 }
481
482 static void hsw_activate_psr1(struct intel_dp *intel_dp)
483 {
484         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
485         u32 max_sleep_time = 0x1f;
486         u32 val = EDP_PSR_ENABLE;
487 
488         val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
489 
490         val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
491         if (IS_HASWELL(dev_priv))
492                 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
493 
494         if (intel_dp->psr.link_standby)
495                 val |= EDP_PSR_LINK_STANDBY;
496 
497         val |= intel_psr1_get_tp_time(intel_dp);
498 
499         if (DISPLAY_VER(dev_priv) >= 8)
500                 val |= EDP_PSR_CRC_ENABLE;
501 
502         val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
503                 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
504         intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
505 }
506
507 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
508 {
509         struct intel_connector *connector = intel_dp->attached_connector;
510         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
511         u32 val = 0;
512 
513         if (dev_priv->params.psr_safest_params)
514                 return EDP_PSR2_TP2_TIME_2500us;
515 
516         if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
517             connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
518                 val |= EDP_PSR2_TP2_TIME_50us;
519         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
520                 val |= EDP_PSR2_TP2_TIME_100us;
521         else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
522                 val |= EDP_PSR2_TP2_TIME_500us;
523         else
524                 val |= EDP_PSR2_TP2_TIME_2500us;
525 
526         return val;
527 }
528
529 static void hsw_activate_psr2(struct intel_dp *intel_dp)
530 {
531         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
532         u32 val = EDP_PSR2_ENABLE;
533 
534         val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
535 
536         if (!IS_ALDERLAKE_P(dev_priv))
537                 val |= EDP_SU_TRACK_ENABLE;
538 
539         if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
540                 val |= EDP_Y_COORDINATE_ENABLE;
541 
542         val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
543         val |= intel_psr2_get_tp_time(intel_dp);
544 
545         if (DISPLAY_VER(dev_priv) >= 12) {
546                 if (intel_dp->psr.io_wake_lines < 9 &&
547                     intel_dp->psr.fast_wake_lines < 9)
548                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
549                 else
550                         val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
551         }
552 
553         /* Wa_22012278275:adl-p */
554         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
555                 static const u8 map[] = {
556                         2, /* 5 lines */
557                         1, /* 6 lines */
558                         0, /* 7 lines */
559                         3, /* 8 lines */
560                         6, /* 9 lines */
561                         5, /* 10 lines */
562                         4, /* 11 lines */
563                         7, /* 12 lines */
564                 };
565                 /*
566                  * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
567                  * comments below for more information
568                  */
569                 u32 tmp;
570 
571                 tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
572                 tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
573                 val |= tmp;
574 
575                 tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
576                 tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
577                 val |= tmp;
578         } else if (DISPLAY_VER(dev_priv) >= 12) {
579                 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
580                 val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
581         } else if (DISPLAY_VER(dev_priv) >= 9) {
582                 val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
583                 val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
584         }
585 
586         if (intel_dp->psr.req_psr2_sdp_prior_scanline)
587                 val |= EDP_PSR2_SU_SDP_SCANLINE;
588 
589         if (intel_dp->psr.psr2_sel_fetch_enabled) {
590                 u32 tmp;
591 
592                 /* Wa_1408330847 */
593                 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
594                         intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
595                                      DIS_RAM_BYPASS_PSR2_MAN_TRACK,
596                                      DIS_RAM_BYPASS_PSR2_MAN_TRACK);
597 
598                 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
599                 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
600         } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
601                 intel_de_write(dev_priv,
602                                PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
603         }
604 
605         /*
606          * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
607          * recommends keeping this bit unset while PSR2 is enabled.
608          */
609         intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
610 
611         intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
612 }
613
614 static bool
615 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
616 {
617         if (IS_ALDERLAKE_P(dev_priv))
618                 return trans == TRANSCODER_A || trans == TRANSCODER_B;
619         else if (DISPLAY_VER(dev_priv) >= 12)
620                 return trans == TRANSCODER_A;
621         else
622                 return trans == TRANSCODER_EDP;
623 }
624
625 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
626 {
627         if (!cstate || !cstate->hw.active)
628                 return 0;
629 
630         return DIV_ROUND_UP(1000 * 1000,
631                             drm_mode_vrefresh(&cstate->hw.adjusted_mode));
632 }
633
634 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
635                                      u32 idle_frames)
636 {
637         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
638         u32 val;
639 
640         idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
641         val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
642         val &= ~EDP_PSR2_IDLE_FRAME_MASK;
643         val |= idle_frames;
644         intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
645 }
646
647 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
648 {
649         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
650 
651         psr2_program_idle_frames(intel_dp, 0);
652         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
653 }
654
655 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
656 {
657         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
658 
659         intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
660         psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
661 }
662
663 static void tgl_dc3co_disable_work(struct work_struct *work)
664 {
665         struct intel_dp *intel_dp =
666                 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
667 
668         mutex_lock(&intel_dp->psr.lock);
669         /* If delayed work is pending, it is not idle */
670         if (delayed_work_pending(&intel_dp->psr.dc3co_work))
671                 goto unlock;
672 
673         tgl_psr2_disable_dc3co(intel_dp);
674 unlock:
675         mutex_unlock(&intel_dp->psr.lock);
676 }
677
678 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
679 {
680         if (!intel_dp->psr.dc3co_exitline)
681                 return;
682 
683         cancel_delayed_work(&intel_dp->psr.dc3co_work);
684         /* Before PSR2 exit, disallow DC3CO */
685         tgl_psr2_disable_dc3co(intel_dp);
686 }
687
688 static bool
689 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
690                               struct intel_crtc_state *crtc_state)
691 {
692         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
693         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
694         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
695         enum port port = dig_port->base.port;
696 
697         if (IS_ALDERLAKE_P(dev_priv))
698                 return pipe <= PIPE_B && port <= PORT_B;
699         else
700                 return pipe == PIPE_A && port == PORT_A;
701 }
702
703 static void
704 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
705                                   struct intel_crtc_state *crtc_state)
706 {
707         const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
708         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
709         u32 exit_scanlines;
710 
711         /*
712          * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
713          * disable DC3CO until the changed dc3co activating/deactivating sequence
714          * is applied. B.Specs:49196
715          */
716         return;
717 
718         /*
719          * DMC's DC3CO exit mechanism has an issue with Selective Fetch
720          * TODO: when the issue is addressed, this restriction should be removed.
721          */
722         if (crtc_state->enable_psr2_sel_fetch)
723                 return;
724 
725         if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
726                 return;
727 
728         if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
729                 return;
730 
731         /* Wa_16011303918:adl-p */
732         if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
733                 return;
734 
735         /*
736          * DC3CO Exit time 200us B.Spec 49196
737          * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
738          */
739         exit_scanlines =
740                 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
741 
742         if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
743                 return;
744 
745         crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
746 }
747
748 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
749                                               struct intel_crtc_state *crtc_state)
750 {
751         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
752 
753         if (!dev_priv->params.enable_psr2_sel_fetch &&
754             intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
755                 drm_dbg_kms(&dev_priv->drm,
756                             "PSR2 sel fetch not enabled, disabled by parameter\n");
757                 return false;
758         }
759 
760         if (crtc_state->uapi.async_flip) {
761                 drm_dbg_kms(&dev_priv->drm,
762                             "PSR2 sel fetch not enabled, async flip enabled\n");
763                 return false;
764         }
765 
766         /* Wa_14010254185 Wa_14010103792 */
767         if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
768                 drm_dbg_kms(&dev_priv->drm,
769                             "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
770                 return false;
771         }
772 
773         return crtc_state->enable_psr2_sel_fetch = true;
774 }
775
776 static bool psr2_granularity_check(struct intel_dp *intel_dp,
777                                    struct intel_crtc_state *crtc_state)
778 {
779         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
780         const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
781         const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
782         u16 y_granularity = 0;
783 
784         /* PSR2 HW only sends full lines, so we only need to validate the width */
785         if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
786                 return false;
787 
788         if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
789                 return false;
790 
791         /* HW tracking is only aligned to 4 lines */
792         if (!crtc_state->enable_psr2_sel_fetch)
793                 return intel_dp->psr.su_y_granularity == 4;
794 
795         /*
796          * adl_p has 1 line granularity. For other platforms with SW tracking we
797          * can adjust the y coordinates to match sink requirement if multiple of
798          * 4.
799          */
800         if (IS_ALDERLAKE_P(dev_priv))
801                 y_granularity = intel_dp->psr.su_y_granularity;
802         else if (intel_dp->psr.su_y_granularity <= 2)
803                 y_granularity = 4;
804         else if ((intel_dp->psr.su_y_granularity % 4) == 0)
805                 y_granularity = intel_dp->psr.su_y_granularity;
806 
807         if (y_granularity == 0 || crtc_vdisplay % y_granularity)
808                 return false;
809 
810         crtc_state->su_y_granularity = y_granularity;
811         return true;
812 }
813
814 static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
815                                                         struct intel_crtc_state *crtc_state)
816 {
817         const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
818         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
819         u32 hblank_total, hblank_ns, req_ns;
820 
821         hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
822         hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
823 
824         /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
825         req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
826 
827         if ((hblank_ns - req_ns) > 100)
828                 return true;
829 
830         /* Not supported <13 / Wa_22012279113:adl-p */
831         if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
832                 return false;
833 
834         crtc_state->req_psr2_sdp_prior_scanline = true;
835         return true;
836 }
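
A worked example of the hblank check above, with illustrative numbers (not taken from this report), assuming port_clock and crtc_clock are in kHz as in the drm mode structures: a 4-lane HBR link has port_clock = 270000, so req_ns = ((60 / 4) + 11) * 1000 / 270 ≈ 96 ns; a mode with hblank_total = 160 pixels and crtc_clock = 533250 gives hblank_ns = 1000000 * 160 / 533250 ≈ 300 ns, so hblank_ns - req_ns ≈ 204 > 100 and the function returns true without needing the prior-scanline indication.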
837
838 static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
839                                      struct intel_crtc_state *crtc_state)
840 {
841         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
842         int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
843         u8 max_wake_lines;
844 
845         if (DISPLAY_VER(i915) >= 12) {
846                 io_wake_time = 42;
847                 /*
848                  * According to Bspec it's 42us, but based on testing
849                  * it is not enough -> use 45 us.
850                  */
851                 fast_wake_time = 45;
852                 max_wake_lines = 12;
853         } else {
854                 io_wake_time = 50;
855                 fast_wake_time = 32;
856                 max_wake_lines = 8;
857         }
858 
859         io_wake_lines = intel_usecs_to_scanlines(
860                 &crtc_state->hw.adjusted_mode, io_wake_time);
861         fast_wake_lines = intel_usecs_to_scanlines(
862                 &crtc_state->hw.adjusted_mode, fast_wake_time);
863 
864         if (io_wake_lines > max_wake_lines ||
865             fast_wake_lines > max_wake_lines)
866                 return false;
867 
868         if (i915->params.psr_safest_params)
869                 io_wake_lines = fast_wake_lines = max_wake_lines;
870 
871         /* According to Bspec lower limit should be set as 7 lines. */
872         intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
873         intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
874 
875         return true;
876 }
877
878static bool_Bool intel_psr2_config_valid(struct intel_dp *intel_dp,
879 struct intel_crtc_state *crtc_state)
880{
881 struct drm_i915_privateinteldrm_softc *dev_priv = dp_to_i915(intel_dp)to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
882 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
883 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
884 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
885
886 if (!intel_dp->psr.sink_psr2_support)
887 return false0;
888
889 /* JSL and EHL only supports eDP 1.3 */
890 if (IS_JSL_EHL(dev_priv)(IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || IS_PLATFORM(dev_priv
, INTEL_ELKHARTLAKE))
) {
891 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not supported by phy\n"
)
;
892 return false0;
893 }
894
895 /* Wa_16011181250 */
896 if (IS_ROCKETLAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) || IS_ALDERLAKE_S(dev_priv)IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S) ||
897 IS_DG2(dev_priv)IS_PLATFORM(dev_priv, INTEL_DG2)) {
898 drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 is defeatured for this platform\n"
)
;
899 return false0;
900 }
901
902 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)(IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P) && (({ int __ret
= !!((((&(dev_priv)->__runtime)->step.display_step
) == STEP_NONE)); if (__ret) printf("%s %s: " "%s", dev_driver_string
(((&(dev_priv)->drm))->dev), "", "drm_WARN_ON(" "((&(dev_priv)->__runtime)->step.display_step) == STEP_NONE"
")"); __builtin_expect(!!(__ret), 0); }), ((&(dev_priv)->
__runtime)->step.display_step) >= (STEP_A0) && (
(&(dev_priv)->__runtime)->step.display_step) < (
STEP_B0)))
) {
903 drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not completely functional in this stepping\n"
)
;
904 return false0;
905 }
906
907 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
908 drm_dbg_kms(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not supported in transcoder %s\n"
, transcoder_name(crtc_state->cpu_transcoder))
909 "PSR2 not supported in transcoder %s\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not supported in transcoder %s\n"
, transcoder_name(crtc_state->cpu_transcoder))
910 transcoder_name(crtc_state->cpu_transcoder))__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not supported in transcoder %s\n"
, transcoder_name(crtc_state->cpu_transcoder))
;
911 return false0;
912 }
913
914 if (!psr2_global_enabled(intel_dp)) {
915 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 disabled by flag\n"
)
;
916 return false0;
917 }
918
919 /*
920 * DSC and PSR2 cannot be enabled simultaneously. If a requested
921 * resolution requires DSC to be enabled, priority is given to DSC
922 * over PSR2.
923 */
924 if (crtc_state->dsc.compression_enable) {
925 drm_dbg_kms(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 cannot be enabled since DSC is enabled\n"
)
926 "PSR2 cannot be enabled since DSC is enabled\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 cannot be enabled since DSC is enabled\n"
)
;
927 return false0;
928 }
929
930 if (crtc_state->crc_enabled) {
931 drm_dbg_kms(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not enabled because it would inhibit pipe CRC calculation\n"
)
932 "PSR2 not enabled because it would inhibit pipe CRC calculation\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "PSR2 not enabled because it would inhibit pipe CRC calculation\n"
)
;
933 return false0;
934 }
935
936 	if (DISPLAY_VER(dev_priv) >= 12) {
937 		psr_max_h = 5120;
938 		psr_max_v = 3200;
939 		max_bpp = 30;
940 	} else if (DISPLAY_VER(dev_priv) >= 10) {
941 		psr_max_h = 4096;
942 		psr_max_v = 2304;
943 		max_bpp = 24;
944 	} else if (DISPLAY_VER(dev_priv) == 9) {
945 		psr_max_h = 3640;
946 		psr_max_v = 2304;
947 		max_bpp = 24;
948 	}
949
950 	if (crtc_state->pipe_bpp > max_bpp) {
951 		drm_dbg_kms(&dev_priv->drm,
952 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
953 			    crtc_state->pipe_bpp, max_bpp);
954 		return false;
955 	}
956
957 /* Wa_16011303918:adl-p */
958 	if (crtc_state->vrr.enable &&
959 	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
960 		drm_dbg_kms(&dev_priv->drm,
961 			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
962 		return false;
963 	}
964
965 	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
966 		drm_dbg_kms(&dev_priv->drm,
967 			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
968 		return false;
969 	}
970
971 	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
972 		drm_dbg_kms(&dev_priv->drm,
973 			    "PSR2 not enabled, Unable to use long enough wake times\n");
974 		return false;
975 	}
976
977 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
978 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
979 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
980 			drm_dbg_kms(&dev_priv->drm,
981 				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
982 			return false;
983 		}
984 	}
985
986 	/* Wa_2209313811 */
987 	if (!crtc_state->enable_psr2_sel_fetch &&
988 	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
989 		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
990 		goto unsupported;
991 	}
992
993 	if (!psr2_granularity_check(intel_dp, crtc_state)) {
994 		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
995 		goto unsupported;
996 	}
997
998 if (!crtc_state->enable_psr2_sel_fetch &&
999 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1000 		drm_dbg_kms(&dev_priv->drm,
1001 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1002 			    crtc_hdisplay, crtc_vdisplay,
1003 			    psr_max_h, psr_max_v);
1004 		goto unsupported;
1005 	}
1006
1007 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1008 	return true;
1009
1010 unsupported:
1011 	crtc_state->enable_psr2_sel_fetch = false;
1012 	return false;
1013 }
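
The DISPLAY_VER() ladder at lines 936-948 amounts to a small per-version limits table. A minimal standalone sketch of that mapping (hypothetical helper, not part of the driver):

struct psr2_limits { int max_h, max_v, max_bpp; };

/* Sketch only: constants mirror lines 936-948; versions below 9 keep the
 * caller's initial values, so zeros stand in for "no limit set" here. */
static struct psr2_limits psr2_limits_for(int display_ver)
{
	if (display_ver >= 12)
		return (struct psr2_limits){ 5120, 3200, 30 };
	if (display_ver >= 10)
		return (struct psr2_limits){ 4096, 2304, 24 };
	if (display_ver == 9)
		return (struct psr2_limits){ 3640, 2304, 24 };
	return (struct psr2_limits){ 0, 0, 0 };
}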
1014
1015void intel_psr_compute_config(struct intel_dp *intel_dp,
1016 struct intel_crtc_state *crtc_state,
1017 struct drm_connector_state *conn_state)
1018{
1019 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1020 const struct drm_display_mode *adjusted_mode =
1021 &crtc_state->hw.adjusted_mode;
1022 int psr_setup_time;
1023
1024 /*
1025 * Current PSR panels don't work reliably with VRR enabled
1026 * So if VRR is enabled, do not enable PSR.
1027 */
1028 if (crtc_state->vrr.enable)
1029 return;
1030
1031 	if (!CAN_PSR(intel_dp))
1032 		return;
1033
1034 	if (!psr_global_enabled(intel_dp)) {
1035 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1036 		return;
1037 	}
1038
1039 	if (intel_dp->psr.sink_not_reliable) {
1040 		drm_dbg_kms(&dev_priv->drm,
1041 			    "PSR sink implementation is not reliable\n");
1042 		return;
1043 	}
1044
1045 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1046 		drm_dbg_kms(&dev_priv->drm,
1047 			    "PSR condition failed: Interlaced mode enabled\n");
1048 		return;
1049 	}
1050
1051 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1052 if (psr_setup_time < 0) {
1053 		drm_dbg_kms(&dev_priv->drm,
1054 			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1055 			    intel_dp->psr_dpcd[1]);
1056 		return;
1057 	}
1058
1059 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1060 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1061 		drm_dbg_kms(&dev_priv->drm,
1062 			    "PSR condition failed: PSR setup time (%d us) too long\n",
1063 			    psr_setup_time);
1064 return;
1065 }
1066
1067 	crtc_state->has_psr = true;
1068 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
1069
1070 	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1071 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1072 				     &crtc_state->psr_vsc);
1073 }
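
The check at lines 1059-1060 compares the sink's PSR setup time, converted to scanlines, against the vertical blanking interval. A standalone sketch of the usual round-up conversion (assumed here; crtc_clock is in kHz, crtc_htotal in pixels):

#include <stdint.h>

/* usecs * crtc_clock gives pixels scanned out in that time; divide by
 * pixels per line, rounding up, to get scanlines (sketch). */
static uint32_t usecs_to_scanlines(uint32_t usecs, uint32_t crtc_clock_khz,
				   uint32_t crtc_htotal)
{
	uint64_t pixels = (uint64_t)usecs * crtc_clock_khz;

	return (uint32_t)((pixels + 1000ULL * crtc_htotal - 1) /
			  (1000ULL * crtc_htotal));
}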
1074
1075void intel_psr_get_config(struct intel_encoder *encoder,
1076 struct intel_crtc_state *pipe_config)
1077{
1078 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1079 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1080 	struct intel_dp *intel_dp;
1081 	u32 val;
1082
1083 	if (!dig_port)
1084 		return;
1085
1086 	intel_dp = &dig_port->dp;
1087 	if (!CAN_PSR(intel_dp))
1088 		return;
1089
1090 	mutex_lock(&intel_dp->psr.lock);
1091 if (!intel_dp->psr.enabled)
1092 goto unlock;
1093
1094 /*
1095 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
1096 * enabled/disabled because of frontbuffer tracking and others.
1097 */
1098 	pipe_config->has_psr = true;
1099 	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
1100 	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1101
1102 	if (!intel_dp->psr.psr2_enabled)
1103 		goto unlock;
1104
1105 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1106 		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
1107 		if (val & PSR2_MAN_TRK_CTL_ENABLE)
1108 			pipe_config->enable_psr2_sel_fetch = true;
1109 	}
1110
1111 	if (DISPLAY_VER(dev_priv) >= 12) {
1112 		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
1113 		val &= EXITLINE_MASK;
1114 		pipe_config->dc3co_exitline = val;
1115 	}
1116 unlock:
1117 	mutex_unlock(&intel_dp->psr.lock);
1118 }
1119
1120static void intel_psr_activate(struct intel_dp *intel_dp)
1121{
1122 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1123 	enum transcoder transcoder = intel_dp->psr.transcoder;
1124
1125 	if (transcoder_has_psr2(dev_priv, transcoder))
1126 		drm_WARN_ON(&dev_priv->drm,
1127 			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
1128
1129 	drm_WARN_ON(&dev_priv->drm,
1130 		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
1131 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1132 	lockdep_assert_held(&intel_dp->psr.lock);
1133
1134 	/* psr1 and psr2 are mutually exclusive. */
1135 	if (intel_dp->psr.psr2_enabled)
1136 		hsw_activate_psr2(intel_dp);
1137 	else
1138 		hsw_activate_psr1(intel_dp);
1139
1140 	intel_dp->psr.active = true;
1141 }
1142
1143static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
1144{
1145 switch (intel_dp->psr.pipe) {
1146 case PIPE_A:
1147 		return LATENCY_REPORTING_REMOVED_PIPE_A;
1148 	case PIPE_B:
1149 		return LATENCY_REPORTING_REMOVED_PIPE_B;
1150 	case PIPE_C:
1151 		return LATENCY_REPORTING_REMOVED_PIPE_C;
1152 	default:
1153 		MISSING_CASE(intel_dp->psr.pipe);
1154 		return 0;
1155 	}
1156 }
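
Per the expanded constants, LATENCY_REPORTING_REMOVED_PIPE_A/B/C are the consecutive bits 23/24/25, so the switch above reduces to a shift. A sketch with a hypothetical helper name:

#include <stdint.h>

/* Pipes A..C (0..2) map to bits 23..25 of GEN8_CHICKEN_DCPR_1. */
static uint32_t latency_reporting_removed_bit(int pipe)
{
	return (pipe >= 0 && pipe <= 2) ? 1u << (23 + pipe) : 0;
}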
1157
1158static void intel_psr_enable_source(struct intel_dp *intel_dp,
1159 const struct intel_crtc_state *crtc_state)
1160{
1161 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1162 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1163 u32 mask;
1164
1165 /*
1166 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
1167 * mask LPSP to avoid dependency on other drivers that might block
1168 * runtime_pm besides preventing other hw tracking issues now we
1169 * can rely on frontbuffer tracking.
1170 */
1171 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1172 	       EDP_PSR_DEBUG_MASK_HPD |
1173 	       EDP_PSR_DEBUG_MASK_LPSP |
1174 	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1175
1176 	if (DISPLAY_VER(dev_priv) < 11)
1177 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1178
1179 	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1180 		       mask);
1181
1182 psr_irq_control(intel_dp);
1183
1184 if (intel_dp->psr.dc3co_exitline) {
1185 u32 val;
1186
1187 /*
1188 * TODO: if future platforms supports DC3CO in more than one
1189 * transcoder, EXITLINE will need to be unset when disabling PSR
1190 */
1191 		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
1192 		val &= ~EXITLINE_MASK;
1193 		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
1194 		val |= EXITLINE_ENABLE;
1195 		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
1196 	}
1197
1198 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1199 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1200 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1201 			     IGNORE_PSR2_HW_TRACKING : 0);
1202
1203 	if (intel_dp->psr.psr2_enabled) {
1204 		if (DISPLAY_VER(dev_priv) == 9)
1205 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1206 				     PSR2_VSC_ENABLE_PROG_HEADER |
1207 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1208
1209 		/*
1210 		 * Wa_16014451276:adlp
1211 		 * All supported adlp panels have 1-based X granularity, this may
1212 		 * cause issues if non-supported panels are used.
1213 		 */
1214 		if (IS_ALDERLAKE_P(dev_priv))
1215 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1216 				     ADLP_1_BASED_X_GRANULARITY);
1217
1218 		/* Wa_16011168373:adl-p */
1219 		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1220 			intel_de_rmw(dev_priv,
1221 				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
1222 				     TRANS_SET_CONTEXT_LATENCY_MASK,
1223 				     TRANS_SET_CONTEXT_LATENCY_VALUE(1));
1224
1225 		/* Wa_16012604467:adlp */
1226 		if (IS_ALDERLAKE_P(dev_priv))
1227 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1228 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1229
1230 		/* Wa_16013835468:tgl[b0+], dg1 */
1231 		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
1232 		    IS_DG1(dev_priv)) {
1233 			u16 vtotal, vblank;
1234
1235 			vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
1236 				 crtc_state->uapi.adjusted_mode.crtc_vdisplay;
1237 			vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
1238 				 crtc_state->uapi.adjusted_mode.crtc_vblank_start;
1239 			if (vblank > vtotal)
1240 				intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
1241 					     wa_16013835468_bit_get(intel_dp));
1242 		}
1243 	}
1244 }
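
Despite the local names at lines 1233-1238, the Wa_16013835468 guard compares two blanking lengths: the declared vblank interval against the post-active region. A condensed sketch of the condition:

/* Sketch: the chicken bit is only set when vblank_end - vblank_start
 * exceeds crtc_vtotal - crtc_vdisplay (the two values compared above). */
static int wa_16013835468_needed(int crtc_vtotal, int crtc_vdisplay,
				 int crtc_vblank_start, int crtc_vblank_end)
{
	return (crtc_vblank_end - crtc_vblank_start) >
	       (crtc_vtotal - crtc_vdisplay);
}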
1245
1246 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1247 {
1248 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1249 u32 val;
1250
1251 /*
1252 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1253 * will still keep the error set even after the reset done in the
1254 * irq_preinstall and irq_uninstall hooks.
1255 * And enabling in this situation cause the screen to freeze in the
1256 * first time that PSR HW tries to activate so lets keep PSR disabled
1257 * to avoid any rendering problems.
1258 */
1259 	if (DISPLAY_VER(dev_priv) >= 12)
1260 		val = intel_de_read(dev_priv,
1261 				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
1262 	else
1263 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1264 val &= psr_irq_psr_error_bit_get(intel_dp);
1265 if (val) {
1266 		intel_dp->psr.sink_not_reliable = true;
1267 		drm_dbg_kms(&dev_priv->drm,
1268 			    "PSR interruption error set, not enabling PSR\n");
1269 		return false;
1270 	}
1271
1272 	return true;
1273 }
1274
1275static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1276 const struct intel_crtc_state *crtc_state)
1277{
1278 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1279 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1280 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1281 	struct intel_encoder *encoder = &dig_port->base;
1282 	u32 val;
1283
1284 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1285
1286 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1287 intel_dp->psr.busy_frontbuffer_bits = 0;
1288 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1289 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1290 	/* DC5/DC6 requires at least 6 idle frames */
1291 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1292 	intel_dp->psr.dc3co_exit_delay = val;
1293 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1294 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1295 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1296 intel_dp->psr.req_psr2_sdp_prior_scanline =
1297 crtc_state->req_psr2_sdp_prior_scanline;
1298
1299 if (!psr_interrupt_error_check(intel_dp))
1300 return;
1301
1302 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabling PSR%s\n"
, intel_dp->psr.psr2_enabled ? "2" : "1")
1303 intel_dp->psr.psr2_enabled ? "2" : "1")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabling PSR%s\n"
, intel_dp->psr.psr2_enabled ? "2" : "1")
;
1304 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1305 intel_snps_phy_update_psr_power_state(dev_priv, phy, true1);
1306 intel_psr_enable_sink(intel_dp);
1307 intel_psr_enable_source(intel_dp, crtc_state);
1308 intel_dp->psr.enabled = true1;
1309 intel_dp->psr.paused = false0;
1310
1311 intel_psr_activate(intel_dp);
1312}
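
The DC3CO exit delay at line 1291 is six frame times converted to ticks; per the expansion, the OpenBSD usecs_to_jiffies() shim is a plain scale by the tick rate. Standalone sketch:

#include <stdint.h>

/* usecs_to_jiffies() as expanded above: usecs * hz / 1000000. */
static uint64_t usecs_to_ticks(uint64_t usecs, int hz)
{
	return usecs * hz / 1000000;
}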
1313
1314static void intel_psr_exit(struct intel_dp *intel_dp)
1315{
1316 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1317 u32 val;
1318
1319 if (!intel_dp->psr.active) {
1320 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1321 			val = intel_de_read(dev_priv,
1322 					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1323 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1324 		}
1325
1326 		val = intel_de_read(dev_priv,
1327 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1328 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1329
1330 		return;
1331 	}
1332
1333 if (intel_dp->psr.psr2_enabled) {
1334 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1335 		val = intel_de_read(dev_priv,
1336 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1337 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1338 		val &= ~EDP_PSR2_ENABLE;
1339 		intel_de_write(dev_priv,
1340 			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1341 	} else {
1342 		val = intel_de_read(dev_priv,
1343 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1344 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1345 		val &= ~EDP_PSR_ENABLE;
1346 		intel_de_write(dev_priv,
1347 			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1348 	}
1349 	intel_dp->psr.active = false;
1350 }
1351
1352static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1353{
1354 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1355 i915_reg_t psr_status;
1356 u32 psr_status_mask;
1357
1358 if (intel_dp->psr.psr2_enabled) {
1359 		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1360 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1361 	} else {
1362 		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1363 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1364 	}
1365
1366 	/* Wait till PSR is idle */
1367 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1368 				    psr_status_mask, 2000))
1369 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1370 }
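
intel_de_wait_for_clear() above polls the selected status register until the masked state bits read back zero or the 2000 ms timeout expires. A simplified, hypothetical polling loop (the MMIO read callback and the per-iteration sleep are stand-ins):

#include <stdint.h>

/* Returns 0 once (read(ctx) & mask) == 0, -1 on timeout (sketch). */
static int wait_for_clear(uint32_t (*read)(void *ctx), void *ctx,
			  uint32_t mask, int timeout_ms)
{
	while (timeout_ms-- > 0) {
		if ((read(ctx) & mask) == 0)
			return 0;
		/* sleep ~1 ms between polls (platform-specific) */
	}
	return -1;
}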
1371
1372static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1373{
1374 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1375 	enum phy phy = intel_port_to_phy(dev_priv,
1376 					 dp_to_dig_port(intel_dp)->base.port);
1377
1378 	lockdep_assert_held(&intel_dp->psr.lock);
1379
1380 if (!intel_dp->psr.enabled)
1381 return;
1382
1383 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "Disabling PSR%s\n"
, intel_dp->psr.psr2_enabled ? "2" : "1")
1384 intel_dp->psr.psr2_enabled ? "2" : "1")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_KMS, "Disabling PSR%s\n"
, intel_dp->psr.psr2_enabled ? "2" : "1")
;
1385
1386 intel_psr_exit(intel_dp);
1387 intel_psr_wait_exit_locked(intel_dp);
1388
1389 /* Wa_1408330847 */
1390 	if (intel_dp->psr.psr2_sel_fetch_enabled &&
1391 	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1392 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1393 			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1394
1395 	if (intel_dp->psr.psr2_enabled) {
1396 		/* Wa_16011168373:adl-p */
1397 		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1398 			intel_de_rmw(dev_priv,
1399 				     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
1400 				     TRANS_SET_CONTEXT_LATENCY_MASK, 0);
1401
1402 		/* Wa_16012604467:adlp */
1403 		if (IS_ALDERLAKE_P(dev_priv))
1404 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1405 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1406
1407 		/* Wa_16013835468:tgl[b0+], dg1 */
1408 		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
1409 		    IS_DG1(dev_priv))
1410 			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1411 				     wa_16013835468_bit_get(intel_dp), 0);
1412 }
1413
1414 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1415
1416 	/* Disable PSR on Sink */
1417 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1418
1419 	if (intel_dp->psr.psr2_enabled)
1420 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1421
1422 	intel_dp->psr.enabled = false;
1423 	intel_dp->psr.psr2_enabled = false;
1424 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1425 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1426 }
1427
1428/**
1429 * intel_psr_disable - Disable PSR
1430 * @intel_dp: Intel DP
1431 * @old_crtc_state: old CRTC state
1432 *
1433 * This function needs to be called before disabling pipe.
1434 */
1435void intel_psr_disable(struct intel_dp *intel_dp,
1436 const struct intel_crtc_state *old_crtc_state)
1437{
1438 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1439
1440 if (!old_crtc_state->has_psr)
1441 return;
1442
1443 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1444 return;
1445
1446 	mutex_lock(&intel_dp->psr.lock);
1447
1448 	intel_psr_disable_locked(intel_dp);
1449
1450 	mutex_unlock(&intel_dp->psr.lock);
1451 	cancel_work_sync(&intel_dp->psr.work);
1452 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1453 }
1454
1455/**
1456 * intel_psr_pause - Pause PSR
1457 * @intel_dp: Intel DP
1458 *
1459  * This function needs to be called after enabling PSR.
1460  */
1461 void intel_psr_pause(struct intel_dp *intel_dp)
1462 {
1463 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1464 struct intel_psr *psr = &intel_dp->psr;
1465
1466 	if (!CAN_PSR(intel_dp))
1467 		return;
1468
1469 	mutex_lock(&psr->lock);
1470
1471 	if (!psr->enabled) {
1472 		mutex_unlock(&psr->lock);
1473 		return;
1474 	}
1475
1476 	/* If we ever hit this, we will need to add refcount to pause/resume */
1477 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1478
1479 	intel_psr_exit(intel_dp);
1480 	intel_psr_wait_exit_locked(intel_dp);
1481 	psr->paused = true;
1482
1483 	mutex_unlock(&psr->lock);
1484
1485 	cancel_work_sync(&psr->work);
1486 	cancel_delayed_work_sync(&psr->dc3co_work);
1487 }
1488
1489/**
1490 * intel_psr_resume - Resume PSR
1491 * @intel_dp: Intel DP
1492 *
1493  * This function needs to be called after pausing PSR.
1494 */
1495void intel_psr_resume(struct intel_dp *intel_dp)
1496{
1497 struct intel_psr *psr = &intel_dp->psr;
1498
1499 	if (!CAN_PSR(intel_dp))
1500 		return;
1501
1502 	mutex_lock(&psr->lock);
1503
1504 	if (!psr->paused)
1505 		goto unlock;
1506
1507 	psr->paused = false;
1508 	intel_psr_activate(intel_dp);
1509
1510 unlock:
1511 	mutex_unlock(&psr->lock);
1512 }
1513
1514 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1515 {
1516 	return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
1517 }
1518
1519 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1520 {
1521 	return IS_ALDERLAKE_P(dev_priv) ?
1522 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1523 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1524 }
1525
1526 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1527 {
1528 	return IS_ALDERLAKE_P(dev_priv) ?
1529 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1530 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1531 }
1532
1533 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1534 {
1535 	return IS_ALDERLAKE_P(dev_priv) ?
1536 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1537 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1538 }
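
Taken together, the four helpers above select between two PSR2_MAN_TRK_CTL bit layouts; the positions below are read off the expanded constants:

/*
 *                          ADL-P     other platforms
 *   ENABLE                 (none)    bit 31
 *   SF single full frame   bit 14    bit 3
 *   SF partial frame       bit 31    bit 1
 *   SF continuous full     bit 13    bit 2
 */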
1539
1540static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1541{
1542 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1543
1544 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1545 		intel_de_write(dev_priv,
1546 			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
1547 			       man_trk_ctl_enable_bit_get(dev_priv) |
1548 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1549 			       man_trk_ctl_single_full_frame_bit_get(dev_priv));
1550
1551 /*
1552 * Display WA #0884: skl+
1553 * This documented WA for bxt can be safely applied
1554 * broadly so we can force HW tracking to exit PSR
1555 * instead of disabling and re-enabling.
1556 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1557 	 * but it makes more sense to write to the currently active
1558 	 * pipe.
1559 	 *
1560 	 * This workaround does not exist for platforms with display 10 or
1561 	 * newer, but testing proved that it works up to display 13; for
1562 	 * anything newer, testing will be needed.
1563 	 */
1564 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1565 }
1566
1567void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
1568 const struct intel_crtc_state *crtc_state)
1569{
1570 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1571 enum pipe pipe = plane->pipe;
1572
1573 if (!crtc_state->enable_psr2_sel_fetch)
1574 return;
1575
1576 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
1577 }
1578
1579void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
1580 const struct intel_crtc_state *crtc_state,
1581 const struct intel_plane_state *plane_state,
1582 int color_plane)
1583{
1584 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1585 enum pipe pipe = plane->pipe;
1586 const struct drm_rect *clip;
1587 u32 val;
1588 int x, y;
1589
1590 if (!crtc_state->enable_psr2_sel_fetch)
1591 return;
1592
1593 if (plane->id == PLANE_CURSOR) {
1594 		intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1595 				  plane_state->ctl);
1596 		return;
1597 	}
1598
1599 	clip = &plane_state->psr2_sel_fetch_area;
1600
1601 	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1602 	val |= plane_state->uapi.dst.x1;
1603 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1604
1605 x = plane_state->view.color_plane[color_plane].x;
1606
1607 /*
1608 * From Bspec: UV surface Start Y Position = half of Y plane Y
1609 * start position.
1610 */
1611 if (!color_plane)
1612 y = plane_state->view.color_plane[color_plane].y + clip->y1;
1613 else
1614 y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
1615
1616 val = y << 16 | x;
1617
1618 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1619 			  val);
1620
1621 /* Sizes are 0 based */
1622 val = (drm_rect_height(clip) - 1) << 16;
1623 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1624 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1625
1626 	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1627 			  PLANE_SEL_FETCH_CTL_ENABLE);
1628 }
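
The POS, OFFSET and SIZE writes above all pack two 16-bit halves into one register: y (or height - 1) in the high half, x (or width - 1) in the low half, with sizes 0-based per the comment at line 1621. Sketch of the packing:

#include <stdint.h>

static uint32_t sel_fetch_pack_pos(uint16_t x, uint16_t y)
{
	return ((uint32_t)y << 16) | x;	/* y in [31:16], x in [15:0] */
}

static uint32_t sel_fetch_pack_size(uint16_t width, uint16_t height)
{
	/* sizes are written 0-based, hence the - 1 */
	return ((uint32_t)(height - 1) << 16) | (uint16_t)(width - 1);
}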
1629
1630 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1631 {
1632 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1633 struct intel_encoder *encoder;
1634
1635 if (!crtc_state->enable_psr2_sel_fetch)
1636 return;
1637
1638 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1639 					     crtc_state->uapi.encoder_mask) {
1640 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1641
1642 		lockdep_assert_held(&intel_dp->psr.lock);
1643 if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1644 return;
1645 break;
1646 }
1647
1648 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1649 		       crtc_state->psr2_man_track_ctl);
1650 }
1651
1652static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1653 				  struct drm_rect *clip, bool full_update)
1654 {
1655 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1656 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1657 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1658
1659 /* SF partial frame enable has to be set even on full update */
1660 val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1661
1662 if (full_update) {
1663 /*
1664 * Not applying Wa_14014971508:adlp as we do not support the
1665 * feature that requires this workaround.
1666 */
1667 val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1668 goto exit;
1669 }
1670
1671 if (clip->y1 == -1)
1672 goto exit;
1673
1674 	if (IS_ALDERLAKE_P(dev_priv)) {
1675 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1676 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1677 	} else {
1678 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1679
1680 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1681 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1682 	}
1683 exit:
1684 	crtc_state->psr2_man_track_ctl = val;
1685 }
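
On non-ADL-P hardware the SU region start/end above are expressed in 4-line blocks (hence the drm_WARN_ON on y % 4), landing in bits 30:21 and 20:11 per the expanded field definitions. Sketch of that encoding:

#include <stdint.h>

/* Non-ADL-P SU region: block index is y / 4 + 1; start goes to [30:21],
 * end to [20:11] (derived from the expansions above). */
static uint32_t su_region_bits(int y1, int y2)
{
	uint32_t start = (uint32_t)(y1 / 4 + 1);
	uint32_t end = (uint32_t)(y2 / 4 + 1);

	return (start << 21) | (end << 11);
}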
1686
1687static void clip_area_update(struct drm_rect *overlap_damage_area,
1688 struct drm_rect *damage_area,
1689 struct drm_rect *pipe_src)
1690{
1691 if (!drm_rect_intersect(damage_area, pipe_src))
1692 return;
1693
1694 if (overlap_damage_area->y1 == -1) {
1695 overlap_damage_area->y1 = damage_area->y1;
1696 overlap_damage_area->y2 = damage_area->y2;
1697 return;
1698 }
1699
1700 if (damage_area->y1 < overlap_damage_area->y1)
1701 overlap_damage_area->y1 = damage_area->y1;
1702
1703 if (damage_area->y2 > overlap_damage_area->y2)
1704 overlap_damage_area->y2 = damage_area->y2;
1705}
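
In effect clip_area_update() computes a running union of y-intervals: pipe_clip starts out empty (y1 == -1) and grows to cover every damaged area that intersects the pipe. For example, merging [8, 16) and then [4, 12) yields [4, 16).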
1706
1707static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
1708 struct drm_rect *pipe_clip)
1709{
1710 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1711 	const u16 y_alignment = crtc_state->su_y_granularity;
1712
1713 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
1714 	if (pipe_clip->y2 % y_alignment)
1715 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
1716
1717 	if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
1718 		drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
1719 }
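
The alignment above rounds y1 down and y2 up to multiples of su_y_granularity. For example, with y_alignment == 4, a clip of [6, 9) becomes [4, 12).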
1720
1721/*
1722 * TODO: Not clear how to handle planes with negative position,
1723 * also planes are not updated if they have a negative X
1724  * position, so for now do a full update in these cases.
1725  *
1726  * Plane scaling and rotation are not supported by selective fetch, and both
1727  * properties can change without a modeset, so they need to be checked at every
1728  * atomic commit.
1729 */
1730 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
1731 {
1732 	if (plane_state->uapi.dst.y1 < 0 ||
1733 	    plane_state->uapi.dst.x1 < 0 ||
1734 	    plane_state->scaler_id >= 0 ||
1735 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
1736 		return false;
1737
1738 	return true;
1739}
1740
1741/*
1742  * Check for pipe properties that are not supported by selective fetch.
1743 *
1744 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
1745 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
1746 * enabled and going to the full update path.
1747 */
1748 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
1749 {
1750 	if (crtc_state->scaler_state.scaler_id >= 0)
1751 		return false;
1752
1753 	return true;
1754}
1755
1756int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1757 struct intel_crtc *crtc)
1758{
1759 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1760 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1761 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1762 	struct intel_plane_state *new_plane_state, *old_plane_state;
1763 	struct intel_plane *plane;
1764 	bool full_update = false;
1765 	int i, ret;
1766
1767 	if (!crtc_state->enable_psr2_sel_fetch)
1768 		return 0;
1769
1770 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
1771 		full_update = true;
1772 goto skip_sel_fetch_set_loop;
1773 }
1774
1775 /*
1776 * Calculate minimal selective fetch area of each plane and calculate
1777 * the pipe damaged area.
1778 * In the next loop the plane selective fetch area will actually be set
1779 * using whole pipe damaged area.
1780 */
1781 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1782 					     new_plane_state, i) {
1783 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
1784 						      .x2 = INT_MAX };
1785
1786 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1787 continue;
1788
1789 if (!new_plane_state->uapi.visible &&
1790 !old_plane_state->uapi.visible)
1791 continue;
1792
1793 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1794 			full_update = true;
1795 break;
1796 }
1797
1798 /*
1799 * If visibility or plane moved, mark the whole plane area as
1800  * damaged as it needs to be completely redrawn in the new and old
1801 * position.
1802 */
1803 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1804 !drm_rect_equals(&new_plane_state->uapi.dst,
1805 &old_plane_state->uapi.dst)) {
1806 if (old_plane_state->uapi.visible) {
1807 damaged_area.y1 = old_plane_state->uapi.dst.y1;
1808 damaged_area.y2 = old_plane_state->uapi.dst.y2;
1809 clip_area_update(&pipe_clip, &damaged_area,
1810 &crtc_state->pipe_src);
1811 }
1812
1813 if (new_plane_state->uapi.visible) {
1814 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1815 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1816 clip_area_update(&pipe_clip, &damaged_area,
1817 &crtc_state->pipe_src);
1818 }
1819 continue;
1820 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
1821 /* If alpha changed mark the whole plane area as damaged */
1822 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1823 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1824 clip_area_update(&pipe_clip, &damaged_area,
1825 &crtc_state->pipe_src);
1826 continue;
1827 }
1828
1829 src = drm_plane_state_src(&new_plane_state->uapi);
1830 drm_rect_fp_to_int(&src, &src);
1831
1832 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
1833 &new_plane_state->uapi, &damaged_area))
1834 continue;
1835
1836 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1837 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1838 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
1839 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
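/*
 * Note: drm_atomic_helper_damage_merged() reports damage in plane-source
 * coordinates, while pipe_clip is tracked in pipe (dst) coordinates,
 * hence the dst - src offsets above. E.g. with src.y1 == 0 and
 * dst.y1 == 100, damage rows 10..20 become pipe rows 110..120.
 */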
1840
1841 clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
1842 }
1843
1844 /*
1845 * TODO: For now we are just using full update in case
1846 * selective fetch area calculation fails. To optimize this we
1847 * should identify cases where this happens and fix the area
1848 * calculation for those.
1849 */
1850 if (pipe_clip.y1 == -1) {
1851 drm_info_once(&dev_priv->drm,
1852 "Selective fetch area calculation failed in pipe %c\n",
1853 pipe_name(crtc->pipe));
1854 full_update = true;
1855 }
1856
1857 if (full_update)
1858 goto skip_sel_fetch_set_loop;
1859
1860 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1861 if (ret)
1862 return ret;
1863
1864 intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
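/*
 * Note: the alignment helper above is expected to round pipe_clip out to
 * the granularity the selective fetch hardware can program (whole lines,
 * coarser units when e.g. DSC is in use); the exact rounding rules live
 * in the helper, defined earlier in this file, and are assumed here.
 */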
1865
1866 /*
1867 * Now that we have the pipe damaged area, check if it intersects
1868 * with each plane; if it does, set the plane selective fetch area.
1869 */
1870 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1871 new_plane_state, i) {
1872 struct drm_rect *sel_fetch_area, inter;
1873 struct intel_plane *linked = new_plane_state->planar_linked_plane;
1874
1875 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1876 !new_plane_state->uapi.visible)
1877 continue;
1878
1879 inter = pipe_clip;
1880 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1881 continue;
1882
1883 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
1884 full_update = true;
1885 break;
1886 }
1887
1888 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1889 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1890 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
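/*
 * Note: the selective fetch area is programmed relative to the plane, so
 * the pipe-space intersection is rebased on dst.y1 here. E.g. with
 * inter.y1 == 150, inter.y2 == 200 and dst.y1 == 100, the plane fetches
 * rows 50..100 of its own buffer.
 */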
1891 crtc_state->update_planes |= BIT(plane->id);
1892
1893 /*
1894 * Sel_fetch_area is calculated for UV plane. Use
1895 * same area for Y plane as well.
1896 */
1897 if (linked) {
1898 struct intel_plane_state *linked_new_plane_state;
1899 struct drm_rect *linked_sel_fetch_area;
1900
1901 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
1902 if (IS_ERR(linked_new_plane_state))
1903 return PTR_ERR(linked_new_plane_state);
1904
1905 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
1906 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
1907 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
1908 crtc_state->update_planes |= BIT(linked->id);
1909 }
1910 }
1911
1912 skip_sel_fetch_set_loop:
1913 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
1914 return 0;
1915 }
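/*
 * Illustrative sketch, not part of this file: clip_area_update() is
 * defined earlier in intel_psr.c. Assuming it simply unions the damaged
 * rows into the running pipe clip, clamped to the pipe area, it behaves
 * roughly like the following (sketch name is hypothetical):
 */
static void clip_area_update_sketch(struct drm_rect *overlap_damage_area,
				    struct drm_rect *damage_area,
				    struct drm_rect *pipe_src)
{
	/* Ignore damage that lies completely outside the pipe. */
	if (!drm_rect_intersect(damage_area, pipe_src))
		return;

	/* First contribution: adopt the damage as the initial clip. */
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	/* Otherwise grow the clip vertically to cover the new damage. */
	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;
	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}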
1916
1917 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
1918 struct intel_crtc *crtc)
1919 {
1920 struct drm_i915_private *i915 = to_i915(state->base.dev);
1921 const struct intel_crtc_state *old_crtc_state =
1922 intel_atomic_get_old_crtc_state(state, crtc);
1923 const struct intel_crtc_state *new_crtc_state =
1924 intel_atomic_get_new_crtc_state(state, crtc);
1925 struct intel_encoder *encoder;
1926
1927 if (!HAS_PSR(i915))
1928 return;
1929
1930 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1931 old_crtc_state->uapi.encoder_mask) {
1932 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1933 struct intel_psr *psr = &intel_dp->psr;
1934 bool needs_to_disable = false;
1935
1936 mutex_lock(&psr->lock);
1937
1938 /*
1939 * Reasons to disable (matching the checks below):
1940 * - Full modeset or PSR disabled in the new state
1941 * - All planes will go inactive
1942 * - Changing between PSR versions
1943 */
1944 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
1945 needs_to_disable |= !new_crtc_state->has_psr;
1946 needs_to_disable |= !new_crtc_state->active_planes;
1947 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
1948
1949 if (psr->enabled && needs_to_disable)
1950 intel_psr_disable_locked(intel_dp);
1951
1952 mutex_unlock(&psr->lock);
1953 }
1954 }
1955
1956 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
1957 const struct intel_crtc_state *crtc_state)
1958 {
1959 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1960 struct intel_encoder *encoder;
1961
1962 if (!crtc_state->has_psr)
1963 return;
1964
1965 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
1966 crtc_state->uapi.encoder_mask) {
1967 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1968 struct intel_psr *psr = &intel_dp->psr;
1969
1970 mutex_lock(&psr->lock);
1971
1972 if (psr->sink_not_reliable)
1973 goto exit;
1974
1975 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
1976
1977 /* Only enable if there are active planes */
1978 if (!psr->enabled && crtc_state->active_planes)
1979 intel_psr_enable_locked(intel_dp, crtc_state);
1980
1981 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1982 if (crtc_state->crc_enabled && psr->enabled)
1983 psr_force_hw_tracking_exit(intel_dp);
1984
1985exit:
1986 mutex_unlock(&psr->lock);
1987 }
1988 }
1989
1990 void intel_psr_post_plane_update(const struct intel_atomic_state *state)
1991 {
1992 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1993 struct intel_crtc_state *crtc_state;
1994 struct intel_crtc *crtc;
1995 int i;
1996
1997 if (!HAS_PSR(dev_priv))
1998 return;
1999
2000 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
2001 _intel_psr_post_plane_update(state, crtc_state);
2002 }
2003
2004 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2005 {
2006 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2007
2008 /*
2009 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2010 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2011 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2012 */
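/*
 * Note: per the register definition, the state field sits in bits 31:28
 * and DEEP_SLEEP is 0x8, so every state >= DEEP_SLEEP (0x8..0xf) has
 * that bit set, and waiting for it to clear covers all of them.
 */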
2013 return intel_de_wait_for_clear(dev_priv,
2014 EDP_PSR2_STATUS(intel_dp->psr.transcoder),
2015 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2016 }
2017
2018 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2019 {
2020 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2021
2022 /*
2023 * From bspec: Panel Self Refresh (BDW+)
2024 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2025 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2026 * defensive enough to cover everything.
2027 */
2028 return intel_de_wait_for_clear(dev_priv,
2029 EDP_PSR_STATUS(intel_dp->psr.transcoder),
2030 EDP_PSR_STATUS_STATE_MASK, 50);
2031 }
2032
2033 /**
2034 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2035 * @new_crtc_state: new CRTC state
2036 *
2037 * This function is expected to be called from pipe_update_start() where it is
2038 * not expected to race with PSR enable or disable.
2039 */
2040 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2041 {
2042 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2043 struct intel_encoder *encoder;
2044
2045 if (!new_crtc_state->has_psr)
2046 return;
2047
2048 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2049 new_crtc_state->uapi.encoder_mask) {
2050 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2051 int ret;
2052
2053 lockdep_assert_held(&intel_dp->psr.lock);
2054
2055 if (!intel_dp->psr.enabled)
2056 continue;
2057
2058 if (intel_dp->psr.psr2_enabled)
2059 ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2060 else
2061 ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2062
2063 if (ret)
2064 drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "PSR wait timed out, atomic update may fail\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
2065 }
2066 }
2067
2068 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2069 {
2070 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2071 i915_reg_t reg;
2072 u32 mask;
2073 int err;
2074
2075 if (!intel_dp->psr.enabled)
2076 return false;
2077
2078 if (intel_dp->psr.psr2_enabled) {
2079 reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
2080 mask = EDP_PSR2_STATUS_STATE_MASK;
2081 } else {
2082 reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
2083 mask = EDP_PSR_STATUS_STATE_MASK;
2084 }
2085
2086 mutex_unlock(&intel_dp->psr.lock);
2087
2088 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2089 if (err)
2090 drm_err(&dev_priv->drm,
2091 "Timed out waiting for PSR Idle for re-enable\n");
2092
2093 /* After the unlocked wait, verify that PSR is still wanted! */
2094 mutex_lock(&intel_dp->psr.lock);
2095 return err == 0 && intel_dp->psr.enabled;
2096 }
2097
2098 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2099 {
2100 struct drm_connector_list_iter conn_iter;
2101 struct drm_device *dev = &dev_priv->drm;
2102 struct drm_modeset_acquire_ctx ctx;
2103 struct drm_atomic_state *state;
2104 struct drm_connector *conn;
2105 int err = 0;
2106
2107 state = drm_atomic_state_alloc(dev);
2108 if (!state)
2109 return -ENOMEM;
2110
2111 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2112 state->acquire_ctx = &ctx;
2113
2114 retry:
2115
2116 drm_connector_list_iter_begin(dev, &conn_iter);
2117 drm_for_each_connector_iter(conn, &conn_iter) {
2118 struct drm_connector_state *conn_state;
2119 struct drm_crtc_state *crtc_state;
2120
2121 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2122 continue;
2123
2124 conn_state = drm_atomic_get_connector_state(state, conn);
2125 if (IS_ERR(conn_state)) {
2126 err = PTR_ERR(conn_state);
2127 break;
2128 }
2129
2130 if (!conn_state->crtc)
2131 continue;
2132
2133 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2134 if (IS_ERR(crtc_state)) {
2135 err = PTR_ERR(crtc_state);
2136 break;
2137 }
2138
2139 /* Mark mode as changed to trigger a pipe->update() */
2140 crtc_state->mode_changed = true;
2141 }
2142 drm_connector_list_iter_end(&conn_iter);
2143
2144 if (err == 0)
2145 err = drm_atomic_commit(state);
2146
2147 if (err == -EDEADLK) {
2148 drm_atomic_state_clear(state);
2149 err = drm_modeset_backoff(&ctx);
2150 if (!err)
2151 goto retry;
2152 }
2153
2154 drm_modeset_drop_locks(&ctx);
2155 drm_modeset_acquire_fini(&ctx);
2156 drm_atomic_state_put(state);
2157
2158 return err;
2159 }
2160
2161 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2162 {
2163 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2164 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2165 u32 old_mode;
2166 int ret;
2167
2168 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2169 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2170 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2171 return -EINVAL;
2172 }
2173
2174 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2175 if (ret)
2176 return ret;
2177
2178 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2179 intel_dp->psr.debug = val;
2180
2181 /*
2182 * Do it right away if it's already enabled, otherwise it will be done
2183 * when enabling the source.
2184 */
2185 if (intel_dp->psr.enabled)
2186 psr_irq_control(intel_dp);
2187
2188 mutex_unlock(&intel_dp->psr.lock);
2189
2190 if (old_mode != mode)
2191 ret = intel_psr_fastset_force(dev_priv);
2192
2193 return ret;
2194 }
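/*
 * Note: this is reached from debugfs (the i915_edp_psr_debug file in the
 * upstream driver, assumed here): the low nibble selects a debug mode and
 * bit 0x10 enables PSR IRQ debugging. A mode change forces a fastset via
 * intel_psr_fastset_force() so it takes effect immediately.
 */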
2195
2196 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2197 {
2198 struct intel_psr *psr = &intel_dp->psr;
2199
2200 intel_psr_disable_locked(intel_dp);
2201 psr->sink_not_reliable = true;
2202 /* let's make sure the sink is awake */
2203 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2204 }
2205
2206 static void intel_psr_work(struct work_struct *work)
2207 {
2208 struct intel_dp *intel_dp =
2209 container_of(work, typeof(*intel_dp), psr.work);
2210
2211 mutex_lock(&intel_dp->psr.lock);
2212
2213 if (!intel_dp->psr.enabled)
2214 goto unlock;
2215
2216 if (READ_ONCE(intel_dp->psr.irq_aux_error))
2217 intel_psr_handle_irq(intel_dp);
2218
2219 /*
2220 * We have to make sure PSR is ready for re-enable,
2221 * otherwise it stays disabled until the next full enable/disable cycle.
2222 * PSR might take some time to get fully disabled
2223 * and be ready for re-enable.
2224 */
2225 if (!__psr_wait_for_idle_locked(intel_dp))
2226 goto unlock;
2227
2228 /*
2229 * The delayed work can race with an invalidate hence we need to
2230 * recheck. Since psr_flush first clears this and then reschedules we
2231 * won't ever miss a flush when bailing out here.
2232 */
2233 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2234 goto unlock;
2235
2236 intel_psr_activate(intel_dp);
2237 unlock:
2238 mutex_unlock(&intel_dp->psr.lock);
2239 }
2240
2241 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2242 {
2243 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2244
2245 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2246 u32 val;
2247
2248 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2249 /* Send one update, otherwise lag is observed on screen */
2250 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2251 return;
2252 }
2253
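/*
 * Note: the write below arms "continuous full frame" (CFF) fetch, i.e.
 * the hardware keeps fetching the whole frame each vblank while the
 * frontbuffer is busy; _psr_flush_handle() later downgrades this to a
 * single full frame once the busy frontbuffer bits clear.
 */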
2254 val = man_trk_ctl_enable_bit_get(dev_priv) |
2255 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2256 man_trk_ctl_continuos_full_frame(dev_priv);
2257 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
2258 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2259 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2260 } else {
2261 intel_psr_exit(intel_dp);
2262 }
2263 }
2264
2265 /**
2266 * intel_psr_invalidate - Invalidate PSR
2267 * @dev_priv: i915 device
2268 * @frontbuffer_bits: frontbuffer plane tracking bits
2269 * @origin: which operation caused the invalidate
2270 *
2271 * Since the hardware frontbuffer tracking has gaps we need to integrate
2272 * with the software frontbuffer tracking. This function gets called every
2273 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2274 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2275 *
2276 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
2277 */
2278 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2279 unsigned frontbuffer_bits, enum fb_op_origin origin)
2280 {
2281 struct intel_encoder *encoder;
2282
2283 if (origin == ORIGIN_FLIP)
2284 return;
2285
2286 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2287 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2288 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2289
2290 mutex_lock(&intel_dp->psr.lock);
2291 if (!intel_dp->psr.enabled) {
2292 mutex_unlock(&intel_dp->psr.lock);
2293 continue;
2294 }
2295
2296 pipe_frontbuffer_bits &=
2297 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2298 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2299
2300 if (pipe_frontbuffer_bits)
2301 _psr_invalidate_handle(intel_dp);
2302
2303 mutex_unlock(&intel_dp->psr.lock);
2304 }
2305 }
2306 /*
2307 * Once we completely rely on PSR2 S/W tracking in the future,
2308 * intel_psr_flush() will also invalidate and flush PSR for
2309 * ORIGIN_FLIP events, so tgl_dc3co_flush_locked() will need to be
2310 * changed accordingly.
2311 */
2312static void
2313tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2314 enum fb_op_origin origin)
2315{
2316 if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2317 !intel_dp->psr.active)
2318 return;
2319
2320 /*
2321 * Every frontbuffer flush/flip event modifies the delay of the delayed
2322 * work; when the delayed work finally runs, the display has been idle.
2323 */
2324 if (!(frontbuffer_bits &
2325 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2326 return;
2327
2328 tgl_psr2_enable_dc3co(intel_dp);
2329 mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
2330 intel_dp->psr.dc3co_exit_delay);
2331 }
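/*
 * Note: DC3CO is a shallow display power state that TGL+ can enter
 * between frames; the exitline/exit-delay bookkeeping here keeps it armed
 * only while frontbuffer activity shows the pipe is genuinely idle. The
 * exact hardware semantics are assumed; the enable/disable helpers live
 * elsewhere in this file.
 */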
2332
2333 static void _psr_flush_handle(struct intel_dp *intel_dp)
2334 {
2335 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2336
2337 if (intel_dp->psr.psr2_sel_fetch_enabled) {
2338 if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2339 /* can we turn CFF off? */
2340 if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2341 u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2342 man_trk_ctl_partial_frame_bit_get(dev_priv) |
2343 man_trk_ctl_single_full_frame_bit_get(dev_priv);
2344
2345 /*
2346 * turn continuous full frame off and do a single
2347 * full frame
2348 */
2349 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
2350 val);
2351 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2352 intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2353 }
2354 } else {
2355 /*
2356 * continuous full frame is disabled, only a single full
2357 * frame is required
2358 */
2359 psr_force_hw_tracking_exit(intel_dp);
2360 }
2361 } else {
2362 psr_force_hw_tracking_exit(intel_dp);
2363
2364 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2365 schedule_work(&intel_dp->psr.work);
2366 }
2367 }
2368
2369 /**
2370 * intel_psr_flush - Flush PSR
2371 * @dev_priv: i915 device
2372 * @frontbuffer_bits: frontbuffer plane tracking bits
2373 * @origin: which operation caused the flush
2374 *
2375 * Since the hardware frontbuffer tracking has gaps we need to integrate
2376 * with the software frontbuffer tracking. This function gets called every
2377 * time frontbuffer rendering has completed and flushed out to memory. PSR
2378 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2379 *
2380 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2381 */
2382 void intel_psr_flush(struct drm_i915_private *dev_priv,
2383 unsigned frontbuffer_bits, enum fb_op_origin origin)
2384 {
2385 struct intel_encoder *encoder;
2386
2387 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2388 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2389 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2390
2391 mutex_lock(&intel_dp->psr.lock);
2392 if (!intel_dp->psr.enabled) {
2393 mutex_unlock(&intel_dp->psr.lock);
2394 continue;
2395 }
2396
2397 pipe_frontbuffer_bits &=
2398 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2399 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2400
2401 /*
2402 * If the PSR is paused by an explicit intel_psr_paused() call,
2403 * we have to ensure that the PSR is not activated until
2404 * intel_psr_resume() is called.
2405 */
2406 if (intel_dp->psr.paused)
2407 goto unlock;
2408
2409 if (origin == ORIGIN_FLIP ||
2410 (origin == ORIGIN_CURSOR_UPDATE &&
2411 !intel_dp->psr.psr2_sel_fetch_enabled)) {
2412 tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2413 goto unlock;
2414 }
2415
2416 if (pipe_frontbuffer_bits == 0)
2417 goto unlock;
2418
2419 /* By definition flush = invalidate + flush */
2420 _psr_flush_handle(intel_dp);
2421 unlock:
2422 mutex_unlock(&intel_dp->psr.lock);
2423 }
2424 }
2425
2426 /**
2427 * intel_psr_init - Init basic PSR work and mutex.
2428 * @intel_dp: Intel DP
2429 *
2430 * This function is called after connector initialization (which handles
2431 * the connector capabilities) and initializes basic PSR state for each
2432 * DP encoder.
2433 */
2434 void intel_psr_init(struct intel_dp *intel_dp)
2435 {
2436 struct intel_connector *connector = intel_dp->attached_connector;
2437 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2438 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2439
2441 if (!HAS_PSR(dev_priv))
2441 return;
2442
2443 /*
2444 * HSW spec explicitly says PSR is tied to port A.
2445 * BDW+ platforms have an instance of PSR registers per transcoder, but
2446 * on BDW, GEN9 and GEN11 the HW team has only validated the eDP
2447 * transcoder.
2448 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2449 * so let's keep it hardcoded to PORT_A there.
2450 * GEN12, however, supports an instance of PSR registers per transcoder.
2451 */
2452 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2453 drm_dbg_kms(&dev_priv->drm,
2454 "PSR condition failed: Port not supported\n");
2455 return;
2456 }
2457
2458 intel_dp->psr.source_support = true;
2459
2460 /* Set link_standby x link_off defaults */
2461 if (DISPLAY_VER(dev_priv) < 12)
2462 /* For new platforms up to TGL let's respect VBT back again */
2463 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2464
2465 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2466 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2467 rw_init(&intel_dp->psr.lock, "psrlk")_rw_init_flags(&intel_dp->psr.lock, "psrlk", 0, ((void
*)0))
;
2468 }
2469
2470 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2471 u8 *status, u8 *error_status)
2472 {
2473 struct drm_dp_aux *aux = &intel_dp->aux;
2474 int ret;
2475
2476 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
2477 if (ret != 1)
2478 return ret;
2479
2480 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
2481 if (ret != 1)
2482 return ret;
2483
2484 *status = *status & DP_PSR_SINK_STATE_MASK;
2485
2486 return 0;
2487 }
2488
2489 static void psr_alpm_check(struct intel_dp *intel_dp)
2490 {
2491 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2492 struct drm_dp_aux *aux = &intel_dp->aux;
2493 struct intel_psr *psr = &intel_dp->psr;
2494 u8 val;
2495 int r;
2496
2497 if (!psr->psr2_enabled)
2498 return;
2499
2500 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2501 if (r != 1) {
2502 drm_err(&dev_priv->drm, "Error reading ALPM status\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Error reading ALPM status\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
2503 return;
2504 }
2505
2506 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2507 intel_psr_disable_locked(intel_dp);
2508 psr->sink_not_reliable = true;
2509 drm_dbg_kms(&dev_priv->drm,
2510 "ALPM lock timeout error, disabling PSR\n");
2511
2512 /* Clearing error */
2513 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2514 }
2515 }
2516
2517 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2518 {
2519 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2520 struct intel_psr *psr = &intel_dp->psr;
2521 u8 val;
2522 int r;
2523
2524 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2525 if (r != 1) {
2526 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Error reading DP_PSR_ESI\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
2527 return;
2528 }
2529
2530 if (val & DP_PSR_CAPS_CHANGE) {
2531 intel_psr_disable_locked(intel_dp);
2532 psr->sink_not_reliable = true;
2533 drm_dbg_kms(&dev_priv->drm,
2534 "Sink PSR capability changed, disabling PSR\n");
2535
2536 /* Clearing it */
2537 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2538 }
2539 }
2540
2541 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2542 {
2543 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2544 struct intel_psr *psr = &intel_dp->psr;
2545 u8 status, error_status;
2546 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2547 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2548 DP_PSR_LINK_CRC_ERROR;
2549
2550 if (!CAN_PSR(intel_dp))
2551 return;
2552
2553 mutex_lock(&psr->lock);
2554
2555 if (!psr->enabled)
2556 goto exit;
2557
2558 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2559 drm_err(&dev_priv->drm,
2560 "Error reading PSR status or error status\n");
2561 goto exit;
2562 }
2563
2564 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2565 intel_psr_disable_locked(intel_dp);
2566 psr->sink_not_reliable = true;
2567 }
2568
2569 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2570 drm_dbg_kms(&dev_priv->drm,
2571 "PSR sink internal error, disabling PSR\n");
2572 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2573 drm_dbg_kms(&dev_priv->drm,
2574 "PSR RFB storage error, disabling PSR\n");
2575 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2576 drm_dbg_kms(&dev_priv->drm,
2577 "PSR VSC SDP uncorrectable error, disabling PSR\n");
2578 if (error_status & DP_PSR_LINK_CRC_ERROR)
2579 drm_dbg_kms(&dev_priv->drm,
2580 "PSR Link CRC error, disabling PSR\n");
2581
2582 if (error_status & ~errors)
2583 drm_err(&dev_priv->drm,
2584 "PSR_ERROR_STATUS unhandled errors %x\n",
2585 error_status & ~errors);
2586 /* clear status register */
2587 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2588
2589 psr_alpm_check(intel_dp);
2590 psr_capability_changed_check(intel_dp);
2591
2592 exit:
2593 mutex_unlock(&psr->lock);
2594 }
2595
2596 bool intel_psr_enabled(struct intel_dp *intel_dp)
2597 {
2598 bool ret;
2599
2600 if (!CAN_PSR(intel_dp))
2601 return false;
2602
2603 mutex_lock(&intel_dp->psr.lock);
2604 ret = intel_dp->psr.enabled;
2605 mutex_unlock(&intel_dp->psr.lock);
2606
2607 return ret;
2608 }
2609
2610 /**
2611 * intel_psr_lock - grab PSR lock
2612 * @crtc_state: the crtc state
2613 *
2614 * This is initially meant to be used around CRTC updates, when
2615 * vblank-sensitive registers are updated and we need to grab the lock
2616 * before that to avoid vblank evasion.
2617 */
2618 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2619 {
2620 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2621 struct intel_encoder *encoder;
2622
2623 if (!crtc_state->has_psr)
2624 return;
2625
2626 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2627 crtc_state->uapi.encoder_mask) {
2628 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2629
2630 mutex_lock(&intel_dp->psr.lock);
2631 break;
2632 }
2633 }
2634
2635 /**
2636 * intel_psr_unlock - release PSR lock
2637 * @crtc_state: the crtc state
2638 *
2639 * Release the PSR lock that was held during pipe update.
2640 */
2641 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2642 {
2643 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2644 struct intel_encoder *encoder;
2645
2646 if (!crtc_state->has_psr)
2647 return;
2648
2649 for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2650 crtc_state->uapi.encoder_mask) {
2651 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2652
2653 mutex_unlock(&intel_dp->psr.lock);
2654 break;
2655 }
2656 }
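/*
 * Illustrative sketch, not part of this file: going by the two kernel-doc
 * comments above, the intended pairing brackets the vblank-sensitive part
 * of a pipe update (the function name here is hypothetical):
 */
static void pipe_update_example(const struct intel_crtc_state *crtc_state)
{
	intel_psr_lock(crtc_state);
	/* ... program vblank-sensitive registers for the update ... */
	intel_psr_unlock(crtc_state);
}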