Bug Summary

File: dev/pci/drm/i915/i915_irq.c
Warning: line 4551, column 6
Value stored to 'irq' during its initialization is never read

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_irq.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/circ_buf.h>
32 #include <linux/slab.h>
33 #include <linux/sysrq.h>
34
35 #include <drm/drm_drv.h>
36
37#include "display/icl_dsi_regs.h"
38#include "display/intel_de.h"
39#include "display/intel_display_trace.h"
40#include "display/intel_display_types.h"
41#include "display/intel_fifo_underrun.h"
42#include "display/intel_hotplug.h"
43#include "display/intel_lpe_audio.h"
44#include "display/intel_psr.h"
45
46#include "gt/intel_breadcrumbs.h"
47#include "gt/intel_gt.h"
48#include "gt/intel_gt_irq.h"
49#include "gt/intel_gt_pm_irq.h"
50#include "gt/intel_gt_regs.h"
51#include "gt/intel_rps.h"
52
53#include "i915_driver.h"
54#include "i915_drv.h"
55#include "i915_irq.h"
56#include "intel_pm.h"
57
58 /**
59 * DOC: interrupt handling
60 *
61 * These functions provide the basic support for enabling and disabling the
62 * interrupt handling support. There's a lot more functionality in i915_irq.c
63 * and related files, but that will be described in separate chapters.
64 */
65
66 /*
67 * Interrupt statistic for PMU. Increments the counter only if the
68 * interrupt originated from the GPU so interrupts from a device which
69 * shares the interrupt line are not accounted.
70 */
71 static inline void pmu_irq_stats(struct drm_i915_private *i915,
72                                  irqreturn_t res)
73 {
74     if (unlikely(res != IRQ_HANDLED))
75         return;
76
77     /*
78      * A clever compiler translates that into INC. A not so clever one
79      * should at least prevent store tearing.
80      */
81     WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
82 }
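/*
 * [Editor's sketch, not part of the driver.] The WRITE_ONCE() expansion
 * shown by the analyzer boils down to a store through a volatile-qualified
 * lvalue, which keeps the compiler from splitting ("tearing") or eliding
 * the store. A minimal standalone equivalent, with a hypothetical counter:
 */
#include <stdint.h>

static uint64_t irq_count;      /* stand-in for i915->pmu.irq_count */

static inline void count_gpu_irq(void)
{
    /* one untorn store; a concurrent reader sees either old or new value */
    *(volatile uint64_t *)&irq_count = irq_count + 1;
}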
83
84 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
85 typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
86                                     enum hpd_pin pin);
87
88 static const u32 hpd_ilk[HPD_NUM_PINS] = {
89     [HPD_PORT_A] = DE_DP_A_HOTPLUG,
90 };
91
92 static const u32 hpd_ivb[HPD_NUM_PINS] = {
93     [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
94 };
95
96 static const u32 hpd_bdw[HPD_NUM_PINS] = {
97     [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
98 };
99
100 static const u32 hpd_ibx[HPD_NUM_PINS] = {
101     [HPD_CRT] = SDE_CRT_HOTPLUG,
102     [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
103     [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
104     [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
105     [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
106 };
107
108 static const u32 hpd_cpt[HPD_NUM_PINS] = {
109     [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
110     [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
111     [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
112     [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
113     [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
114 };
115
116 static const u32 hpd_spt[HPD_NUM_PINS] = {
117     [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
118     [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
119     [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
120     [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
121     [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
122 };
123
124 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
125     [HPD_CRT] = CRT_HOTPLUG_INT_EN,
126     [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
127     [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
128     [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
129     [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
130     [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
131 };
132
133 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
134     [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
135     [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
136     [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
137     [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
138     [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
139     [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
140 };
141
142 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
143     [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
144     [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
145     [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
146     [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
147     [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
148     [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
149 };
150
151 static const u32 hpd_bxt[HPD_NUM_PINS] = {
152     [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
153     [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
154     [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
155 };
156
157 static const u32 hpd_gen11[HPD_NUM_PINS] = {
158     [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
159     [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
160     [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
161     [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
162     [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
163     [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
164 };
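/*
 * [Editor's sketch.] Reading the macro expansions the analyzer showed for
 * the table above, the gen11 hotplug bits are plain pin-relative shifts:
 * Thunderbolt bits occupy bits 0..5 and Type-C bits 16..21 for TC1..TC6.
 * A standalone illustration (the loop and names are hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (int tc = 0; tc < 6; tc++) {
        uint32_t tbt = UINT32_C(1) << tc;        /* GEN11_TBT_HOTPLUG */
        uint32_t hot = UINT32_C(1) << (16 + tc); /* GEN11_TC_HOTPLUG */
        printf("TC%d: 0x%08x\n", tc + 1, hot | tbt);
    }
    return 0;
}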
165
166 static const u32 hpd_icp[HPD_NUM_PINS] = {
167     [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
168     [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
169     [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
170     [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
171     [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
172     [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
173     [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
174     [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
175     [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
176 };
177
178 static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
179     [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
180     [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
181     [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
182     [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
183     [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
184 };
185
186 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
187 {
188     struct intel_hotplug *hpd = &dev_priv->display.hotplug;
189
190     if (HAS_GMCH(dev_priv)) {
191         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
192             IS_CHERRYVIEW(dev_priv))
193             hpd->hpd = hpd_status_g4x;
194         else
195             hpd->hpd = hpd_status_i915;
196         return;
197     }
198
199     if (DISPLAY_VER(dev_priv) >= 11)
200         hpd->hpd = hpd_gen11;
201     else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
202         hpd->hpd = hpd_bxt;
203     else if (DISPLAY_VER(dev_priv) >= 8)
204         hpd->hpd = hpd_bdw;
205     else if (DISPLAY_VER(dev_priv) >= 7)
206         hpd->hpd = hpd_ivb;
207     else
208         hpd->hpd = hpd_ilk;
209
210     if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
211         (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
212         return;
213
214     if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
215         hpd->pch_hpd = hpd_sde_dg1;
216     else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
217         hpd->pch_hpd = hpd_icp;
218     else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
219         hpd->pch_hpd = hpd_spt;
220     else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
221         hpd->pch_hpd = hpd_cpt;
222     else if (HAS_PCH_IBX(dev_priv))
223         hpd->pch_hpd = hpd_ibx;
224     else
225         MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
226 }
227
228 static void
229 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
230 {
231     struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
232
233     drm_crtc_handle_vblank(&crtc->base);
234 }
235
236 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
237                     i915_reg_t iir, i915_reg_t ier)
238 {
239     intel_uncore_write(uncore, imr, 0xffffffff);
240     intel_uncore_posting_read(uncore, imr);
241
242     intel_uncore_write(uncore, ier, 0);
243
244     /* IIR can theoretically queue up two events. Be paranoid. */
245     intel_uncore_write(uncore, iir, 0xffffffff);
246     intel_uncore_posting_read(uncore, iir);
247     intel_uncore_write(uncore, iir, 0xffffffff);
248     intel_uncore_posting_read(uncore, iir);
249 }
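/*
 * [Editor's sketch.] The reset order above matters: mask everything (IMR),
 * disable delivery (IER), then clear IIR twice because the hardware can
 * hold a second latched event behind the visible one. A self-contained
 * imitation against fake MMIO registers (names hypothetical):
 */
#include <stdint.h>

static volatile uint32_t regs[3];   /* fake IMR/IER/IIR MMIO window */
enum { IMR, IER, IIR };

static void irq_reset_pattern(void)
{
    regs[IMR] = 0xffffffff;         /* mask all sources */
    (void)regs[IMR];                /* posting read flushes the write */
    regs[IER] = 0;                  /* disable interrupt delivery */
    for (int i = 0; i < 2; i++) {   /* IIR can latch two events */
        regs[IIR] = 0xffffffff;     /* write-1-to-clear */
        (void)regs[IIR];
    }
}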
250
251 void gen2_irq_reset(struct intel_uncore *uncore)
252 {
253     intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
254     intel_uncore_posting_read16(uncore, GEN2_IMR);
255
256     intel_uncore_write16(uncore, GEN2_IER, 0);
257
258     /* IIR can theoretically queue up two events. Be paranoid. */
259     intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
260     intel_uncore_posting_read16(uncore, GEN2_IIR);
261     intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
262     intel_uncore_posting_read16(uncore, GEN2_IIR);
263 }
264
265 /*
266  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
267  */
268 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
269 {
270     u32 val = intel_uncore_read(uncore, reg);
271
272     if (val == 0)
273         return;
274
275     drm_WARN(&uncore->i915->drm, 1,
276              "Interrupt register 0x%x is not zero: 0x%08x\n",
277              i915_mmio_reg_offset(reg), val);
278     intel_uncore_write(uncore, reg, 0xffffffff);
279     intel_uncore_posting_read(uncore, reg);
280     intel_uncore_write(uncore, reg, 0xffffffff);
281     intel_uncore_posting_read(uncore, reg);
282 }
283
284 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
285 {
286     u16 val = intel_uncore_read16(uncore, GEN2_IIR);
287
288     if (val == 0)
289         return;
290
291     drm_WARN(&uncore->i915->drm, 1,
292              "Interrupt register 0x%x is not zero: 0x%08x\n",
293              i915_mmio_reg_offset(GEN2_IIR), val);
294     intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
295     intel_uncore_posting_read16(uncore, GEN2_IIR);
296     intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
297     intel_uncore_posting_read16(uncore, GEN2_IIR);
298 }
299
300 void gen3_irq_init(struct intel_uncore *uncore,
301                    i915_reg_t imr, u32 imr_val,
302                    i915_reg_t ier, u32 ier_val,
303                    i915_reg_t iir)
304 {
305     gen3_assert_iir_is_zero(uncore, iir);
306
307     intel_uncore_write(uncore, ier, ier_val);
308     intel_uncore_write(uncore, imr, imr_val);
309     intel_uncore_posting_read(uncore, imr);
310 }
311
312 void gen2_irq_init(struct intel_uncore *uncore,
313                    u32 imr_val, u32 ier_val)
314 {
315     gen2_assert_iir_is_zero(uncore);
316
317     intel_uncore_write16(uncore, GEN2_IER, ier_val);
318     intel_uncore_write16(uncore, GEN2_IMR, imr_val);
319     intel_uncore_posting_read16(uncore, GEN2_IMR);
320 }
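/*
 * [Editor's sketch.] Init mirrors reset: check that IIR is already clear,
 * program IER, then IMR, and finish with a posting read so the unmask is
 * flushed to hardware before the caller proceeds. A standalone outline
 * against hypothetical registers:
 */
#include <stdint.h>

static volatile uint32_t g_imr, g_ier, g_iir;   /* fake MMIO registers */

static void irq_init_pattern(uint32_t imr_val, uint32_t ier_val)
{
    /* a leftover IIR bit would fire as soon as IER is written */
    if (g_iir != 0)
        g_iir = 0xffffffff;     /* warn and write-1-to-clear, as above */
    g_ier = ier_val;
    g_imr = imr_val;
    (void)g_imr;                /* posting read */
}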
321
322 /* For display hotplug interrupt */
323 static inline void
324 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
325                                      u32 mask,
326                                      u32 bits)
327 {
328     u32 val;
329
330     lockdep_assert_held(&dev_priv->irq_lock);
331     drm_WARN_ON(&dev_priv->drm, bits & ~mask);
332
333     val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
334     val &= ~mask;
335     val |= bits;
336     intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
337 }
338
339 /**
340  * i915_hotplug_interrupt_update - update hotplug interrupt enable
341  * @dev_priv: driver private
342  * @mask: bits to update
343  * @bits: bits to enable
344  * NOTE: the HPD enable bits are modified both inside and outside
345  * of an interrupt context. To keep concurrent read-modify-write
346  * cycles from interfering, these bits are protected by a spinlock.
347  * Since this function is usually not called from a context where
348  * the lock is held already, this function acquires the lock itself.
349  * A non-locking version is also available.
350  */
351 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
352                                    u32 mask,
353                                    u32 bits)
354 {
355     spin_lock_irq(&dev_priv->irq_lock);
356     i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
357     spin_unlock_irq(&dev_priv->irq_lock);
358 }
359
360 /**
361  * ilk_update_display_irq - update DEIMR
362  * @dev_priv: driver private
363  * @interrupt_mask: mask of interrupt bits to update
364  * @enabled_irq_mask: mask of interrupt bits to enable
365  */
366 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
367                                    u32 interrupt_mask, u32 enabled_irq_mask)
368 {
369     u32 new_val;
370
371     lockdep_assert_held(&dev_priv->irq_lock);
372     drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
373
374     new_val = dev_priv->irq_mask;
375     new_val &= ~interrupt_mask;
376     new_val |= (~enabled_irq_mask & interrupt_mask);
377
378     if (new_val != dev_priv->irq_mask &&
379         !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
380         dev_priv->irq_mask = new_val;
381         intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
382         intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
383     }
384 }
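/*
 * [Editor's sketch.] The two-line mask update used by this and the
 * following helpers rewrites only the bits selected by interrupt_mask;
 * a bit ends up set in IMR (i.e. masked off) exactly when it is selected
 * but absent from enabled_irq_mask. Worked example with 4-bit values:
 *
 *     cur = 0b1100, interrupt_mask = 0b0110, enabled_irq_mask = 0b0010
 *     new = (0b1100 & ~0b0110) | (~0b0010 & 0b0110)
 *         =  0b1000            |  0b0100            = 0b1100
 *     -> bit 1 unmasked (enabled), bit 2 masked (disabled), bits 0/3 kept
 */
#include <stdint.h>

static inline uint32_t imr_update(uint32_t cur, uint32_t sel, uint32_t en)
{
    return (cur & ~sel) | (~en & sel);
}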
385
386 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
387 {
388     ilk_update_display_irq(i915, bits, bits);
389 }
390
391 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
392 {
393     ilk_update_display_irq(i915, bits, 0);
394 }
395
396 /**
397  * bdw_update_port_irq - update DE port interrupt
398  * @dev_priv: driver private
399  * @interrupt_mask: mask of interrupt bits to update
400  * @enabled_irq_mask: mask of interrupt bits to enable
401  */
402 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
403                                 u32 interrupt_mask,
404                                 u32 enabled_irq_mask)
405 {
406     u32 new_val;
407     u32 old_val;
408
409     lockdep_assert_held(&dev_priv->irq_lock);
410
411     drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
412
413     if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
414         return;
415
416     old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
417
418     new_val = old_val;
419     new_val &= ~interrupt_mask;
420     new_val |= (~enabled_irq_mask & interrupt_mask);
421
422     if (new_val != old_val) {
423         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
424         intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
425     }
426 }
427
428 /**
429  * bdw_update_pipe_irq - update DE pipe interrupt
430  * @dev_priv: driver private
431  * @pipe: pipe whose interrupt to update
432  * @interrupt_mask: mask of interrupt bits to update
433  * @enabled_irq_mask: mask of interrupt bits to enable
434  */
435 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
436                                 enum pipe pipe, u32 interrupt_mask,
437                                 u32 enabled_irq_mask)
438 {
439     u32 new_val;
440
441     lockdep_assert_held(&dev_priv->irq_lock);
442
443     drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
444
445     if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
446         return;
447
448     new_val = dev_priv->de_irq_mask[pipe];
449     new_val &= ~interrupt_mask;
450     new_val |= (~enabled_irq_mask & interrupt_mask);
451
452     if (new_val != dev_priv->de_irq_mask[pipe]) {
453         dev_priv->de_irq_mask[pipe] = new_val;
454         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
455         intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
456     }
457 }
458
459 void bdw_enable_pipe_irq(struct drm_i915_private *i915,
460                          enum pipe pipe, u32 bits)
461 {
462     bdw_update_pipe_irq(i915, pipe, bits, bits);
463 }
464
465 void bdw_disable_pipe_irq(struct drm_i915_private *i915,
466                           enum pipe pipe, u32 bits)
467 {
468     bdw_update_pipe_irq(i915, pipe, bits, 0);
469 }
470
471 /**
472  * ibx_display_interrupt_update - update SDEIMR
473  * @dev_priv: driver private
474  * @interrupt_mask: mask of interrupt bits to update
475  * @enabled_irq_mask: mask of interrupt bits to enable
476  */
477 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
478                                          u32 interrupt_mask,
479                                          u32 enabled_irq_mask)
480 {
481     u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
482     sdeimr &= ~interrupt_mask;
483     sdeimr |= (~enabled_irq_mask & interrupt_mask);
484
485     drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
486
487     lockdep_assert_held(&dev_priv->irq_lock);
488
489     if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
490         return;
491
492     intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
493     intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
494 }
495
496 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
497 {
498     ibx_display_interrupt_update(i915, bits, bits);
499 }
500
501 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
502 {
503     ibx_display_interrupt_update(i915, bits, 0);
504 }
505
506 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
507                               enum pipe pipe)
508 {
509     u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
510     u32 enable_mask = status_mask << 16;
511
512     lockdep_assert_held(&dev_priv->irq_lock);
513
514     if (DISPLAY_VER(dev_priv) < 5)
515         goto out;
516
517     /*
518      * On pipe A we don't support the PSR interrupt yet,
519      * on pipe B and C the same bit MBZ.
520      */
521     if (drm_WARN_ON_ONCE(&dev_priv->drm,
522                          status_mask & PIPE_A_PSR_STATUS_VLV))
523         return 0;
524     /*
525      * On pipe B and C we don't support the PSR interrupt yet, on pipe
526      * A the same bit is for perf counters which we don't use either.
527      */
528     if (drm_WARN_ON_ONCE(&dev_priv->drm,
529                          status_mask & PIPE_B_PSR_STATUS_VLV))
530         return 0;
531
532     enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
533                      SPRITE0_FLIP_DONE_INT_EN_VLV |
534                      SPRITE1_FLIP_DONE_INT_EN_VLV);
535     if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
536         enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
537     if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
538         enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
539
540 out:
541     drm_WARN_ONCE(&dev_priv->drm,
542                   enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
543                   status_mask & ~PIPESTAT_INT_STATUS_MASK,
544                   "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
545                   pipe_name(pipe), enable_mask, status_mask);
546
547     return enable_mask;
548 }
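/*
 * [Editor's sketch.] PIPESTAT packs a pipe's interrupt-enable bits in the
 * high 16 bits and the matching status bits in the low 16, so the default
 * status-to-enable translation is just a shift; only the VLV/CHV sprite
 * flip-done and PSR special cases handled above deviate from it. Minimal
 * form of the default rule:
 */
#include <stdint.h>

static inline uint32_t pipestat_enable_mask(uint32_t status_mask)
{
    return status_mask << 16;   /* enable bit = status bit + 16 */
}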
549
550 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
551                           enum pipe pipe, u32 status_mask)
552 {
553     i915_reg_t reg = PIPESTAT(pipe);
554     u32 enable_mask;
555
556     drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
557                   "pipe %c: status_mask=0x%x\n",
558                   pipe_name(pipe), status_mask);
559
560     lockdep_assert_held(&dev_priv->irq_lock);
561     drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
562
563     if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
564         return;
565
566     dev_priv->pipestat_irq_mask[pipe] |= status_mask;
567     enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
568
569     intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
570     intel_uncore_posting_read(&dev_priv->uncore, reg);
571 }
572
573 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
574                            enum pipe pipe, u32 status_mask)
575 {
576     i915_reg_t reg = PIPESTAT(pipe);
577     u32 enable_mask;
578
579     drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
580                   "pipe %c: status_mask=0x%x\n",
581                   pipe_name(pipe), status_mask);
582
583     lockdep_assert_held(&dev_priv->irq_lock);
584     drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
585
586     if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
587         return;
588
589     dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
590     enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
591
592     intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
593     intel_uncore_posting_read(&dev_priv->uncore, reg);
594 }
595
596 static bool i915_has_asle(struct drm_i915_private *dev_priv)
597 {
598     if (!dev_priv->display.opregion.asle)
599         return false;
600
601     return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
602 }
603
604 /**
605  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
606  * @dev_priv: i915 device private
607  */
608 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
609 {
610     if (!i915_has_asle(dev_priv))
611         return;
612
613     spin_lock_irq(&dev_priv->irq_lock);
614
615     i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
616     if (DISPLAY_VER(dev_priv) >= 4)
617         i915_enable_pipestat(dev_priv, PIPE_A,
618                              PIPE_LEGACY_BLC_EVENT_STATUS);
619
620     spin_unlock_irq(&dev_priv->irq_lock);
621 }
622
623 /*
624 * This timing diagram depicts the video signal in and
625 * around the vertical blanking period.
626 *
627 * Assumptions about the fictitious mode used in this example:
628 * vblank_start >= 3
629 * vsync_start = vblank_start + 1
630 * vsync_end = vblank_start + 2
631 * vtotal = vblank_start + 3
632 *
633 * start of vblank:
634 * latch double buffered registers
635 * increment frame counter (ctg+)
636 * generate start of vblank interrupt (gen4+)
637 * |
638 * | frame start:
639 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
640 * | may be shifted forward 1-3 extra lines via PIPECONF
641 * | |
642 * | | start of vsync:
643 * | | generate vsync interrupt
644 * | | |
645 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
646 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
647 * ----va---> <-----------------vb--------------------> <--------va-------------
648 * | | <----vs-----> |
649 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
650 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
651 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
652 * | | |
653 * last visible pixel first visible pixel
654 * | increment frame counter (gen3/4)
655 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
656 *
657 * x = horizontal active
658 * _ = horizontal blanking
659 * hs = horizontal sync
660 * va = vertical active
661 * vb = vertical blanking
662 * vs = vertical sync
663 * vbs = vblank_start (number)
664 *
665 * Summary:
666 * - most events happen at the start of horizontal sync
667 * - frame start happens at the start of horizontal blank, 1-4 lines
668 * (depending on PIPECONF settings) after the start of vblank
669 * - gen3/4 pixel and frame counter are synchronized with the start
670 * of horizontal active on the first line of vertical active
671 */
672
673 /* Called from drm generic code, passed a 'crtc', which
674  * we use as a pipe index
675  */
676 u32 i915_get_vblank_counter(struct drm_crtc *crtc)
677 {
678     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
679     struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
680     const struct drm_display_mode *mode = &vblank->hwmode;
681     enum pipe pipe = to_intel_crtc(crtc)->pipe;
682     i915_reg_t high_frame, low_frame;
683     u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
684     unsigned long irqflags;
685
686     /*
687      * On i965gm TV output the frame counter only works up to
688      * the point when we enable the TV encoder. After that the
689      * frame counter ceases to work and reads zero. We need a
690      * vblank wait before enabling the TV encoder and so we
691      * have to enable vblank interrupts while the frame counter
692      * is still in a working state. However the core vblank code
693      * does not like us returning non-zero frame counter values
694      * when we've told it that we don't have a working frame
695      * counter. Thus we must stop non-zero values leaking out.
696      */
697     if (!vblank->max_vblank_count)
698         return 0;
699
700     htotal = mode->crtc_htotal;
701     hsync_start = mode->crtc_hsync_start;
702     vbl_start = mode->crtc_vblank_start;
703     if (mode->flags & DRM_MODE_FLAG_INTERLACE)
704         vbl_start = DIV_ROUND_UP(vbl_start, 2);
705
706     /* Convert to pixel count */
707     vbl_start *= htotal;
708
709     /* Start of vblank event occurs at start of hsync */
710     vbl_start -= htotal - hsync_start;
711
712     high_frame = PIPEFRAME(pipe);
713     low_frame = PIPEFRAMEPIXEL(pipe);
714
715     spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
716
717     /*
718      * High & low register fields aren't synchronized, so make sure
719      * we get a low value that's stable across two reads of the high
720      * register.
721      */
722     do {
723         high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
724         low = intel_de_read_fw(dev_priv, low_frame);
725         high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
726     } while (high1 != high2);
727
728     spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
729
730     high1 >>= PIPE_FRAME_HIGH_SHIFT;
731     pixel = low & PIPE_PIXEL_MASK;
732     low >>= PIPE_FRAME_LOW_SHIFT;
733
734     /*
735      * The frame counter increments at beginning of active.
736      * Cook up a vblank counter by also checking the pixel
737      * counter against vblank start.
738      */
739     return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
740 }
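/*
 * [Editor's sketch.] The return expression above splices the 16-bit high
 * and 8-bit low frame counter fields, then adds one once the pixel counter
 * has passed vblank start, turning a counter that increments at start of
 * active into one that appears to increment at start of vblank. Standalone:
 */
#include <stdint.h>

static uint32_t cook_vblank_count(uint32_t high1, uint32_t low_reg,
                                  uint32_t vbl_start_px)
{
    uint32_t pixel = low_reg & 0x00ffffff;  /* PIPE_PIXEL_MASK */
    uint32_t low = low_reg >> 24;           /* PIPE_FRAME_LOW_SHIFT */

    return (((high1 << 8) | low) + (pixel >= vbl_start_px)) & 0xffffff;
}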
741
742 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
743 {
744     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
745     struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
746     enum pipe pipe = to_intel_crtc(crtc)->pipe;
747
748     if (!vblank->max_vblank_count)
749         return 0;
750
751     return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
752 }
753
754 static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
755 {
756     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
757     struct drm_vblank_crtc *vblank =
758         &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
759     const struct drm_display_mode *mode = &vblank->hwmode;
760     u32 htotal = mode->crtc_htotal;
761     u32 clock = mode->crtc_clock;
762     u32 scan_prev_time, scan_curr_time, scan_post_time;
763
764     /*
765      * To avoid the race condition where we might cross into the
766      * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
767      * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
768      * during the same frame.
769      */
770     do {
771         /*
772          * This field provides read back of the display
773          * pipe frame time stamp. The time stamp value
774          * is sampled at every start of vertical blank.
775          */
776         scan_prev_time = intel_de_read_fw(dev_priv,
777                                           PIPE_FRMTMSTMP(crtc->pipe));
778
779         /*
780          * The TIMESTAMP_CTR register has the current
781          * time stamp value.
782          */
783         scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
784
785         scan_post_time = intel_de_read_fw(dev_priv,
786                                           PIPE_FRMTMSTMP(crtc->pipe));
787     } while (scan_post_time != scan_prev_time);
788
789     return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
790                                clock), 1000 * htotal);
791 }
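/*
 * [Editor's sketch.] The div_u64() above converts a timestamp delta into
 * scanlines. Assuming the timestamp counter ticks at 1 MHz, crtc_clock is
 * in kHz, and htotal is in pixels, lines = us * kHz / (1000 * htotal).
 * E.g. a 500 us delta at clock = 148500, htotal = 2200:
 * 500 * 148500 / (1000 * 2200) = 33 lines (rounded down).
 */
#include <stdint.h>

static inline uint32_t scanlines_since(uint64_t delta_us, uint32_t clock_khz,
                                       uint32_t htotal)
{
    return (uint32_t)(delta_us * clock_khz / (1000ull * htotal));
}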
792
793 /*
794  * On certain encoders on certain platforms, the pipe scanline
795  * register will not work to get the scanline, either because the
796  * timings are driven from the PORT or because of issues with
797  * scanline register updates.
798  * This function will use the Framestamp and current timestamp
799  * registers to calculate the scanline.
800  */
801 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
802 {
803     struct drm_vblank_crtc *vblank =
804         &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
805     const struct drm_display_mode *mode = &vblank->hwmode;
806     u32 vblank_start = mode->crtc_vblank_start;
807     u32 vtotal = mode->crtc_vtotal;
808     u32 scanline;
809
810     scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
811     scanline = min(scanline, vtotal - 1);
812     scanline = (scanline + vblank_start) % vtotal;
813
814     return scanline;
815 }
816
817 /*
818  * intel_de_read_fw(), only for fast reads of display block, no need for
819  * forcewake etc.
820  */
821 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
822 {
823     struct drm_device *dev = crtc->base.dev;
824     struct drm_i915_private *dev_priv = to_i915(dev);
825     const struct drm_display_mode *mode;
826     struct drm_vblank_crtc *vblank;
827     enum pipe pipe = crtc->pipe;
828     int position, vtotal;
829
830     if (!crtc->active)
831         return 0;
832
833     vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
834     mode = &vblank->hwmode;
835
836     if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
837         return __intel_get_crtc_scanline_from_timestamp(crtc);
838
839     vtotal = mode->crtc_vtotal;
840     if (mode->flags & DRM_MODE_FLAG_INTERLACE)
841         vtotal /= 2;
842
843     position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
844
845     /*
846      * On HSW, the DSL reg (0x70000) appears to return 0 if we
847      * read it just before the start of vblank. So try it again
848      * so we don't accidentally end up spanning a vblank frame
849      * increment, causing the pipe_update_end() code to squawk at us.
850      *
851      * The nature of this problem means we can't simply check the ISR
852      * bit and return the vblank start value; nor can we use the scanline
853      * debug register in the transcoder as it appears to have the same
854      * problem. We may need to extend this to include other platforms,
855      * but so far testing only shows the problem on HSW.
856      */
857     if (HAS_DDI(dev_priv) && !position) {
858         int i, temp;
859
860         for (i = 0; i < 100; i++) {
861             udelay(1);
862             temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
863             if (temp != position) {
864                 position = temp;
865                 break;
866             }
867         }
868     }
869
870     /*
871      * See update_scanline_offset() for the details on the
872      * scanline_offset adjustment.
873      */
874     return (position + crtc->scanline_offset) % vtotal;
875 }
876
877 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
878                                      bool in_vblank_irq,
879                                      int *vpos, int *hpos,
880                                      ktime_t *stime, ktime_t *etime,
881                                      const struct drm_display_mode *mode)
882 {
883     struct drm_device *dev = _crtc->dev;
884     struct drm_i915_private *dev_priv = to_i915(dev);
885     struct intel_crtc *crtc = to_intel_crtc(_crtc);
886     enum pipe pipe = crtc->pipe;
887     int position;
888     int vbl_start, vbl_end, hsync_start, htotal, vtotal;
889     unsigned long irqflags;
890     bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
891         IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
892         crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
893
894     if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
895         drm_dbg(&dev_priv->drm,
896                 "trying to get scanoutpos for disabled "
897                 "pipe %c\n", pipe_name(pipe));
898         return false;
899     }
900
901     htotal = mode->crtc_htotal;
902     hsync_start = mode->crtc_hsync_start;
903     vtotal = mode->crtc_vtotal;
904     vbl_start = mode->crtc_vblank_start;
905     vbl_end = mode->crtc_vblank_end;
906
907     if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
908         vbl_start = DIV_ROUND_UP(vbl_start, 2);
909         vbl_end /= 2;
910         vtotal /= 2;
911     }
912
913     /*
914      * Lock uncore.lock, as we will do multiple timing critical raw
915      * register reads, potentially with preemption disabled, so the
916      * following code must not block on uncore.lock.
917      */
918     spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
919
920     /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
921
922     /* Get optional system timestamp before query. */
923     if (stime)
924         *stime = ktime_get();
925
926     if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
927         int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
928
929         position = __intel_get_crtc_scanline(crtc);
930
931         /*
932          * Already exiting vblank? If so, shift our position
933          * so it looks like we're already approaching the full
934          * vblank end. This should make the generated timestamp
935          * more or less match when the active portion will start.
936          */
937         if (position >= vbl_start && scanlines < position)
938             position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
939     } else if (use_scanline_counter) {
940         /* No obvious pixelcount register. Only query vertical
941          * scanout position from Display scan line register.
942          */
943         position = __intel_get_crtc_scanline(crtc);
944     } else {
945         /* Have access to pixelcount since start of frame.
946          * We can split this into vertical and horizontal
947          * scanout position.
948          */
949         position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
950
951         /* convert to pixel counts */
952         vbl_start *= htotal;
953         vbl_end *= htotal;
954         vtotal *= htotal;
955
956         /*
957          * In interlaced modes, the pixel counter counts all pixels,
958          * so one field will have htotal more pixels. To keep the
959          * reported position from jumping backwards when the pixel
960          * counter is beyond the length of the shorter field, just
961          * clamp the position to the length of the shorter field. This
962          * matches how the scanline counter based position works since
963          * the scanline counter doesn't count the two half lines.
964          */
965         if (position >= vtotal)
966             position = vtotal - 1;
967
968         /*
969          * Start of vblank interrupt is triggered at start of hsync,
970          * just prior to the first active line of vblank. However we
971          * consider lines to start at the leading edge of horizontal
972          * active. So, should we get here before we've crossed into
973          * the horizontal active of the first line in vblank, we would
974          * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
975          * always add htotal-hsync_start to the current pixel position.
976          */
977         position = (position + htotal - hsync_start) % vtotal;
978     }
979
980     /* Get optional system timestamp after query. */
981     if (etime)
982         *etime = ktime_get();
983
984     /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
985
986     spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
987
988     /*
989      * While in vblank, position will be negative
990      * counting up towards 0 at vbl_end. And outside
991      * vblank, position will be positive counting
992      * up since vbl_end.
993      */
994     if (position >= vbl_start)
995         position -= vbl_end;
996     else
997         position += vtotal - vbl_end;
998
999     if (use_scanline_counter) {
1000         *vpos = position;
1001         *hpos = 0;
1002     } else {
1003         *vpos = position / htotal;
1004         *hpos = position - (*vpos * htotal);
1005     }
1006
1007     return true;
1008 }
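/*
 * [Editor's sketch.] The final normalization above gives the position a
 * sign: negative while scanning out vblank (counting up to 0 at vbl_end),
 * non-negative in the active area. E.g. with vtotal = vbl_end = 1125 and
 * vbl_start = 1084: scanline 1100 -> 1100 - 1125 = -25 (inside vblank),
 * scanline 10 -> 10 + (1125 - 1125) = 10 (active scanout).
 */
static inline int normalize_scanout_pos(int pos, int vbl_start, int vbl_end,
                                        int vtotal)
{
    return pos >= vbl_start ? pos - vbl_end : pos + vtotal - vbl_end;
}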
1009
1010bool_Bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
1011 ktime_t *vblank_time, bool_Bool in_vblank_irq)
1012{
1013 return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
1014 crtc, max_error, vblank_time, in_vblank_irq,
1015 i915_get_crtc_scanoutpos);
1016}
1017
1018int intel_get_crtc_scanline(struct intel_crtc *crtc)
1019{
1020 struct drm_i915_privateinteldrm_softc *dev_priv = to_i915(crtc->base.dev);
1021 unsigned long irqflags;
1022 int position;
1023
1024 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)do { irqflags = 0; mtx_enter(&dev_priv->uncore.lock); }
while (0)
;
1025 position = __intel_get_crtc_scanline(crtc);
1026 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)do { (void)(irqflags); mtx_leave(&dev_priv->uncore.lock
); } while (0)
;
1027
1028 return position;
1029}
1030
1031/**
1032 * ivb_parity_work - Workqueue called when a parity error interrupt
1033 * occurred.
1034 * @work: workqueue struct
1035 *
1036 * Doesn't actually do anything except notify userspace. As a consequence of
1037 * this event, userspace should try to remap the bad rows, since
1038 * statistically the same row is likely to go bad again.
1039 */
1040static void ivb_parity_work(struct work_struct *work)
1041{
1042 struct drm_i915_privateinteldrm_softc *dev_priv =
1043 container_of(work, typeof(*dev_priv), l3_parity.error_work)({ const __typeof( ((typeof(*dev_priv) *)0)->l3_parity.error_work
) *__mptr = (work); (typeof(*dev_priv) *)( (char *)__mptr - __builtin_offsetof
(typeof(*dev_priv), l3_parity.error_work) );})
;
1044 struct intel_gt *gt = to_gt(dev_priv);
1045 u32 error_status, row, bank, subbank;
1046 char *parity_event[6];
1047 u32 misccpctl;
1048 u8 slice = 0;
1049
1050 /* We must turn off DOP level clock gating to access the L3 registers.
1051 * In order to prevent a get/put style interface, acquire struct mutex
1052 * any time we access those registers.
1053 */
1054 mutex_lock(&dev_priv->drm.struct_mutex)rw_enter_write(&dev_priv->drm.struct_mutex);
1055
1056 /* If we've screwed up tracking, just let the interrupt fire again */
1057 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)({ int __ret = !!((!dev_priv->l3_parity.which_slice)); if (
__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv
->drm))->dev), "", "drm_WARN_ON(" "!dev_priv->l3_parity.which_slice"
")"); __builtin_expect(!!(__ret), 0); })
)
1058 goto out;
1059
1060 misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }));
1061 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE(1 << 0));
1062 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL)((void)intel_uncore_read_notrace(&dev_priv->uncore, ((
const i915_reg_t){ .reg = (0x9424) })))
;
1063
1064 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1065 i915_reg_t reg;
1066
1067 slice--;
1068 if (drm_WARN_ON_ONCE(&dev_priv->drm,({ static int __warned; int __ret = !!((slice >= ((IS_PLATFORM
(dev_priv, INTEL_HASWELL) && (&(dev_priv)->__info
)->gt == 3) ? 2 : ((&(dev_priv)->__info)->has_l3_dpf
)))); if (__ret && !__warned) { printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON_ONCE("
"slice >= ((IS_PLATFORM(dev_priv, INTEL_HASWELL) && (&(dev_priv)->__info)->gt == 3) ? 2 : ((&(dev_priv)->__info)->has_l3_dpf))"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1069 slice >= NUM_L3_SLICES(dev_priv))({ static int __warned; int __ret = !!((slice >= ((IS_PLATFORM
(dev_priv, INTEL_HASWELL) && (&(dev_priv)->__info
)->gt == 3) ? 2 : ((&(dev_priv)->__info)->has_l3_dpf
)))); if (__ret && !__warned) { printf("%s %s: " "%s"
, dev_driver_string(((&dev_priv->drm))->dev), "", "drm_WARN_ON_ONCE("
"slice >= ((IS_PLATFORM(dev_priv, INTEL_HASWELL) && (&(dev_priv)->__info)->gt == 3) ? 2 : ((&(dev_priv)->__info)->has_l3_dpf))"
")"); __warned = 1; } __builtin_expect(!!(__ret), 0); })
)
1070 break;
1071
1072 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1073
1074 reg = GEN7_L3CDERRST1(slice)((const i915_reg_t){ .reg = (0xB008 + (slice) * 0x200) });
1075
1076 error_status = intel_uncore_read(&dev_priv->uncore, reg);
1077 row = GEN7_PARITY_ERROR_ROW(error_status)(((error_status) & (0x7ff << 14)) >> 14);
1078 bank = GEN7_PARITY_ERROR_BANK(error_status)(((error_status) & (3 << 11)) >> 11);
1079 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status)(((error_status) & (7 << 8)) >> 8);
1080
1081 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID(1 << 13) | GEN7_L3CDERRST1_ENABLE(1 << 7));
1082 intel_uncore_posting_read(&dev_priv->uncore, reg)((void)intel_uncore_read_notrace(&dev_priv->uncore, reg
))
;
1083
1084 parity_event[0] = I915_L3_PARITY_UEVENT"L3_PARITY_ERROR" "=1";
1085 parity_event[1] = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "ROW=%d", row);
1086 parity_event[2] = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "BANK=%d", bank);
1087 parity_event[3] = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "SUBBANK=%d", subbank);
1088 parity_event[4] = kasprintf(GFP_KERNEL(0x0001 | 0x0004), "SLICE=%d", slice);
1089 parity_event[5] = NULL((void *)0);
1090
1091 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1092 KOBJ_CHANGE, parity_event);
1093
1094 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",___drm_dbg(((void *)0), DRM_UT_CORE, "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n"
, slice, row, bank, subbank)
1095 slice, row, bank, subbank)___drm_dbg(((void *)0), DRM_UT_CORE, "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n"
, slice, row, bank, subbank)
;
1096
1097 kfree(parity_event[4]);
1098 kfree(parity_event[3]);
1099 kfree(parity_event[2]);
1100 kfree(parity_event[1]);
1101 }
1102
1103 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL((const i915_reg_t){ .reg = (0x9424) }), misccpctl);
1104
1105out:
1106 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice)({ int __ret = !!((dev_priv->l3_parity.which_slice)); if (
__ret) printf("%s %s: " "%s", dev_driver_string(((&dev_priv
->drm))->dev), "", "drm_WARN_ON(" "dev_priv->l3_parity.which_slice"
")"); __builtin_expect(!!(__ret), 0); })
;
1107 spin_lock_irq(gt->irq_lock)mtx_enter(gt->irq_lock);
1108 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)((1 << 5) | (IS_PLATFORM(dev_priv, INTEL_HASWELL) ? (1 <<
11) : 0))
);
1109 spin_unlock_irq(gt->irq_lock)mtx_leave(gt->irq_lock);
1110
1111 mutex_unlock(&dev_priv->drm.struct_mutex)rw_exit_write(&dev_priv->drm.struct_mutex);
1112}
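The ffs()-driven loop in ivb_parity_work() is a standard way to drain a pending-bit mask one bit at a time: ffs() returns the 1-based index of the lowest set bit, the handler converts it to a 0-based slice number and clears that bit before servicing it. Reduced to a standalone program with a hypothetical mask:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int which_slice = 0x5;	/* hypothetical: slices 0 and 2 pending */
	int slice;

	/* ffs() returns a 1-based bit index, or 0 once no bits remain */
	while ((slice = ffs(which_slice)) != 0) {
		slice--;			/* 0-based slice number */
		which_slice &= ~(1U << slice);	/* consume this slice's bit */
		printf("servicing slice %d\n", slice);
	}
	return 0;
}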
1113
1114static bool_Bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1115{
1116 switch (pin) {
1117 case HPD_PORT_TC1:
1118 case HPD_PORT_TC2:
1119 case HPD_PORT_TC3:
1120 case HPD_PORT_TC4:
1121 case HPD_PORT_TC5:
1122 case HPD_PORT_TC6:
1123 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin)(2 << (((pin) - HPD_PORT_TC1) * 4));
1124 default:
1125 return false0;
1126 }
1127}
1128
1129static bool_Bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1130{
1131 switch (pin) {
1132 case HPD_PORT_A:
1133 return val & PORTA_HOTPLUG_LONG_DETECT(2 << 24);
1134 case HPD_PORT_B:
1135 return val & PORTB_HOTPLUG_LONG_DETECT(2 << 0);
1136 case HPD_PORT_C:
1137 return val & PORTC_HOTPLUG_LONG_DETECT(2 << 8);
1138 default:
1139 return false0;
1140 }
1141}
1142
1143static bool_Bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1144{
1145 switch (pin) {
1146 case HPD_PORT_A:
1147 case HPD_PORT_B:
1148 case HPD_PORT_C:
1149 case HPD_PORT_D:
1150 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin)(0x2 << (((pin) - HPD_PORT_A) * 4));
1151 default:
1152 return false0;
1153 }
1154}
1155
1156static bool_Bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1157{
1158 switch (pin) {
1159 case HPD_PORT_TC1:
1160 case HPD_PORT_TC2:
1161 case HPD_PORT_TC3:
1162 case HPD_PORT_TC4:
1163 case HPD_PORT_TC5:
1164 case HPD_PORT_TC6:
1165 return val & ICP_TC_HPD_LONG_DETECT(pin)(2 << (((pin) - HPD_PORT_TC1) * 4));
1166 default:
1167 return false0;
1168 }
1169}
1170
1171static bool_Bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1172{
1173 switch (pin) {
1174 case HPD_PORT_E:
1175 return val & PORTE_HOTPLUG_LONG_DETECT(2 << 0);
1176 default:
1177 return false0;
1178 }
1179}
1180
1181static bool_Bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1182{
1183 switch (pin) {
1184 case HPD_PORT_A:
1185 return val & PORTA_HOTPLUG_LONG_DETECT(2 << 24);
1186 case HPD_PORT_B:
1187 return val & PORTB_HOTPLUG_LONG_DETECT(2 << 0);
1188 case HPD_PORT_C:
1189 return val & PORTC_HOTPLUG_LONG_DETECT(2 << 8);
1190 case HPD_PORT_D:
1191 return val & PORTD_HOTPLUG_LONG_DETECT(2 << 16);
1192 default:
1193 return false0;
1194 }
1195}
1196
1197static bool_Bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1198{
1199 switch (pin) {
1200 case HPD_PORT_A:
1201 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT(2 << 0);
1202 default:
1203 return false0;
1204 }
1205}
1206
1207static bool_Bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1208{
1209 switch (pin) {
1210 case HPD_PORT_B:
1211 return val & PORTB_HOTPLUG_LONG_DETECT(2 << 0);
1212 case HPD_PORT_C:
1213 return val & PORTC_HOTPLUG_LONG_DETECT(2 << 8);
1214 case HPD_PORT_D:
1215 return val & PORTD_HOTPLUG_LONG_DETECT(2 << 16);
1216 default:
1217 return false0;
1218 }
1219}
1220
1221static bool_Bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1222{
1223 switch (pin) {
1224 case HPD_PORT_B:
1225 return val & PORTB_HOTPLUG_INT_LONG_PULSE(2 << 17);
1226 case HPD_PORT_C:
1227 return val & PORTC_HOTPLUG_INT_LONG_PULSE(2 << 19);
1228 case HPD_PORT_D:
1229 return val & PORTD_HOTPLUG_INT_LONG_PULSE(2 << 21);
1230 default:
1231 return false0;
1232 }
1233}
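All of these *_long_detect() helpers have the same shape: test a per-port "long pulse" bit in the latched hotplug value. A standalone decode of a hypothetical i9xx-style status word, assuming only the bit layout shown above:

#include <stdio.h>
#include <stdint.h>

/* mirrors the i9xx long-pulse bit layout above */
#define PORTB_LONG	(2U << 17)
#define PORTC_LONG	(2U << 19)
#define PORTD_LONG	(2U << 21)

int main(void)
{
	uint32_t val = PORTB_LONG | PORTD_LONG;	/* hypothetical status */

	printf("port B: %s pulse\n", (val & PORTB_LONG) ? "long" : "short");
	printf("port C: %s pulse\n", (val & PORTC_LONG) ? "long" : "short");
	printf("port D: %s pulse\n", (val & PORTD_LONG) ? "long" : "short");
	return 0;
}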
1234
1235/*
1236 * Get a bit mask of pins that have triggered, and which ones may be long.
1237 * This can be called multiple times with the same masks to accumulate
1238 * hotplug detection results from several registers.
1239 *
1240 * Note that the caller is expected to zero out the masks initially.
1241 */
1242static void intel_get_hpd_pins(struct drm_i915_privateinteldrm_softc *dev_priv,
1243 u32 *pin_mask, u32 *long_mask,
1244 u32 hotplug_trigger, u32 dig_hotplug_reg,
1245 const u32 hpd[HPD_NUM_PINS],
1246 bool_Bool long_pulse_detect(enum hpd_pin pin, u32 val))
1247{
1248 enum hpd_pin pin;
1249
1250 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS)extern char _ctassert[(!((8 * sizeof(*pin_mask)) < HPD_NUM_PINS
)) ? 1 : -1 ] __attribute__((__unused__))
;
1251
1252 for_each_hpd_pin(pin)for ((pin) = (HPD_NONE + 1); (pin) < HPD_NUM_PINS; (pin)++
)
{
1253 if ((hpd[pin] & hotplug_trigger) == 0)
1254 continue;
1255
1256 *pin_mask |= BIT(pin)(1UL << (pin));
1257
1258 if (long_pulse_detect(pin, dig_hotplug_reg))
1259 *long_mask |= BIT(pin)(1UL << (pin));
1260 }
1261
1262 drm_dbg(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n"
, hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask)
1263 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n"
, hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask)
1264 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask)__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n"
, hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask)
;
1265
1266}
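Because intel_get_hpd_pins() only ORs bits into *pin_mask and *long_mask, a caller zeroes the masks once and may accumulate results across several trigger registers; icp_irq_handler() later in this file does exactly that with its DDI and TC passes. A simplified standalone model of the accumulation contract (8 pins, made-up trigger values):

#include <stdio.h>
#include <stdint.h>

/* simplified model of intel_get_hpd_pins()'s accumulation contract */
static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
    uint32_t trigger, uint32_t long_bits)
{
	uint32_t pin;

	for (pin = 0; pin < 8; pin++) {
		if (!(trigger & (1U << pin)))
			continue;
		*pin_mask |= 1U << pin;
		if (long_bits & (1U << pin))
			*long_mask |= 1U << pin;
	}
}

int main(void)
{
	uint32_t pin_mask = 0, long_mask = 0;	/* zeroed once by the caller */

	/* hypothetical: a DDI pass, then a TC pass; the masks accumulate */
	get_hpd_pins(&pin_mask, &long_mask, 0x01, 0x01);
	get_hpd_pins(&pin_mask, &long_mask, 0x06, 0x02);

	printf("pins 0x%02x, long 0x%02x\n", pin_mask, long_mask);
	return 0;
}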
1267
1268static u32 intel_hpd_enabled_irqs(struct drm_i915_privateinteldrm_softc *dev_priv,
1269 const u32 hpd[HPD_NUM_PINS])
1270{
1271 struct intel_encoder *encoder;
1272 u32 enabled_irqs = 0;
1273
1274 for_each_intel_encoder(&dev_priv->drm, encoder)for (encoder = ({ const __typeof( ((__typeof(*encoder) *)0)->
base.head ) *__mptr = ((&(&dev_priv->drm)->mode_config
.encoder_list)->next); (__typeof(*encoder) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*encoder), base.head) );}); &
encoder->base.head != (&(&dev_priv->drm)->mode_config
.encoder_list); encoder = ({ const __typeof( ((__typeof(*encoder
) *)0)->base.head ) *__mptr = (encoder->base.head.next)
; (__typeof(*encoder) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*encoder), base.head) );}))
1275 if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1276 enabled_irqs |= hpd[encoder->hpd_pin];
1277
1278 return enabled_irqs;
1279}
1280
1281static u32 intel_hpd_hotplug_irqs(struct drm_i915_privateinteldrm_softc *dev_priv,
1282 const u32 hpd[HPD_NUM_PINS])
1283{
1284 struct intel_encoder *encoder;
1285 u32 hotplug_irqs = 0;
1286
1287 for_each_intel_encoder(&dev_priv->drm, encoder)for (encoder = ({ const __typeof( ((__typeof(*encoder) *)0)->
base.head ) *__mptr = ((&(&dev_priv->drm)->mode_config
.encoder_list)->next); (__typeof(*encoder) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*encoder), base.head) );}); &
encoder->base.head != (&(&dev_priv->drm)->mode_config
.encoder_list); encoder = ({ const __typeof( ((__typeof(*encoder
) *)0)->base.head ) *__mptr = (encoder->base.head.next)
; (__typeof(*encoder) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*encoder), base.head) );}))
1288 hotplug_irqs |= hpd[encoder->hpd_pin];
1289
1290 return hotplug_irqs;
1291}
1292
1293static u32 intel_hpd_hotplug_enables(struct drm_i915_privateinteldrm_softc *i915,
1294 hotplug_enables_func hotplug_enables)
1295{
1296 struct intel_encoder *encoder;
1297 u32 hotplug = 0;
1298
1299 for_each_intel_encoder(&i915->drm, encoder)for (encoder = ({ const __typeof( ((__typeof(*encoder) *)0)->
base.head ) *__mptr = ((&(&i915->drm)->mode_config
.encoder_list)->next); (__typeof(*encoder) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*encoder), base.head) );}); &
encoder->base.head != (&(&i915->drm)->mode_config
.encoder_list); encoder = ({ const __typeof( ((__typeof(*encoder
) *)0)->base.head ) *__mptr = (encoder->base.head.next)
; (__typeof(*encoder) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*encoder), base.head) );}))
1300 hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1301
1302 return hotplug;
1303}
1304
1305static void gmbus_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv)
1306{
1307 wake_up_all(&dev_priv->display.gmbus.wait_queue)wake_up(&dev_priv->display.gmbus.wait_queue);
1308}
1309
1310static void dp_aux_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv)
1311{
1312 wake_up_all(&dev_priv->display.gmbus.wait_queue)wake_up(&dev_priv->display.gmbus.wait_queue);
1313}
1314
1315#if defined(CONFIG_DEBUG_FS)
1316static void display_pipe_crc_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1317 enum pipe pipe,
1318 u32 crc0, u32 crc1,
1319 u32 crc2, u32 crc3,
1320 u32 crc4)
1321{
1322 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1323 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1324 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1325
1326 trace_intel_pipe_crc(crtc, crcs);
1327
1328 spin_lock(&pipe_crc->lock)mtx_enter(&pipe_crc->lock);
1329 /*
1330 * For some not yet identified reason, the first CRC is
1331 * bonkers. So let's just wait for the next vblank and read
1332 * out the buggy result.
1333 *
1334 * On GEN8+ sometimes the second CRC is bonkers as well, so
1335 * don't trust that one either.
1336 */
1337 if (pipe_crc->skipped <= 0 ||
1338 (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 8 && pipe_crc->skipped == 1)) {
1339 pipe_crc->skipped++;
1340 spin_unlock(&pipe_crc->lock)mtx_leave(&pipe_crc->lock);
1341 return;
1342 }
1343 spin_unlock(&pipe_crc->lock)mtx_leave(&pipe_crc->lock);
1344
1345 drm_crtc_add_crc_entry(&crtc->base, true1,
1346 drm_crtc_accurate_vblank_count(&crtc->base),
1347 crcs);
1348}
1349#else
1350static inline void
1351display_pipe_crc_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1352 enum pipe pipe,
1353 u32 crc0, u32 crc1,
1354 u32 crc2, u32 crc3,
1355 u32 crc4) {}
1356#endif
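The skip decision above reduces to a small gate on the per-pipe counter: drop the first CRC always, and on display version 8+ the second one as well. Isolated as a standalone sketch, with the display version passed as a plain parameter:

#include <stdbool.h>
#include <stdio.h>

static bool crc_should_skip(int *skipped, int display_ver)
{
	/* drop the first sample, and on ver >= 8 the second one too */
	if (*skipped <= 0 || (display_ver >= 8 && *skipped == 1)) {
		(*skipped)++;
		return true;
	}
	return false;
}

int main(void)
{
	int skipped = 0, frame;

	for (frame = 0; frame < 4; frame++)
		printf("frame %d: %s\n", frame,
		    crc_should_skip(&skipped, 9) ? "skipped" : "reported");
	return 0;
}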
1357
1358static void flip_done_handler(struct drm_i915_privateinteldrm_softc *i915,
1359 enum pipe pipe)
1360{
1361 struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
1362 struct drm_crtc_state *crtc_state = crtc->base.state;
1363 struct drm_pending_vblank_event *e = crtc_state->event;
1364 struct drm_device *dev = &i915->drm;
1365 unsigned long irqflags;
1366
1367 spin_lock_irqsave(&dev->event_lock, irqflags)do { irqflags = 0; mtx_enter(&dev->event_lock); } while
(0)
;
1368
1369 crtc_state->event = NULL((void *)0);
1370
1371 drm_crtc_send_vblank_event(&crtc->base, e);
1372
1373 spin_unlock_irqrestore(&dev->event_lock, irqflags)do { (void)(irqflags); mtx_leave(&dev->event_lock); } while
(0)
;
1374}
1375
1376static void hsw_pipe_crc_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1377 enum pipe pipe)
1378{
1379 display_pipe_crc_irq_handler(dev_priv, pipe,
1380 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60064)) })
),
1381 0, 0, 0, 0);
1382}
1383
1384static void ivb_pipe_crc_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1385 enum pipe pipe)
1386{
1387 display_pipe_crc_irq_handler(dev_priv, pipe,
1388 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60064)) })
),
1389 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60068)) })
),
1390 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x6006c)) })
),
1391 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60070)) })
),
1392 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60074)) })
));
1393}
1394
1395static void i9xx_pipe_crc_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1396 enum pipe pipe)
1397{
1398 u32 res1, res2;
1399
1400 if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 3)
1401 res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x6006c)) })
);
1402 else
1403 res1 = 0;
1404
1405 if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 5 || IS_G4X(dev_priv)(IS_PLATFORM(dev_priv, INTEL_G45) || IS_PLATFORM(dev_priv, INTEL_GM45
))
)
1406 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60080)) })
);
1407 else
1408 res2 = 0;
1409
1410 display_pipe_crc_irq_handler(dev_priv, pipe,
1411 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60060)) })
),
1412 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60064)) })
),
1413 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.trans_offsets[(pipe)] - (&(dev_priv)->__info)->
display.trans_offsets[TRANSCODER_A] + ((&(dev_priv)->__info
)->display.mmio_offset) + (0x60068)) })
),
1414 res1, res2);
1415}
1416
1417static void i9xx_pipestat_irq_reset(struct drm_i915_privateinteldrm_softc *dev_priv)
1418{
1419 enum pipe pipe;
1420
1421 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1422 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.pipe_offsets[(pipe)] - (&(dev_priv)->__info)->
display.pipe_offsets[PIPE_A] + ((&(dev_priv)->__info)->
display.mmio_offset) + (0x70024)) })
,
1423 PIPESTAT_INT_STATUS_MASK0x0000ffff |
1424 PIPE_FIFO_UNDERRUN_STATUS(1UL << 31));
1425
1426 dev_priv->pipestat_irq_mask[pipe] = 0;
1427 }
1428}
1429
1430static void i9xx_pipestat_irq_ack(struct drm_i915_privateinteldrm_softc *dev_priv,
1431 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1432{
1433 enum pipe pipe;
1434
1435 spin_lock(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
1436
1437 if (!dev_priv->display_irqs_enabled) {
1438 spin_unlock(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
1439 return;
1440 }
1441
1442 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1443 i915_reg_t reg;
1444 u32 status_mask, enable_mask, iir_bit = 0;
1445
1446 /*
1447 * PIPESTAT bits get signalled even when the interrupt is
1448 * disabled with the mask bits, and some of the status bits do
1449 * not generate interrupts at all (like the underrun bit). Hence
1450 * we need to be careful that we only handle what we want to
1451 * handle.
1452 */
1453
1454 /* fifo underruns are filtered in the underrun handler. */
1455 status_mask = PIPE_FIFO_UNDERRUN_STATUS(1UL << 31);
1456
1457 switch (pipe) {
1458 default:
1459 case PIPE_A:
1460 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT(1 << 6);
1461 break;
1462 case PIPE_B:
1463 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT(1 << 4);
1464 break;
1465 case PIPE_C:
1466 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT(1 << 9);
1467 break;
1468 }
1469 if (iir & iir_bit)
1470 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1471
1472 if (!status_mask)
1473 continue;
1474
1475 reg = PIPESTAT(pipe)((const i915_reg_t){ .reg = ((&(dev_priv)->__info)->
display.pipe_offsets[(pipe)] - (&(dev_priv)->__info)->
display.pipe_offsets[PIPE_A] + ((&(dev_priv)->__info)->
display.mmio_offset) + (0x70024)) })
;
1476 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1477 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1478
1479 /*
1480 * Clear the PIPE*STAT regs before the IIR
1481 *
1482 * Toggle the enable bits to make sure we get an
1483 * edge in the ISR pipe event bit if we don't clear
1484 * all the enabled status bits. Otherwise the edge
1485 * triggered IIR on i965/g4x wouldn't notice that
1486 * an interrupt is still pending.
1487 */
1488 if (pipe_stats[pipe]) {
1489 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1490 intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1491 }
1492 }
1493 spin_unlock(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
1494}
1495
1496static void i8xx_pipestat_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1497 u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1498{
1499 enum pipe pipe;
1500
1501 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1502 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS(1UL << 1))
1503 intel_handle_vblank(dev_priv, pipe);
1504
1505 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS(1UL << 12))
1506 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1507
1508 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS(1UL << 31))
1509 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1510 }
1511}
1512
1513static void i915_pipestat_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1514 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1515{
1516 bool_Bool blc_event = false0;
1517 enum pipe pipe;
1518
1519 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1520 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS(1UL << 1))
1521 intel_handle_vblank(dev_priv, pipe);
1522
1523 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS(1UL << 6))
1524 blc_event = true1;
1525
1526 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS(1UL << 12))
1527 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1528
1529 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS(1UL << 31))
1530 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1531 }
1532
1533 if (blc_event || (iir & I915_ASLE_INTERRUPT(1 << 0)))
1534 intel_opregion_asle_intr(dev_priv);
1535}
1536
1537static void i965_pipestat_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1538 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1539{
1540 bool_Bool blc_event = false0;
1541 enum pipe pipe;
1542
1543 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1544 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS(1UL << 2))
1545 intel_handle_vblank(dev_priv, pipe);
1546
1547 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS(1UL << 6))
1548 blc_event = true1;
1549
1550 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS(1UL << 12))
1551 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1552
1553 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS(1UL << 31))
1554 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1555 }
1556
1557 if (blc_event || (iir & I915_ASLE_INTERRUPT(1 << 0)))
1558 intel_opregion_asle_intr(dev_priv);
1559
1560 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS(1UL << 11))
1561 gmbus_irq_handler(dev_priv);
1562}
1563
1564static void valleyview_pipestat_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1565 u32 pipe_stats[I915_MAX_PIPES])
1566{
1567 enum pipe pipe;
1568
1569 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1570 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS(1UL << 2))
1571 intel_handle_vblank(dev_priv, pipe);
1572
1573 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV(1UL << 10))
1574 flip_done_handler(dev_priv, pipe);
1575
1576 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS(1UL << 12))
1577 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1578
1579 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS(1UL << 31))
1580 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1581 }
1582
1583 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS(1UL << 11))
1584 gmbus_irq_handler(dev_priv);
1585}
1586
1587static u32 i9xx_hpd_irq_ack(struct drm_i915_privateinteldrm_softc *dev_priv)
1588{
1589 u32 hotplug_status = 0, hotplug_status_mask;
1590 int i;
1591
1592 if (IS_G4X(dev_priv)(IS_PLATFORM(dev_priv, INTEL_G45) || IS_PLATFORM(dev_priv, INTEL_GM45
))
||
1593 IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) || IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW))
1594 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X((1 << 11) | (1 << 2) | (1 << 3) | (3 <<
17) | (3 << 19) | (3 << 21))
|
1595 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X(7 << 4);
1596 else
1597 hotplug_status_mask = HOTPLUG_INT_STATUS_I915((1 << 11) | (1 << 6) | (1 << 7) | (3 <<
17) | (3 << 19) | (3 << 21))
;
1598
1599 /*
1600 * We absolutely have to clear all the pending interrupt
1601 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1602 * interrupt bit won't have an edge, and the i965/g4x
1603 * edge triggered IIR will not notice that an interrupt
1604 * is still pending. We can't use PORT_HOTPLUG_EN to
1605 * guarantee the edge as the act of toggling the enable
1606 * bits can itself generate a new hotplug interrupt :(
1607 */
1608 for (i = 0; i < 10; i++) {
1609 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT((const i915_reg_t){ .reg = (((&(dev_priv)->__info)->
display.mmio_offset) + 0x61114) })
) & hotplug_status_mask;
1610
1611 if (tmp == 0)
1612 return hotplug_status;
1613
1614 hotplug_status |= tmp;
1615 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT((const i915_reg_t){ .reg = (((&(dev_priv)->__info)->
display.mmio_offset) + 0x61114) })
, hotplug_status);
1616 }
1617
1618 drm_WARN_ONCE(&dev_priv->drm, 1,({ static int __warned; int __ret = !!(1); if (__ret &&
!__warned) { printf("%s %s: " "PORT_HOTPLUG_STAT did not clear (0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", intel_uncore_read
(&dev_priv->uncore, ((const i915_reg_t){ .reg = (((&
(dev_priv)->__info)->display.mmio_offset) + 0x61114) })
)); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1619 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",({ static int __warned; int __ret = !!(1); if (__ret &&
!__warned) { printf("%s %s: " "PORT_HOTPLUG_STAT did not clear (0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", intel_uncore_read
(&dev_priv->uncore, ((const i915_reg_t){ .reg = (((&
(dev_priv)->__info)->display.mmio_offset) + 0x61114) })
)); __warned = 1; } __builtin_expect(!!(__ret), 0); })
1620 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT))({ static int __warned; int __ret = !!(1); if (__ret &&
!__warned) { printf("%s %s: " "PORT_HOTPLUG_STAT did not clear (0x%08x)\n"
, dev_driver_string((&dev_priv->drm)->dev), "", intel_uncore_read
(&dev_priv->uncore, ((const i915_reg_t){ .reg = (((&
(dev_priv)->__info)->display.mmio_offset) + 0x61114) })
)); __warned = 1; } __builtin_expect(!!(__ret), 0); })
;
1621
1622 return hotplug_status;
1623}
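Stripped of the MMIO specifics, i9xx_hpd_irq_ack() is a bounded read/accumulate/write-1-to-clear retry loop: keep latching and acking until the status reads back zero, giving up after ten rounds. A standalone model with the register faked by a variable:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_stat = 0x0c;	/* hypothetical pending bits */

static uint32_t read_stat(void) { return fake_stat; }
static void write_stat(uint32_t v) { fake_stat &= ~v; }	/* write-1-to-clear */

int main(void)
{
	uint32_t status = 0;
	int i;

	for (i = 0; i < 10; i++) {	/* bounded, as in i9xx_hpd_irq_ack() */
		uint32_t tmp = read_stat();

		if (tmp == 0)
			break;		/* everything acked */
		status |= tmp;		/* accumulate before clearing */
		write_stat(tmp);
	}
	printf("accumulated status 0x%08x\n", status);
	return 0;
}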
1624
1625static void i9xx_hpd_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1626 u32 hotplug_status)
1627{
1628 u32 pin_mask = 0, long_mask = 0;
1629 u32 hotplug_trigger;
1630
1631 if (IS_G4X(dev_priv)(IS_PLATFORM(dev_priv, INTEL_G45) || IS_PLATFORM(dev_priv, INTEL_GM45
))
||
1632 IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) || IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW))
1633 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X((1 << 11) | (1 << 2) | (1 << 3) | (3 <<
17) | (3 << 19) | (3 << 21))
;
1634 else
1635 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915((1 << 11) | (1 << 6) | (1 << 7) | (3 <<
17) | (3 << 19) | (3 << 21))
;
1636
1637 if (hotplug_trigger) {
1638 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1639 hotplug_trigger, hotplug_trigger,
1640 dev_priv->display.hotplug.hpd,
1641 i9xx_port_hotplug_long_detect);
1642
1643 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1644 }
1645
1646 if ((IS_G4X(dev_priv)(IS_PLATFORM(dev_priv, INTEL_G45) || IS_PLATFORM(dev_priv, INTEL_GM45
))
||
1647 IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) || IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)) &&
1648 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X(7 << 4))
1649 dp_aux_irq_handler(dev_priv);
1650}
1651
1652static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1653{
1654 struct drm_i915_privateinteldrm_softc *dev_priv = arg;
1655 irqreturn_t ret = IRQ_NONE;
1656
1657 if (!intel_irqs_enabled(dev_priv))
1658 return IRQ_NONE;
1659
1660 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1661 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1662
1663 do {
1664 u32 iir, gt_iir, pm_iir;
1665 u32 pipe_stats[I915_MAX_PIPES] = {};
1666 u32 hotplug_status = 0;
1667 u32 ier = 0;
1668
1669 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR((const i915_reg_t){ .reg = (0x44018) }));
1670 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR((const i915_reg_t){ .reg = (0x44028) }));
1671 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR((const i915_reg_t){ .reg = (0x180000 + 0x20a4) }));
1672
1673 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1674 break;
1675
1676 ret = IRQ_HANDLED;
1677
1678 /*
1679 * Theory on interrupt generation, based on empirical evidence:
1680 *
1681 * x = ((VLV_IIR & VLV_IER) ||
1682 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1683 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1684 *
1685 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1686 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1687 * guarantee the CPU interrupt will be raised again even if we
1688 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1689 * bits this time around.
1690 */
1691 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER((const i915_reg_t){ .reg = (0x4400c) }), 0);
1692 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }));
1693 intel_uncore_write(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }), 0);
1694
1695 if (gt_iir)
1696 intel_uncore_write(&dev_priv->uncore, GTIIR((const i915_reg_t){ .reg = (0x44018) }), gt_iir);
1697 if (pm_iir)
1698 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR((const i915_reg_t){ .reg = (0x44028) }), pm_iir);
1699
1700 if (iir & I915_DISPLAY_PORT_INTERRUPT(1 << 17))
1701 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1702
1703 /* Call regardless, as some status bits might not be
1704 * signalled in iir */
1705 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1706
1707 if (iir & (I915_LPE_PIPE_A_INTERRUPT(1 << 20) |
1708 I915_LPE_PIPE_B_INTERRUPT(1 << 21)))
1709 intel_lpe_audio_irq_handler(dev_priv);
1710
1711 /*
1712 * VLV_IIR is single buffered, and reflects the level
1713 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1714 */
1715 if (iir)
1716 intel_uncore_write(&dev_priv->uncore, VLV_IIR((const i915_reg_t){ .reg = (0x180000 + 0x20a4) }), iir);
1717
1718 intel_uncore_write(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }), ier);
1719 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER((const i915_reg_t){ .reg = (0x4400c) }), MASTER_INTERRUPT_ENABLE(1 << 31));
1720
1721 if (gt_iir)
1722 gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1723 if (pm_iir)
1724 gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1725
1726 if (hotplug_status)
1727 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1728
1729 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1730 } while (0);
1731
1732 pmu_irq_stats(dev_priv, ret);
1733
1734 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1735
1736 return ret;
1737}
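The interrupt-generation formula in the comment can be transcribed literally: clearing MASTER_INTERRUPT_ENABLE and VLV_IER forces the predicate to 0, so any IIR bit still set when they are restored produces the fresh 0->1 edge the hardware needs. A direct transcription (register values hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MASTER_INTERRUPT_ENABLE	(1U << 31)

/* literal transcription of the comment in valleyview_irq_handler() */
static bool irq_line_asserted(uint32_t vlv_iir, uint32_t vlv_ier,
    uint32_t gt_iir, uint32_t gt_ier,
    uint32_t pm_iir, uint32_t pm_ier, uint32_t master_ier)
{
	return (vlv_iir & vlv_ier) ||
	    (((gt_iir & gt_ier) || (pm_iir & pm_ier)) &&
	     (master_ier & MASTER_INTERRUPT_ENABLE));
}

int main(void)
{
	/* a pending VLV bit asserts the line even with the GT side quiet */
	printf("%d\n", irq_line_asserted(0x1, 0x1, 0, 0, 0, 0, 0));
	return 0;
}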
1738
1739static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1740{
1741 struct drm_i915_privateinteldrm_softc *dev_priv = arg;
1742 irqreturn_t ret = IRQ_NONE;
1743
1744 if (!intel_irqs_enabled(dev_priv))
1745 return IRQ_NONE;
1746
1747 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1748 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1749
1750 do {
1751 u32 master_ctl, iir;
1752 u32 pipe_stats[I915_MAX_PIPES] = {};
1753 u32 hotplug_status = 0;
1754 u32 ier = 0;
1755
1756 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ((const i915_reg_t){ .reg = (0x44200) })) & ~GEN8_MASTER_IRQ_CONTROL(1 << 31);
1757 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR((const i915_reg_t){ .reg = (0x180000 + 0x20a4) }));
1758
1759 if (master_ctl == 0 && iir == 0)
1760 break;
1761
1762 ret = IRQ_HANDLED;
1763
1764 /*
1765 * Theory on interrupt generation, based on empirical evidence:
1766 *
1767 * x = ((VLV_IIR & VLV_IER) ||
1768 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1769 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1770 *
1771 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1772 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1773 * guarantee the CPU interrupt will be raised again even if we
1774 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1775 * bits this time around.
1776 */
1777 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ((const i915_reg_t){ .reg = (0x44200) }), 0);
1778 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }));
1779 intel_uncore_write(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }), 0);
1780
1781 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1782
1783 if (iir & I915_DISPLAY_PORT_INTERRUPT(1 << 17))
1784 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1785
1786 /* Call regardless, as some status bits might not be
1787 * signalled in iir */
1788 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1789
1790 if (iir & (I915_LPE_PIPE_A_INTERRUPT(1 << 20) |
1791 I915_LPE_PIPE_B_INTERRUPT(1 << 21) |
1792 I915_LPE_PIPE_C_INTERRUPT(1 << 12)))
1793 intel_lpe_audio_irq_handler(dev_priv);
1794
1795 /*
1796 * VLV_IIR is single buffered, and reflects the level
1797 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1798 */
1799 if (iir)
1800 intel_uncore_write(&dev_priv->uncore, VLV_IIR((const i915_reg_t){ .reg = (0x180000 + 0x20a4) }), iir);
1801
1802 intel_uncore_write(&dev_priv->uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }), ier);
1803 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ((const i915_reg_t){ .reg = (0x44200) }), GEN8_MASTER_IRQ_CONTROL(1 << 31));
1804
1805 if (hotplug_status)
1806 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1807
1808 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1809 } while (0);
1810
1811 pmu_irq_stats(dev_priv, ret);
1812
1813 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1814
1815 return ret;
1816}
1817
1818static void ibx_hpd_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
1819 u32 hotplug_trigger)
1820{
1821 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1822
1823 /*
1824 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1825 * unless we touch the hotplug register, even if hotplug_trigger is
1826 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1827 * errors.
1828 */
1829 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }));
1830 if (!hotplug_trigger) {
1831 u32 mask = PORTA_HOTPLUG_STATUS_MASK(3 << 24) |
1832 PORTD_HOTPLUG_STATUS_MASK(3 << 16) |
1833 PORTC_HOTPLUG_STATUS_MASK(3 << 8) |
1834 PORTB_HOTPLUG_STATUS_MASK(3 << 0);
1835 dig_hotplug_reg &= ~mask;
1836 }
1837
1838 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }), dig_hotplug_reg);
1839 if (!hotplug_trigger)
1840 return;
1841
1842 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1843 hotplug_trigger, dig_hotplug_reg,
1844 dev_priv->display.hotplug.pch_hpd,
1845 pch_port_hotplug_long_detect);
1846
1847 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1848}
1849
1850static void ibx_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv, u32 pch_iir)
1851{
1852 enum pipe pipe;
1853 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK((1 << 11) | (1 << 6) | (1 << 8) | (1 <<
9) | (1 << 10))
;
1854
1855 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1856
1857 if (pch_iir & SDE_AUDIO_POWER_MASK(7 << (25))) {
1858 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK(7 << (25))) >>
1859 SDE_AUDIO_POWER_SHIFT(25));
1860 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH audio power change on port %d\n"
, ((port) + 'A'))
1861 port_name(port))__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH audio power change on port %d\n"
, ((port) + 'A'))
;
1862 }
1863
1864 if (pch_iir & SDE_AUX_MASK(7 << 13))
1865 dp_aux_irq_handler(dev_priv);
1866
1867 if (pch_iir & SDE_GMBUS(1 << 24))
1868 gmbus_irq_handler(dev_priv);
1869
1870 if (pch_iir & SDE_AUDIO_HDCP_MASK(3 << 22))
1871 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH HDCP audio interrupt\n"
)
;
1872
1873 if (pch_iir & SDE_AUDIO_TRANS_MASK(3 << 20))
1874 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH transcoder audio interrupt\n"
)
;
1875
1876 if (pch_iir & SDE_POISON(1 << 19))
1877 drm_err(&dev_priv->drm, "PCH poison interrupt\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "PCH poison interrupt\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1878
1879 if (pch_iir & SDE_FDI_MASK(3 << 16)) {
1880 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
1881 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
1882 pipe_name(pipe),__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
1883 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)))__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
;
1884 }
1885
1886 if (pch_iir & (SDE_TRANSB_CRC_DONE(1 << 5) | SDE_TRANSA_CRC_DONE(1 << 2)))
1887 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH transcoder CRC done interrupt\n"
)
;
1888
1889 if (pch_iir & (SDE_TRANSB_CRC_ERR(1 << 4) | SDE_TRANSA_CRC_ERR(1 << 1)))
1890 drm_dbg(&dev_priv->drm,__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH transcoder CRC error interrupt\n"
)
1891 "PCH transcoder CRC error interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH transcoder CRC error interrupt\n"
)
;
1892
1893 if (pch_iir & SDE_TRANSA_FIFO_UNDER(1 << 0))
1894 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1895
1896 if (pch_iir & SDE_TRANSB_FIFO_UNDER(1 << 3))
1897 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1898}
1899
1900static void ivb_err_int_handler(struct drm_i915_privateinteldrm_softc *dev_priv)
1901{
1902 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT((const i915_reg_t){ .reg = (0x44040) }));
1903 enum pipe pipe;
1904
1905 if (err_int & ERR_INT_POISON(1 << 31))
1906 drm_err(&dev_priv->drm, "Poison interrupt\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Poison interrupt\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1907
1908 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
1909 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)(1 << ((pipe) * 3)))
1910 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1911
1912 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)(1 << (2 + (pipe) * 3))) {
1913 if (IS_IVYBRIDGE(dev_priv)IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE))
1914 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1915 else
1916 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1917 }
1918 }
1919
1920 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT((const i915_reg_t){ .reg = (0x44040) }), err_int);
1921}
1922
1923static void cpt_serr_int_handler(struct drm_i915_privateinteldrm_softc *dev_priv)
1924{
1925 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT((const i915_reg_t){ .reg = (0xc4040) }));
1926 enum pipe pipe;
1927
1928 if (serr_int & SERR_INT_POISON(1 << 31))
1929 drm_err(&dev_priv->drm, "PCH poison interrupt\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "PCH poison interrupt\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1930
1931 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
1932 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)(1 << ((pipe) * 3)))
1933 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1934
1935 intel_uncore_write(&dev_priv->uncore, SERR_INT((const i915_reg_t){ .reg = (0xc4040) }), serr_int);
1936}
1937
1938static void cpt_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv, u32 pch_iir)
1939{
1940 enum pipe pipe;
1941 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT((1 << 19) | (1 << 18) | (1 << 23) | (1 <<
22) | (1 << 21))
;
1942
1943 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1944
1945 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT(7 << 29)) {
1946 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT(7 << 29)) >>
1947 SDE_AUDIO_POWER_SHIFT_CPT29);
1948 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH audio power change on port %c\n"
, ((port) + 'A'))
1949 port_name(port))__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "PCH audio power change on port %c\n"
, ((port) + 'A'))
;
1950 }
1951
1952 if (pch_iir & SDE_AUX_MASK_CPT(7 << 25))
1953 dp_aux_irq_handler(dev_priv);
1954
1955 if (pch_iir & SDE_GMBUS_CPT(1 << 17))
1956 gmbus_irq_handler(dev_priv);
1957
1958 if (pch_iir & SDE_AUDIO_CP_REQ_CPT((1 << 10) | (1 << 6) | (1 << 2)))
1959 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Audio CP request interrupt\n"
)
;
1960
1961 if (pch_iir & SDE_AUDIO_CP_CHG_CPT((1 << 9) | (1 << 5) | (1 << 1)))
1962 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n")__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, "Audio CP change interrupt\n"
)
;
1963
1964 if (pch_iir & SDE_FDI_MASK_CPT((1 << 8) | (1 << 4) | (1 << 0))) {
1965 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
1966 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
1967 pipe_name(pipe),__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
1968 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)))__drm_dev_dbg(((void *)0), (&dev_priv->drm) ? (&dev_priv
->drm)->dev : ((void *)0), DRM_UT_DRIVER, " pipe %c FDI IIR: 0x%08x\n"
, ((pipe) + 'A'), intel_uncore_read(&dev_priv->uncore,
((const i915_reg_t){ .reg = (((0xf0014) + (pipe) * ((0xf1014
) - (0xf0014)))) })))
;
1969 }
1970
1971 if (pch_iir & SDE_ERROR_CPT(1 << 16))
1972 cpt_serr_int_handler(dev_priv);
1973}
1974
1975static void icp_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv, u32 pch_iir)
1976{
1977 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP(((u32)((1UL << (16 + ((HPD_PORT_D) - HPD_PORT_A))) + 0
)) | ((u32)((1UL << (16 + ((HPD_PORT_C) - HPD_PORT_A)))
+ 0)) | ((u32)((1UL << (16 + ((HPD_PORT_B) - HPD_PORT_A
))) + 0)) | ((u32)((1UL << (16 + ((HPD_PORT_A) - HPD_PORT_A
))) + 0)))
;
1978 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP(((u32)((1UL << (24 + ((HPD_PORT_TC6) - HPD_PORT_TC1)))
+ 0)) | ((u32)((1UL << (24 + ((HPD_PORT_TC5) - HPD_PORT_TC1
))) + 0)) | ((u32)((1UL << (24 + ((HPD_PORT_TC4) - HPD_PORT_TC1
))) + 0)) | ((u32)((1UL << (24 + ((HPD_PORT_TC3) - HPD_PORT_TC1
))) + 0)) | ((u32)((1UL << (24 + ((HPD_PORT_TC2) - HPD_PORT_TC1
))) + 0)) | ((u32)((1UL << (24 + ((HPD_PORT_TC1) - HPD_PORT_TC1
))) + 0)))
;
1979 u32 pin_mask = 0, long_mask = 0;
1980
1981 if (ddi_hotplug_trigger) {
1982 u32 dig_hotplug_reg;
1983
1984 /* Locking due to DSI native GPIO sequences */
1985 spin_lock(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
1986 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI((const i915_reg_t){ .reg = (0xc4030) }));
1987 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI((const i915_reg_t){ .reg = (0xc4030) }), dig_hotplug_reg);
1988 spin_unlock(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
1989
1990 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1991 ddi_hotplug_trigger, dig_hotplug_reg,
1992 dev_priv->display.hotplug.pch_hpd,
1993 icp_ddi_port_hotplug_long_detect);
1994 }
1995
1996 if (tc_hotplug_trigger) {
1997 u32 dig_hotplug_reg;
1998
1999 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC((const i915_reg_t){ .reg = (0xc4034) }));
2000 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC((const i915_reg_t){ .reg = (0xc4034) }), dig_hotplug_reg);
2001
2002 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2003 tc_hotplug_trigger, dig_hotplug_reg,
2004 dev_priv->display.hotplug.pch_hpd,
2005 icp_tc_port_hotplug_long_detect);
2006 }
2007
2008 if (pin_mask)
2009 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2010
2011 if (pch_iir & SDE_GMBUS_ICP(1 << 23))
2012 gmbus_irq_handler(dev_priv);
2013}
2014
2015static void spt_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv, u32 pch_iir)
2016{
2017 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT((1 << 25) | (1 << 23) | (1 << 22) | (1 <<
21) | (1 << 24))
&
2018 ~SDE_PORTE_HOTPLUG_SPT(1 << 25);
2019 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT(1 << 25);
2020 u32 pin_mask = 0, long_mask = 0;
2021
2022 if (hotplug_trigger) {
2023 u32 dig_hotplug_reg;
2024
2025 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }));
2026 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }), dig_hotplug_reg);
2027
2028 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2029 hotplug_trigger, dig_hotplug_reg,
2030 dev_priv->display.hotplug.pch_hpd,
2031 spt_port_hotplug_long_detect);
2032 }
2033
2034 if (hotplug2_trigger) {
2035 u32 dig_hotplug_reg;
2036
2037 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2((const i915_reg_t){ .reg = (0xc403C) }));
2038 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2((const i915_reg_t){ .reg = (0xc403C) }), dig_hotplug_reg);
2039
2040 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2041 hotplug2_trigger, dig_hotplug_reg,
2042 dev_priv->display.hotplug.pch_hpd,
2043 spt_port_hotplug2_long_detect);
2044 }
2045
2046 if (pin_mask)
2047 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2048
2049 if (pch_iir & SDE_GMBUS_CPT(1 << 17))
2050 gmbus_irq_handler(dev_priv);
2051}
2052
2053static void ilk_hpd_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
2054 u32 hotplug_trigger)
2055{
2056 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2057
2058 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL((const i915_reg_t){ .reg = (0x44030) }));
2059 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL((const i915_reg_t){ .reg = (0x44030) }), dig_hotplug_reg);
2060
2061 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2062 hotplug_trigger, dig_hotplug_reg,
2063 dev_priv->display.hotplug.hpd,
2064 ilk_port_hotplug_long_detect);
2065
2066 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2067}
2068
2069static void ilk_display_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
2070 u32 de_iir)
2071{
2072 enum pipe pipe;
2073 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG(1 << 19);
2074
2075 if (hotplug_trigger)
2076 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2077
2078 if (de_iir & DE_AUX_CHANNEL_A(1 << 20))
2079 dp_aux_irq_handler(dev_priv);
2080
2081 if (de_iir & DE_GSE(1 << 18))
2082 intel_opregion_asle_intr(dev_priv);
2083
2084 if (de_iir & DE_POISON(1 << 23))
2085 drm_err(&dev_priv->drm, "Poison interrupt\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Poison interrupt\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
2086
2087 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
2088 if (de_iir & DE_PIPE_VBLANK(pipe)(1 << (7 + 8 * (pipe))))
2089 intel_handle_vblank(dev_priv, pipe);
2090
2091 if (de_iir & DE_PLANE_FLIP_DONE(pipe)(1 << (26 + (pipe))))
2092 flip_done_handler(dev_priv, pipe);
2093
2094 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)(1 << (8 * (pipe))))
2095 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2096
2097 if (de_iir & DE_PIPE_CRC_DONE(pipe)(1 << (2 + 8 * (pipe))))
2098 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2099 }
2100
2101 /* check event from PCH */
2102 if (de_iir & DE_PCH_EVENT(1 << 21)) {
2103 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR((const i915_reg_t){ .reg = (0xc4008) }));
2104
2105 if (HAS_PCH_CPT(dev_priv)(((dev_priv)->pch_type) == PCH_CPT))
2106 cpt_irq_handler(dev_priv, pch_iir);
2107 else
2108 ibx_irq_handler(dev_priv, pch_iir);
2109
2110 /* should clear PCH hotplug event before clearing the CPU irq */
2111 intel_uncore_write(&dev_priv->uncore, SDEIIR((const i915_reg_t){ .reg = (0xc4008) }), pch_iir);
2112 }
2113
2114 if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) == 5 && de_iir & DE_PCU_EVENT(1 << 25))
2115 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2116}
2117
2118static void ivb_display_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv,
2119 u32 de_iir)
2120{
2121 enum pipe pipe;
2122 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB(1 << 27);
2123
2124 if (hotplug_trigger)
2125 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2126
2127 if (de_iir & DE_ERR_INT_IVB(1 << 30))
2128 ivb_err_int_handler(dev_priv);
2129
2130 if (de_iir & DE_AUX_CHANNEL_A_IVB(1 << 26))
2131 dp_aux_irq_handler(dev_priv);
2132
2133 if (de_iir & DE_GSE_IVB(1 << 29))
2134 intel_opregion_asle_intr(dev_priv);
2135
2136 for_each_pipe(dev_priv, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else
{
2137 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)(1 << ((pipe) * 5)))
2138 intel_handle_vblank(dev_priv, pipe);
2139
2140 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)(1 << (3 + 5 * (pipe))))
2141 flip_done_handler(dev_priv, pipe);
2142 }
2143
2144 /* check event from PCH */
2145 if (!HAS_PCH_NOP(dev_priv)(((dev_priv)->pch_type) == PCH_NOP) && (de_iir & DE_PCH_EVENT_IVB(1 << 28))) {
2146 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR((const i915_reg_t){ .reg = (0xc4008) }));
2147
2148 cpt_irq_handler(dev_priv, pch_iir);
2149
2150 /* clear PCH hotplug event before clearing the CPU irq */
2151 intel_uncore_write(&dev_priv->uncore, SDEIIR((const i915_reg_t){ .reg = (0xc4008) }), pch_iir);
2152 }
2153}
2154
2155 /*
2156  * To handle irqs with the minimum potential races with fresh interrupts, we:
2157  * 1 - Disable Master Interrupt Control.
2158  * 2 - Find the source(s) of the interrupt.
2159  * 3 - Clear the Interrupt Identity bits (IIR).
2160  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2161  * 5 - Re-enable Master Interrupt Control.
2162  */
2163 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2164 {
2165     struct drm_i915_private *i915 = arg;
2166     void __iomem * const regs = i915->uncore.regs;
2167     u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2168     irqreturn_t ret = IRQ_NONE;
2169
2170     if (unlikely(!intel_irqs_enabled(i915)))
2171         return IRQ_NONE;
2172
2173     /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2174     disable_rpm_wakeref_asserts(&i915->runtime_pm);
2175
2176     /* disable master interrupt before clearing iir  */
2177     de_ier = raw_reg_read(regs, DEIER);
2178     raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2179
2180     /* Disable south interrupts. We'll only write to SDEIIR once, so further
2181      * interrupts will be stored on its back queue, and then we'll be
2182      * able to process them after we restore SDEIER (as soon as we restore
2183      * it, we'll get an interrupt if SDEIIR still has something to process
2184      * due to its back queue). */
2185     if (!HAS_PCH_NOP(i915)) {
2186         sde_ier = raw_reg_read(regs, SDEIER);
2187         raw_reg_write(regs, SDEIER, 0);
2188     }
2189
2190     /* Find, clear, then process each source of interrupt */
2191
2192     gt_iir = raw_reg_read(regs, GTIIR);
2193     if (gt_iir) {
2194         raw_reg_write(regs, GTIIR, gt_iir);
2195         if (GRAPHICS_VER(i915) >= 6)
2196             gen6_gt_irq_handler(to_gt(i915), gt_iir);
2197         else
2198             gen5_gt_irq_handler(to_gt(i915), gt_iir);
2199         ret = IRQ_HANDLED;
2200     }
2201
2202     de_iir = raw_reg_read(regs, DEIIR);
2203     if (de_iir) {
2204         raw_reg_write(regs, DEIIR, de_iir);
2205         if (DISPLAY_VER(i915) >= 7)
2206             ivb_display_irq_handler(i915, de_iir);
2207         else
2208             ilk_display_irq_handler(i915, de_iir);
2209         ret = IRQ_HANDLED;
2210     }
2211
2212     if (GRAPHICS_VER(i915) >= 6) {
2213         u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2214         if (pm_iir) {
2215             raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2216             gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2217             ret = IRQ_HANDLED;
2218         }
2219     }
2220
2221     raw_reg_write(regs, DEIER, de_ier);
2222     if (sde_ier)
2223         raw_reg_write(regs, SDEIER, sde_ier);
2224
2225     pmu_irq_stats(i915, ret);
2226
2227     /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2228     enable_rpm_wakeref_asserts(&i915->runtime_pm);
2229
2230     return ret;
2231 }
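
The handler above is the five-step discipline from the preceding comment in its purest form. Reduced to a self-contained sketch -- with a hypothetical two-register MMIO space and stand-in mmio_read()/mmio_write() helpers instead of the driver's raw_reg accessors -- the ordering looks like this:

    /* Sketch only: DEIER/DEIIR indices, fake_regs and the mmio helpers are
     * invented stand-ins; only the ordering mirrors ilk_irq_handler(). */
    #include <stdint.h>
    #include <stdio.h>

    #define DEIER 0
    #define DEIIR 1
    #define MASTER_IRQ_CONTROL (1u << 31)

    static uint32_t fake_regs[2];

    static uint32_t mmio_read(int reg) { return fake_regs[reg]; }
    static void mmio_write(int reg, uint32_t val)
    {
        /* IIR registers are write-1-to-clear, like the real hardware */
        if (reg == DEIIR)
            fake_regs[reg] &= ~val;
        else
            fake_regs[reg] = val;
    }

    static void handle_display_bits(uint32_t iir)
    {
        printf("processing display IIR bits 0x%08x\n", (unsigned)iir);
    }

    static int fake_irq_handler(void)
    {
        uint32_t de_ier, de_iir;
        int handled = 0;

        /* 1 - mask the master enable so no new CPU interrupt fires */
        de_ier = mmio_read(DEIER);
        mmio_write(DEIER, de_ier & ~MASTER_IRQ_CONTROL);

        /* 2+3 - snapshot the identity bits and ack them before processing,
         * so an event arriving while we work re-latches in the IIR */
        de_iir = mmio_read(DEIIR);
        if (de_iir) {
            mmio_write(DEIIR, de_iir);
            handle_display_bits(de_iir);    /* 4 - process the snapshot */
            handled = 1;
        }

        /* 5 - restore the master enable; a re-latched IIR fires again now */
        mmio_write(DEIER, de_ier);
        return handled;
    }

    int main(void)
    {
        fake_regs[DEIER] = MASTER_IRQ_CONTROL;
        fake_regs[DEIIR] = 0x00000081;    /* pretend two events are pending */
        printf("handled=%d, IIR now 0x%08x\n",
            fake_irq_handler(), (unsigned)mmio_read(DEIIR));
        return 0;
    }

The key property is that the IIR snapshot is acked before it is processed, so nothing is lost: a mid-handler event simply raises the line again once the master enable is restored.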
2232
2233 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2234                                 u32 hotplug_trigger)
2235 {
2236     u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2237
2238     dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2239     intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2240
2241     intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2242                        hotplug_trigger, dig_hotplug_reg,
2243                        dev_priv->display.hotplug.hpd,
2244                        bxt_port_hotplug_long_detect);
2245
2246     intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2247 }
2248
2249 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2250 {
2251     u32 pin_mask = 0, long_mask = 0;
2252     u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2253     u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2254
2255     if (trigger_tc) {
2256         u32 dig_hotplug_reg;
2257
2258         dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2259         intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2260
2261         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2262                            trigger_tc, dig_hotplug_reg,
2263                            dev_priv->display.hotplug.hpd,
2264                            gen11_port_hotplug_long_detect);
2265     }
2266
2267     if (trigger_tbt) {
2268         u32 dig_hotplug_reg;
2269
2270         dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2271         intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2272
2273         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2274                            trigger_tbt, dig_hotplug_reg,
2275                            dev_priv->display.hotplug.hpd,
2276                            gen11_port_hotplug_long_detect);
2277     }
2278
2279     if (pin_mask)
2280         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2281     else
2282         drm_err(&dev_priv->drm,
2283                 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2284 }
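
Both hotplug paths above share one shape: read the hotplug control register, write the same value back to ack it, then fold the (trigger, control) pair into a pin mask and a long-pulse mask. A rough stand-alone sketch of what intel_get_hpd_pins() does with that pair, using an invented bit layout rather than the platform's real hpd tables:

    /* Sketch only: trigger_bit()/long_pulse_bit() layouts are assumptions
     * for illustration, not real register definitions. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PINS 4

    static uint32_t trigger_bit(int pin)    { return 1u << pin; }
    static uint32_t long_pulse_bit(int pin) { return 1u << (16 + pin); }

    static void get_hpd_pins(uint32_t trigger, uint32_t ctl,
        uint32_t *pin_mask, uint32_t *long_mask)
    {
        for (int pin = 0; pin < NUM_PINS; pin++) {
            if (!(trigger & trigger_bit(pin)))
                continue;
            *pin_mask |= 1u << pin;
            /* the control register says whether the pulse was long
             * (plug/unplug) or short (a sink IRQ, e.g. a DP HPD IRQ) */
            if (ctl & long_pulse_bit(pin))
                *long_mask |= 1u << pin;
        }
    }

    int main(void)
    {
        uint32_t pin_mask = 0, long_mask = 0;

        /* pins 0 and 2 fired; only pin 2 was a long pulse */
        get_hpd_pins(0x5, 0x4u << 16, &pin_mask, &long_mask);
        printf("pins=0x%x long=0x%x\n",
            (unsigned)pin_mask, (unsigned)long_mask);
        return 0;
    }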
2285
2286 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2287 {
2288     u32 mask;
2289
2290     if (DISPLAY_VER(dev_priv) >= 13)
2291         return TGL_DE_PORT_AUX_DDIA |
2292             TGL_DE_PORT_AUX_DDIB |
2293             TGL_DE_PORT_AUX_DDIC |
2294             XELPD_DE_PORT_AUX_DDID |
2295             XELPD_DE_PORT_AUX_DDIE |
2296             TGL_DE_PORT_AUX_USBC1 |
2297             TGL_DE_PORT_AUX_USBC2 |
2298             TGL_DE_PORT_AUX_USBC3 |
2299             TGL_DE_PORT_AUX_USBC4;
2300     else if (DISPLAY_VER(dev_priv) >= 12)
2301         return TGL_DE_PORT_AUX_DDIA |
2302             TGL_DE_PORT_AUX_DDIB |
2303             TGL_DE_PORT_AUX_DDIC |
2304             TGL_DE_PORT_AUX_USBC1 |
2305             TGL_DE_PORT_AUX_USBC2 |
2306             TGL_DE_PORT_AUX_USBC3 |
2307             TGL_DE_PORT_AUX_USBC4 |
2308             TGL_DE_PORT_AUX_USBC5 |
2309             TGL_DE_PORT_AUX_USBC6;
2310
2311
2312     mask = GEN8_AUX_CHANNEL_A;
2313     if (DISPLAY_VER(dev_priv) >= 9)
2314         mask |= GEN9_AUX_CHANNEL_B |
2315             GEN9_AUX_CHANNEL_C |
2316             GEN9_AUX_CHANNEL_D;
2317
2318     if (DISPLAY_VER(dev_priv) == 11) {
2319         mask |= ICL_AUX_CHANNEL_F;
2320         mask |= ICL_AUX_CHANNEL_E;
2321     }
2322
2323     return mask;
2324 }
2325
2326 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2327 {
2328     if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2329         return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2330     else if (DISPLAY_VER(dev_priv) >= 11)
2331         return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2332     else if (DISPLAY_VER(dev_priv) >= 9)
2333         return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2334     else
2335         return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2336 }
2337
2338 static void
2339 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2340 {
2341     bool found = false;
2342
2343     if (iir & GEN8_DE_MISC_GSE) {
2344         intel_opregion_asle_intr(dev_priv);
2345         found = true;
2346     }
2347
2348     if (iir & GEN8_DE_EDP_PSR) {
2349         struct intel_encoder *encoder;
2350         u32 psr_iir;
2351         i915_reg_t iir_reg;
2352
2353         for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2354             struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2355
2356             if (DISPLAY_VER(dev_priv) >= 12)
2357                 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2358             else
2359                 iir_reg = EDP_PSR_IIR;
2360
2361             psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2362             intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2363
2364             if (psr_iir)
2365                 found = true;
2366
2367             intel_psr_irq_handler(intel_dp, psr_iir);
2368
2369             /* prior to GEN12 there is only one EDP PSR */
2370             if (DISPLAY_VER(dev_priv) < 12)
2371                 break;
2372         }
2373     }
2374
2375     if (!found)
2376         drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2377 }
2378
2379 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2380                                            u32 te_trigger)
2381 {
2382     enum pipe pipe = INVALID_PIPE;
2383     enum transcoder dsi_trans;
2384     enum port port;
2385     u32 val, tmp;
2386
2387     /*
2388      * In case of dual link, TE comes from DSI_1;
2389      * this is to check if dual link is enabled
2390      */
2391     val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2392     val &= PORT_SYNC_MODE_ENABLE;
2393
2394     /*
2395      * if dual link is enabled, then read DSI_0
2396      * transcoder registers
2397      */
2398     port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2399         PORT_A : PORT_B;
2400     dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2401
2402     /* Check if DSI configured in command mode */
2403     val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2404     val = val & OP_MODE_MASK;
2405
2406     if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2407         drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2408         return;
2409     }
2410
2411     /* Get PIPE for handling VBLANK event */
2412     val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2413     switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2414     case TRANS_DDI_EDP_INPUT_A_ON:
2415         pipe = PIPE_A;
2416         break;
2417     case TRANS_DDI_EDP_INPUT_B_ONOFF:
2418         pipe = PIPE_B;
2419         break;
2420     case TRANS_DDI_EDP_INPUT_C_ONOFF:
2421         pipe = PIPE_C;
2422         break;
2423     default:
2424         drm_err(&dev_priv->drm, "Invalid PIPE\n");
2425         return;
2426     }
2427
2428     intel_handle_vblank(dev_priv, pipe);
2429
2430     /* clear TE in dsi IIR */
2431     port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2432     tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2433     intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2434 }
2435
2436 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2437 {
2438     if (DISPLAY_VER(i915) >= 9)
2439         return GEN9_PIPE_PLANE1_FLIP_DONE;
2440     else
2441         return GEN8_PIPE_PRIMARY_FLIP_DONE;
2442 }
2443
2444 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2445 {
2446     u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2447
2448     if (DISPLAY_VER(dev_priv) >= 13)
2449         mask |= XELPD_PIPE_SOFT_UNDERRUN |
2450             XELPD_PIPE_HARD_UNDERRUN;
2451
2452     return mask;
2453 }
2454
2455 static irqreturn_t
2456 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2457 {
2458     irqreturn_t ret = IRQ_NONE;
2459     u32 iir;
2460     enum pipe pipe;
2461
2462     drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2463
2464     if (master_ctl & GEN8_DE_MISC_IRQ) {
2465         iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2466         if (iir) {
2467             intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2468             ret = IRQ_HANDLED;
2469             gen8_de_misc_irq_handler(dev_priv, iir);
2470         } else {
2471             drm_err(&dev_priv->drm,
2472                     "The master control interrupt lied (DE MISC)!\n");
2473         }
2474     }
2475
2476     if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2477         iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2478         if (iir) {
2479             intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2480             ret = IRQ_HANDLED;
2481             gen11_hpd_irq_handler(dev_priv, iir);
2482         } else {
2483             drm_err(&dev_priv->drm,
2484                     "The master control interrupt lied, (DE HPD)!\n");
2485         }
2486     }
2487
2488     if (master_ctl & GEN8_DE_PORT_IRQ) {
2489         iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2490         if (iir) {
2491             bool found = false;
2492
2493             intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2494             ret = IRQ_HANDLED;
2495
2496             if (iir & gen8_de_port_aux_mask(dev_priv)) {
2497                 dp_aux_irq_handler(dev_priv);
2498                 found = true;
2499             }
2500
2501             if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2502                 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2503
2504                 if (hotplug_trigger) {
2505                     bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2506                     found = true;
2507                 }
2508             } else if (IS_BROADWELL(dev_priv)) {
2509                 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2510
2511                 if (hotplug_trigger) {
2512                     ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2513                     found = true;
2514                 }
2515             }
2516
2517             if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2518                 (iir & BXT_DE_PORT_GMBUS)) {
2519                 gmbus_irq_handler(dev_priv);
2520                 found = true;
2521             }
2522
2523             if (DISPLAY_VER(dev_priv) >= 11) {
2524                 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2525
2526                 if (te_trigger) {
2527                     gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2528                     found = true;
2529                 }
2530             }
2531
2532             if (!found)
2533                 drm_err(&dev_priv->drm,
2534                         "Unexpected DE Port interrupt\n");
2535         }
2536         else
2537             drm_err(&dev_priv->drm,
2538                     "The master control interrupt lied (DE PORT)!\n");
2539     }
2540
2541     for_each_pipe(dev_priv, pipe) {
2542         u32 fault_errors;
2543
2544         if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2545             continue;
2546
2547         iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2548         if (!iir) {
2549             drm_err(&dev_priv->drm,
2550                     "The master control interrupt lied (DE PIPE)!\n");
2551             continue;
2552         }
2553
2554         ret = IRQ_HANDLED;
2555         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2556
2557         if (iir & GEN8_PIPE_VBLANK)
2558             intel_handle_vblank(dev_priv, pipe);
2559
2560         if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2561             flip_done_handler(dev_priv, pipe);
2562
2563         if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2564             hsw_pipe_crc_irq_handler(dev_priv, pipe);
2565
2566         if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2567             intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2568
2569         fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2570         if (fault_errors)
2571             drm_err(&dev_priv->drm,
2572                     "Fault errors on pipe %c: 0x%08x\n",
2573                     pipe_name(pipe),
2574                     fault_errors);
2575     }
2576
2577     if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2578         master_ctl & GEN8_DE_PCH_IRQ) {
2579         /*
2580          * FIXME(BDW): Assume for now that the new interrupt handling
2581          * scheme also closed the SDE interrupt handling race we've seen
2582          * on older pch-split platforms. But this needs testing.
2583          */
2584         iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2585         if (iir) {
2586             intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2587             ret = IRQ_HANDLED;
2588
2589             if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2590                 icp_irq_handler(dev_priv, iir);
2591             else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2592                 spt_irq_handler(dev_priv, iir);
2593             else
2594                 cpt_irq_handler(dev_priv, iir);
2595         } else {
2596             /*
2597              * Like on previous PCH there seems to be something
2598              * fishy going on with forwarding PCH interrupts.
2599              */
2600             drm_dbg(&dev_priv->drm,
2601                     "The master control interrupt lied (SDE)!\n");
2602         }
2603     }
2604
2605     return ret;
2606 }
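
gen8_de_irq_handler() trusts the master control register only as a hint: each summary bit promises a non-zero child IIR, and an empty child is reported as "the master control interrupt lied" because there is nothing to ack. The same two-level dispatch in miniature, with invented register indices standing in for the real IIRs:

    /* Sketch only: child_iir[] and the summary-bit layout are invented. */
    #include <stdint.h>
    #include <stdio.h>

    #define MASTER_CHILD_A (1u << 0)
    #define MASTER_CHILD_B (1u << 1)

    static uint32_t child_iir[2] = { 0x10, 0x0 };   /* child B will "lie" */

    static int dispatch(uint32_t master_ctl)
    {
        int ret = 0;

        for (int i = 0; i < 2; i++) {
            if (!(master_ctl & (1u << i)))
                continue;
            uint32_t iir = child_iir[i];
            if (!iir) {
                /* summary bit set but nothing latched: log, don't ack */
                printf("The master control interrupt lied (child %d)!\n", i);
                continue;
            }
            child_iir[i] = 0;    /* ack: write-1-to-clear in real hardware */
            printf("child %d: handling 0x%08x\n", i, (unsigned)iir);
            ret = 1;
        }
        return ret;
    }

    int main(void)
    {
        return !dispatch(MASTER_CHILD_A | MASTER_CHILD_B);
    }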
2607
2608 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2609 {
2610     raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2611
2612     /*
2613      * Now with master disabled, get a sample of level indications
2614      * for this interrupt. Indications will be cleared on related acks.
2615      * New indications can and will light up during processing,
2616      * and will generate a new interrupt after enabling master.
2617      */
2618     return raw_reg_read(regs, GEN8_MASTER_IRQ);
2619 }
2620
2621 static inline void gen8_master_intr_enable(void __iomem * const regs)
2622 {
2623     raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2624 }
2625
2626 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2627 {
2628     struct drm_i915_private *dev_priv = arg;
2629     void __iomem * const regs = dev_priv->uncore.regs;
2630     u32 master_ctl;
2631
2632     if (!intel_irqs_enabled(dev_priv))
2633         return IRQ_NONE;
2634
2635     master_ctl = gen8_master_intr_disable(regs);
2636     if (!master_ctl) {
2637         gen8_master_intr_enable(regs);
2638         return IRQ_NONE;
2639     }
2640
2641     /* Find, queue (onto bottom-halves), then clear each source */
2642     gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2643
2644     /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2645     if (master_ctl & ~GEN8_GT_IRQS) {
2646         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2647         gen8_de_irq_handler(dev_priv, master_ctl);
2648         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2649     }
2650
2651     gen8_master_intr_enable(regs);
2652
2653     pmu_irq_stats(dev_priv, IRQ_HANDLED);
2654
2655     return IRQ_HANDLED;
2656 }
2657
2658 static u32
2659 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2660 {
2661     void __iomem * const regs = i915->uncore.regs;
2662     u32 iir;
2663
2664     if (!(master_ctl & GEN11_GU_MISC_IRQ))
2665         return 0;
2666
2667     iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2668     if (likely(iir))
2669         raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2670
2671     return iir;
2672 }
2673
2674 static void
2675 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2676 {
2677     if (iir & GEN11_GU_MISC_GSE)
2678         intel_opregion_asle_intr(i915);
2679 }
2680
2681 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2682 {
2683     raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2684
2685     /*
2686      * Now with master disabled, get a sample of level indications
2687      * for this interrupt. Indications will be cleared on related acks.
2688      * New indications can and will light up during processing,
2689      * and will generate a new interrupt after enabling master.
2690      */
2691     return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2692 }
2693
2694 static inline void gen11_master_intr_enable(void __iomem * const regs)
2695 {
2696     raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2697 }
2698
2699 static void
2700 gen11_display_irq_handler(struct drm_i915_private *i915)
2701 {
2702     void __iomem * const regs = i915->uncore.regs;
2703     const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2704
2705     disable_rpm_wakeref_asserts(&i915->runtime_pm);
2706     /*
2707      * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
2708      * for the display related bits.
2709      */
2710     raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2711     gen8_de_irq_handler(i915, disp_ctl);
2712     raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2713                   GEN11_DISPLAY_IRQ_ENABLE);
2714
2715     enable_rpm_wakeref_asserts(&i915->runtime_pm);
2716 }
2717
2718 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2719 {
2720     struct drm_i915_private *i915 = arg;
2721     void __iomem * const regs = i915->uncore.regs;
2722     struct intel_gt *gt = to_gt(i915);
2723     u32 master_ctl;
2724     u32 gu_misc_iir;
2725
2726     if (!intel_irqs_enabled(i915))
2727         return IRQ_NONE;
2728
2729     master_ctl = gen11_master_intr_disable(regs);
2730     if (!master_ctl) {
2731         gen11_master_intr_enable(regs);
2732         return IRQ_NONE;
2733     }
2734
2735     /* Find, queue (onto bottom-halves), then clear each source */
2736     gen11_gt_irq_handler(gt, master_ctl);
2737
2738     /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2739     if (master_ctl & GEN11_DISPLAY_IRQ)
2740         gen11_display_irq_handler(i915);
2741
2742     gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2743
2744     gen11_master_intr_enable(regs);
2745
2746     gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2747
2748     pmu_irq_stats(i915, IRQ_HANDLED);
2749
2750     return IRQ_HANDLED;
2751 }
2752
2753 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2754 {
2755     u32 val;
2756
2757     /* First disable interrupts */
2758     raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2759
2760     /* Get the indication levels and ack the master unit */
2761     val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2762     if (unlikely(!val))
2763         return 0;
2764
2765     raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2766
2767     return val;
2768 }
2769
2770 static inline void dg1_master_intr_enable(void __iomem * const regs)
2771 {
2772     raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2773 }
2774
2775 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2776 {
2777     struct drm_i915_private * const i915 = arg;
2778     struct intel_gt *gt = to_gt(i915);
2779     void __iomem * const regs = gt->uncore->regs;
2780     u32 master_tile_ctl, master_ctl;
2781     u32 gu_misc_iir;
2782
2783     if (!intel_irqs_enabled(i915))
2784         return IRQ_NONE;
2785
2786     master_tile_ctl = dg1_master_intr_disable(regs);
2787     if (!master_tile_ctl) {
2788         dg1_master_intr_enable(regs);
2789         return IRQ_NONE;
2790     }
2791
2792     /* FIXME: we only support tile 0 for now. */
2793     if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2794         master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2795         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2796     } else {
2797         DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl);
2798         dg1_master_intr_enable(regs);
2799         return IRQ_NONE;
2800     }
2801
2802     gen11_gt_irq_handler(gt, master_ctl);
2803
2804     if (master_ctl & GEN11_DISPLAY_IRQ)
2805         gen11_display_irq_handler(i915);
2806
2807     gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2808
2809     dg1_master_intr_enable(regs);
2810
2811     gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2812
2813     pmu_irq_stats(i915, IRQ_HANDLED);
2814
2815     return IRQ_HANDLED;
2816 }
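
DG1 adds one more level to the hierarchy: a tile register reports which tile interrupted, and only that tile's own master register is then read and acked. A sketch of the two-level ack, with arrays standing in for per-tile MMIO space and, matching the FIXME above, only tile 0 dispatched:

    /* Sketch only: tile_intr/tile_master are invented stand-ins for
     * DG1_MSTR_TILE_INTR and the per-tile GEN11_GFX_MSTR_IRQ. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TILES 2

    static uint32_t tile_intr;
    static uint32_t tile_master[NUM_TILES];

    static int dg1_style_handler(void)
    {
        uint32_t tiles = tile_intr;
        if (!tiles)
            return 0;
        tile_intr = 0;                   /* ack the tile summary */

        if (!(tiles & 1u)) {             /* only tile 0 is supported */
            printf("Tile not supported: 0x%08x\n", (unsigned)tiles);
            return 0;
        }

        uint32_t master_ctl = tile_master[0];
        tile_master[0] = 0;              /* ack tile 0's master register */
        printf("tile 0 master_ctl=0x%08x\n", (unsigned)master_ctl);
        return 1;
    }

    int main(void)
    {
        tile_intr = 1u;
        tile_master[0] = 0x00010001;
        return !dg1_style_handler();
    }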
2817
2818 /* Called from drm generic code, passed 'crtc' which
2819  * we use as a pipe index
2820  */
2821 int i8xx_enable_vblank(struct drm_crtc *crtc)
2822 {
2823     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2824     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2825     unsigned long irqflags;
2826
2827     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2828     i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2829     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2830
2831     return 0;
2832 }
2833
2834 int i915gm_enable_vblank(struct drm_crtc *crtc)
2835 {
2836     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2837
2838     /*
2839      * Vblank interrupts fail to wake the device up from C2+.
2840      * Disabling render clock gating during C-states avoids
2841      * the problem. There is a small power cost, so we do this
2842      * only when vblank interrupts are actually enabled.
2843      */
2844     if (dev_priv->vblank_enabled++ == 0)
2845         intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2846
2847     return i8xx_enable_vblank(crtc);
2848 }
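
Two idioms meet in i915gm_enable_vblank() and its disable counterpart at line 2953: a refcount so the clock-gating workaround is only paid for while someone actually needs vblanks, and the "masked bit" register format in which the upper 16 bits select which of the lower 16 bits a write may change. A sketch of both; SCPD0's real layout is not modeled, and the helpers only mirror what _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() amount to in spirit:

    /* Sketch only: scpd0 is a plain variable, not the real register. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t scpd0;        /* stand-in for the SCPD0 register */
    static int vblank_enabled;    /* how many CRTCs currently want vblanks */

    #define CLOCK_GATE_DISABLE (1u << 5)

    /* high half = write-enable mask, low half = new values for those bits */
    static uint32_t masked_bit_enable(uint32_t bits)  { return (bits << 16) | bits; }
    static uint32_t masked_bit_disable(uint32_t bits) { return (bits << 16); }

    static void masked_write(uint32_t *reg, uint32_t val)
    {
        uint32_t mask = val >> 16;
        *reg = (*reg & ~mask) | (val & mask & 0xffff);
    }

    static void enable_vblank(void)
    {
        if (vblank_enabled++ == 0)    /* first user pays the power cost */
            masked_write(&scpd0, masked_bit_enable(CLOCK_GATE_DISABLE));
    }

    static void disable_vblank(void)
    {
        if (--vblank_enabled == 0)    /* last user removes the workaround */
            masked_write(&scpd0, masked_bit_disable(CLOCK_GATE_DISABLE));
    }

    int main(void)
    {
        enable_vblank();
        enable_vblank();
        disable_vblank();
        printf("scpd0=0x%08x (workaround still on: %d)\n",
            (unsigned)scpd0, vblank_enabled);
        disable_vblank();
        printf("scpd0=0x%08x\n", (unsigned)scpd0);
        return 0;
    }

The masked format lets an interrupt-context writer flip one bit without a read-modify-write cycle, since untouched bits keep their state.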
2849
2850 int i965_enable_vblank(struct drm_crtc *crtc)
2851 {
2852     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2853     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2854     unsigned long irqflags;
2855
2856     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2857     i915_enable_pipestat(dev_priv, pipe,
2858                          PIPE_START_VBLANK_INTERRUPT_STATUS);
2859     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2860
2861     return 0;
2862 }
2863
2864 int ilk_enable_vblank(struct drm_crtc *crtc)
2865 {
2866     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2867     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2868     unsigned long irqflags;
2869     u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2870         DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2871
2872     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2873     ilk_enable_display_irq(dev_priv, bit);
2874     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2875
2876     /* Even though there is no DMC, the frame counter can get stuck when
2877      * PSR is active, since no frames are generated.
2878      */
2879     if (HAS_PSR(dev_priv))
2880         drm_crtc_vblank_restore(crtc);
2881
2882     return 0;
2883 }
2884
2885 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2886                                    bool enable)
2887 {
2888     struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2889     enum port port;
2890     u32 tmp;
2891
2892     if (!(intel_crtc->mode_flags &
2893           (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2894         return false;
2895
2896     /* for dual link cases we consider TE from slave */
2897     if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2898         port = PORT_B;
2899     else
2900         port = PORT_A;
2901
2902     tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2903     if (enable)
2904         tmp &= ~DSI_TE_EVENT;
2905     else
2906         tmp |= DSI_TE_EVENT;
2907
2908     intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2909
2910     tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2911     intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2912
2913     return true;
2914 }
2915
2916 int bdw_enable_vblank(struct drm_crtc *_crtc)
2917 {
2918     struct intel_crtc *crtc = to_intel_crtc(_crtc);
2919     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2920     enum pipe pipe = crtc->pipe;
2921     unsigned long irqflags;
2922
2923     if (gen11_dsi_configure_te(crtc, true))
2924         return 0;
2925
2926     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2927     bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2928     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2929
2930     /* Even if there is no DMC, the frame counter can get stuck when
2931      * PSR is active, since no frames are generated, so check only for PSR.
2932      */
2933     if (HAS_PSR(dev_priv))
2934         drm_crtc_vblank_restore(&crtc->base);
2935
2936     return 0;
2937 }
2938
2939 /* Called from drm generic code, passed 'crtc' which
2940  * we use as a pipe index
2941  */
2942 void i8xx_disable_vblank(struct drm_crtc *crtc)
2943 {
2944     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2945     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2946     unsigned long irqflags;
2947
2948     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2949     i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2950     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2951 }
2952
2953 void i915gm_disable_vblank(struct drm_crtc *crtc)
2954 {
2955     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2956
2957     i8xx_disable_vblank(crtc);
2958
2959     if (--dev_priv->vblank_enabled == 0)
2960         intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2961 }
2962
2963 void i965_disable_vblank(struct drm_crtc *crtc)
2964 {
2965     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2966     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2967     unsigned long irqflags;
2968
2969     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2970     i915_disable_pipestat(dev_priv, pipe,
2971                           PIPE_START_VBLANK_INTERRUPT_STATUS);
2972     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2973 }
2974
2975 void ilk_disable_vblank(struct drm_crtc *crtc)
2976 {
2977     struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2978     enum pipe pipe = to_intel_crtc(crtc)->pipe;
2979     unsigned long irqflags;
2980     u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2981         DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2982
2983     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2984     ilk_disable_display_irq(dev_priv, bit);
2985     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2986 }
2987
2988 void bdw_disable_vblank(struct drm_crtc *_crtc)
2989 {
2990     struct intel_crtc *crtc = to_intel_crtc(_crtc);
2991     struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2992     enum pipe pipe = crtc->pipe;
2993     unsigned long irqflags;
2994
2995     if (gen11_dsi_configure_te(crtc, false))
2996         return;
2997
2998     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2999     bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3000     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3001 }
3002
3003 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3004 {
3005     struct intel_uncore *uncore = &dev_priv->uncore;
3006
3007     if (HAS_PCH_NOP(dev_priv))
3008         return;
3009
3010     GEN3_IRQ_RESET(uncore, SDE);
3011
3012     if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3013         intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
3014 }
3015
3016 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3017 {
3018     struct intel_uncore *uncore = &dev_priv->uncore;
3019
3020     if (IS_CHERRYVIEW(dev_priv))
3021         intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3022     else
3023         intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
3024
3025     i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3026     intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3027
3028     i9xx_pipestat_irq_reset(dev_priv);
3029
3030     GEN3_IRQ_RESET(uncore, VLV_);
3031     dev_priv->irq_mask = ~0u;
3032 }
3033
3034 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3035 {
3036     struct intel_uncore *uncore = &dev_priv->uncore;
3037
3038     u32 pipestat_mask;
3039     u32 enable_mask;
3040     enum pipe pipe;
3041
3042     pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3043
3044     i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3045     for_each_pipe(dev_priv, pipe)
3046         i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3047
3048     enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3049         I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3050         I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3051         I915_LPE_PIPE_A_INTERRUPT |
3052         I915_LPE_PIPE_B_INTERRUPT;
3053
3054     if (IS_CHERRYVIEW(dev_priv))
3055         enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3056             I915_LPE_PIPE_C_INTERRUPT;
3057
3058     drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3059
3060     dev_priv->irq_mask = ~enable_mask;
3061
3062     GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3063 }
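
The invariant behind vlv_display_irq_postinstall() is that irq_mask is always the exact complement of the enable mask: the IMR masks everything the IER does not enable, and stale IIR bits are discarded before anything is enabled. Roughly what a GEN3_IRQ_INIT()-style sequence amounts to, with plain variables standing in for the three registers (the real macro also posts reads and clears the IIR twice, which this sketch does not model):

    /* Sketch only: struct irq_regs is a stand-in for real MMIO registers. */
    #include <stdint.h>
    #include <stdio.h>

    struct irq_regs {
        uint32_t imr;    /* 1 = event masked off */
        uint32_t ier;    /* 1 = event may assert the interrupt line */
        uint32_t iir;    /* latched events, write-1-to-clear in hardware */
    };

    static void gen3_style_irq_init(struct irq_regs *r, uint32_t enable_mask)
    {
        r->imr = ~enable_mask;    /* mask everything not enabled */
        r->iir = 0;               /* discard stale latched events */
        r->ier = enable_mask;     /* then let the enabled set through */
    }

    int main(void)
    {
        struct irq_regs r = { .iir = 0xdead };
        uint32_t enable_mask = (1u << 17) | (1u << 6) | (1u << 4);

        gen3_style_irq_init(&r, enable_mask);
        printf("imr=0x%08x ier=0x%08x iir=0x%08x\n",
            (unsigned)r.imr, (unsigned)r.ier, (unsigned)r.iir);
        return 0;
    }

This is also why the function can WARN if irq_mask is not ~0u on entry: the reset path above leaves it in exactly that state, so any other value means postinstall ran without a reset.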
3064
3065 /* drm_dma.h hooks
3066  */
3067 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3068 {
3069     struct intel_uncore *uncore = &dev_priv->uncore;
3070
3071     GEN3_IRQ_RESET(uncore, DE);
3072     dev_priv->irq_mask = ~0u;
3073
3074     if (GRAPHICS_VER(dev_priv) == 7)
3075         intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3076
3077     if (IS_HASWELL(dev_priv)) {
3078         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3079         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3080     }
3081
3082     gen5_gt_irq_reset(to_gt(dev_priv));
3083
3084     ibx_irq_reset(dev_priv);
3085 }
3086
3087 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3088 {
3089     intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3090     intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3091
3092     gen5_gt_irq_reset(to_gt(dev_priv));
3093
3094     spin_lock_irq(&dev_priv->irq_lock);
3095     if (dev_priv->display_irqs_enabled)
3096         vlv_display_irq_reset(dev_priv);
3097     spin_unlock_irq(&dev_priv->irq_lock);
3098 }
3099
3100 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3101 {
3102     struct intel_uncore *uncore = &dev_priv->uncore;
3103     enum pipe pipe;
3104
3105     if (!HAS_DISPLAY(dev_priv))
3106         return;
3107
3108     intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3109     intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3110
3111     for_each_pipe(dev_priv, pipe)
3112         if (intel_display_power_is_enabled(dev_priv,
3113                                            POWER_DOMAIN_PIPE(pipe)))
3114             GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3115
3116     GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3117     GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3118 }
3119
3120 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3121 {
3122     struct intel_uncore *uncore = &dev_priv->uncore;
3123
3124     gen8_master_intr_disable(dev_priv->uncore.regs);
3125
3126     gen8_gt_irq_reset(to_gt(dev_priv));
3127     gen8_display_irq_reset(dev_priv);
3128     GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3129
3130     if (HAS_PCH_SPLIT(dev_priv))
3131         ibx_irq_reset(dev_priv);
3132
3133 }
3134
3135 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3136 {
3137     struct intel_uncore *uncore = &dev_priv->uncore;
3138     enum pipe pipe;
3139     u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3140         BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3141
3142     if (!HAS_DISPLAY(dev_priv))
3143         return;
3144
3145     intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3146
3147     if (DISPLAY_VER(dev_priv) >= 12) {
3148         enum transcoder trans;
3149
3150         for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3151             enum intel_display_power_domain domain;
3152
3153             domain = POWER_DOMAIN_TRANSCODER(trans);
3154             if (!intel_display_power_is_enabled(dev_priv, domain))
3155                 continue;
3156
3157             intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3158             intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3159         }
3160     } else {
3161         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3162         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3163     }
3164
3165     for_each_pipe(dev_priv, pipe)
3166         if (intel_display_power_is_enabled(dev_priv,
3167                                            POWER_DOMAIN_PIPE(pipe)))
3168             GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3169
3170     GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3171     GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3172     GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3173
3174     if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3175         GEN3_IRQ_RESET(uncore, SDE);
3176 }
3177
3178 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3179 {
3180     struct intel_gt *gt = to_gt(dev_priv);
3181     struct intel_uncore *uncore = gt->uncore;
3182
3183     gen11_master_intr_disable(dev_priv->uncore.regs);
3184
3185     gen11_gt_irq_reset(gt);
3186     gen11_display_irq_reset(dev_priv);
3187
3188     GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3189     GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3190 }
3191
3192 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3193 {
3194     struct intel_gt *gt = to_gt(dev_priv);
3195     struct intel_uncore *uncore = gt->uncore;
3196
3197     dg1_master_intr_disable(dev_priv->uncore.regs);
3198
3199     gen11_gt_irq_reset(gt);
3200     gen11_display_irq_reset(dev_priv);
3201
3202     GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3203     GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3204 }
3205
3206 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3207                                      u8 pipe_mask)
3208 {
3209     struct intel_uncore *uncore = &dev_priv->uncore;
3210     u32 extra_ier = GEN8_PIPE_VBLANK |
3211         gen8_de_pipe_underrun_mask(dev_priv) |
3212         gen8_de_pipe_flip_done_mask(dev_priv);
3213     enum pipe pipe;
3214
3215     spin_lock_irq(&dev_priv->irq_lock);
3216
3217     if (!intel_irqs_enabled(dev_priv)) {
3218         spin_unlock_irq(&dev_priv->irq_lock);
3219         return;
3220     }
3221
3222     for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3223         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3224                           dev_priv->de_irq_mask[pipe],
3225                           ~dev_priv->de_irq_mask[pipe] | extra_ier);
3226
3227     spin_unlock_irq(&dev_priv->irq_lock);
3228 }
3229
3230void gen8_irq_power_well_pre_disable(struct drm_i915_privateinteldrm_softc *dev_priv,
3231 u8 pipe_mask)
3232{
3233 struct intel_uncore *uncore = &dev_priv->uncore;
3234 enum pipe pipe;
3235
3236 spin_lock_irq(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
3237
3238 if (!intel_irqs_enabled(dev_priv)) {
3239 spin_unlock_irq(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
3240 return;
3241 }
3242
3243 for_each_pipe_masked(dev_priv, pipe, pipe_mask)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!(
(&(dev_priv)->__runtime)->pipe_mask & (1UL <<
(pipe)))) {} else if (!((pipe_mask) & (1UL << (pipe
)))) {} else
3244 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe)({ unsigned int which_ = pipe; gen3_irq_reset((uncore), ((const
i915_reg_t){ .reg = (0x44404 + (0x10 * (which_))) }), ((const
i915_reg_t){ .reg = (0x44408 + (0x10 * (which_))) }), ((const
i915_reg_t){ .reg = (0x4440c + (0x10 * (which_))) })); })
;
3245
3246 spin_unlock_irq(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
3247
3248 /* make sure we're done processing display irqs */
3249 intel_synchronize_irq(dev_priv);
3250}
3251
3252static void cherryview_irq_reset(struct drm_i915_privateinteldrm_softc *dev_priv)
3253{
3254 struct intel_uncore *uncore = &dev_priv->uncore;
3255
3256 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ((const i915_reg_t){ .reg = (0x44200) }), 0);
3257 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ)((void)intel_uncore_read_notrace(&dev_priv->uncore, ((
const i915_reg_t){ .reg = (0x44200) })))
;
3258
3259 gen8_gt_irq_reset(to_gt(dev_priv));
3260
3261 GEN3_IRQ_RESET(uncore, GEN8_PCU_)gen3_irq_reset((uncore), ((const i915_reg_t){ .reg = (0x444e4
) }), ((const i915_reg_t){ .reg = (0x444e8) }), ((const i915_reg_t
){ .reg = (0x444ec) }))
;
3262
3263 spin_lock_irq(&dev_priv->irq_lock)mtx_enter(&dev_priv->irq_lock);
3264 if (dev_priv->display_irqs_enabled)
3265 vlv_display_irq_reset(dev_priv);
3266 spin_unlock_irq(&dev_priv->irq_lock)mtx_leave(&dev_priv->irq_lock);
3267}
3268
3269static u32 ibx_hotplug_enables(struct drm_i915_privateinteldrm_softc *i915,
3270 enum hpd_pin pin)
3271{
3272 switch (pin) {
3273 case HPD_PORT_A:
3274 /*
3275 * When CPU and PCH are on the same package, port A
3276 * HPD must be enabled in both north and south.
3277 */
3278 return HAS_PCH_LPT_LP(i915)(((i915)->pch_id) == 0x9c00 || ((i915)->pch_id) == 0x9c80
)
?
3279 PORTA_HOTPLUG_ENABLE(1 << 28) : 0;
3280 case HPD_PORT_B:
3281 return PORTB_HOTPLUG_ENABLE(1 << 4) |
3282 PORTB_PULSE_DURATION_2ms(0 << 2);
3283 case HPD_PORT_C:
3284 return PORTC_HOTPLUG_ENABLE(1 << 12) |
3285 PORTC_PULSE_DURATION_2ms(0 << 10);
3286 case HPD_PORT_D:
3287 return PORTD_HOTPLUG_ENABLE(1 << 20) |
3288 PORTD_PULSE_DURATION_2ms(0 << 18);
3289 default:
3290 return 0;
3291 }
3292}
3293
3294static void ibx_hpd_detection_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3295{
3296 u32 hotplug;
3297
3298 /*
3299 * Enable digital hotplug on the PCH, and configure the DP short pulse
3300 * duration to 2ms (which is the minimum in the Display Port spec).
3301 * The pulse duration bits are reserved on LPT+.
3302 */
3303 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }));
3304 hotplug &= ~(PORTA_HOTPLUG_ENABLE(1 << 28) |
3305 PORTB_HOTPLUG_ENABLE(1 << 4) |
3306 PORTC_HOTPLUG_ENABLE(1 << 12) |
3307 PORTD_HOTPLUG_ENABLE(1 << 20) |
3308 PORTB_PULSE_DURATION_MASK(3 << 2) |
3309 PORTC_PULSE_DURATION_MASK(3 << 10) |
3310 PORTD_PULSE_DURATION_MASK(3 << 18));
3311 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3312 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG((const i915_reg_t){ .reg = (0xc4030) }), hotplug);
3313}
3314
3315static void ibx_hpd_irq_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3316{
3317 u32 hotplug_irqs, enabled_irqs;
3318
3319 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3320 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3321
3322 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3323
3324 ibx_hpd_detection_setup(dev_priv);
3325}
3326
3327static u32 icp_ddi_hotplug_enables(struct drm_i915_privateinteldrm_softc *i915,
3328 enum hpd_pin pin)
3329{
3330 switch (pin) {
3331 case HPD_PORT_A:
3332 case HPD_PORT_B:
3333 case HPD_PORT_C:
3334 case HPD_PORT_D:
3335 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin)(0x8 << (((pin) - HPD_PORT_A) * 4));
3336 default:
3337 return 0;
3338 }
3339}
3340
3341static u32 icp_tc_hotplug_enables(struct drm_i915_privateinteldrm_softc *i915,
3342 enum hpd_pin pin)
3343{
3344 switch (pin) {
3345 case HPD_PORT_TC1:
3346 case HPD_PORT_TC2:
3347 case HPD_PORT_TC3:
3348 case HPD_PORT_TC4:
3349 case HPD_PORT_TC5:
3350 case HPD_PORT_TC6:
3351 return ICP_TC_HPD_ENABLE(pin)(8 << (((pin) - HPD_PORT_TC1) * 4));
3352 default:
3353 return 0;
3354 }
3355}
3356
3357static void icp_ddi_hpd_detection_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3358{
3359 u32 hotplug;
3360
3361 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI((const i915_reg_t){ .reg = (0xc4030) }));
3362 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A)(0x8 << (((HPD_PORT_A) - HPD_PORT_A) * 4)) |
3363 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B)(0x8 << (((HPD_PORT_B) - HPD_PORT_A) * 4)) |
3364 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C)(0x8 << (((HPD_PORT_C) - HPD_PORT_A) * 4)) |
3365 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)(0x8 << (((HPD_PORT_D) - HPD_PORT_A) * 4)));
3366 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3367 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI((const i915_reg_t){ .reg = (0xc4030) }), hotplug);
3368}
3369
3370static void icp_tc_hpd_detection_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3371{
3372 u32 hotplug;
3373
3374 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC((const i915_reg_t){ .reg = (0xc4034) }));
3375 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1)(8 << (((HPD_PORT_TC1) - HPD_PORT_TC1) * 4)) |
3376 ICP_TC_HPD_ENABLE(HPD_PORT_TC2)(8 << (((HPD_PORT_TC2) - HPD_PORT_TC1) * 4)) |
3377 ICP_TC_HPD_ENABLE(HPD_PORT_TC3)(8 << (((HPD_PORT_TC3) - HPD_PORT_TC1) * 4)) |
3378 ICP_TC_HPD_ENABLE(HPD_PORT_TC4)(8 << (((HPD_PORT_TC4) - HPD_PORT_TC1) * 4)) |
3379 ICP_TC_HPD_ENABLE(HPD_PORT_TC5)(8 << (((HPD_PORT_TC5) - HPD_PORT_TC1) * 4)) |
3380 ICP_TC_HPD_ENABLE(HPD_PORT_TC6)(8 << (((HPD_PORT_TC6) - HPD_PORT_TC1) * 4)));
3381 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3382 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC((const i915_reg_t){ .reg = (0xc4034) }), hotplug);
3383}
3384
3385static void icp_hpd_irq_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3386{
3387 u32 hotplug_irqs, enabled_irqs;
3388
3389 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3390 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3391
3392 if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) <= PCH_TGP)
3393 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT((const i915_reg_t){ .reg = (0xc4038) }), SHPD_FILTER_CNT_500_ADJ0x001D9);
3394
3395 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3396
3397 icp_ddi_hpd_detection_setup(dev_priv);
3398 icp_tc_hpd_detection_setup(dev_priv);
3399}
3400
3401static u32 gen11_hotplug_enables(struct drm_i915_privateinteldrm_softc *i915,
3402 enum hpd_pin pin)
3403{
3404 switch (pin) {
3405 case HPD_PORT_TC1:
3406 case HPD_PORT_TC2:
3407 case HPD_PORT_TC3:
3408 case HPD_PORT_TC4:
3409 case HPD_PORT_TC5:
3410 case HPD_PORT_TC6:
3411 return GEN11_HOTPLUG_CTL_ENABLE(pin)(8 << (((pin) - HPD_PORT_TC1) * 4));
3412 default:
3413 return 0;
3414 }
3415}
3416
3417static void dg1_hpd_irq_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3418{
3419 u32 val;
3420
3421 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1((const i915_reg_t){ .reg = (0xc2000) }));
3422 val |= (INVERT_DDIA_HPD(1 << 15) |
3423 INVERT_DDIB_HPD(1 << 16) |
3424 INVERT_DDIC_HPD(1 << 17) |
3425 INVERT_DDID_HPD(1 << 18));
3426 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1((const i915_reg_t){ .reg = (0xc2000) }), val);
3427
3428 icp_hpd_irq_setup(dev_priv);
3429}
3430
3431static void gen11_tc_hpd_detection_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3432{
3433 u32 hotplug;
3434
3435 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL((const i915_reg_t){ .reg = (0x44038) }));
3436 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1)(8 << (((HPD_PORT_TC1) - HPD_PORT_TC1) * 4)) |
3437 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2)(8 << (((HPD_PORT_TC2) - HPD_PORT_TC1) * 4)) |
3438 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3)(8 << (((HPD_PORT_TC3) - HPD_PORT_TC1) * 4)) |
3439 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4)(8 << (((HPD_PORT_TC4) - HPD_PORT_TC1) * 4)) |
3440 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5)(8 << (((HPD_PORT_TC5) - HPD_PORT_TC1) * 4)) |
3441 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)(8 << (((HPD_PORT_TC6) - HPD_PORT_TC1) * 4)));
3442 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3443 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL((const i915_reg_t){ .reg = (0x44038) }), hotplug);
3444}
3445
3446static void gen11_tbt_hpd_detection_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3447{
3448 u32 hotplug;
3449
3450 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL((const i915_reg_t){ .reg = (0x44030) }));
3451 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1)(8 << (((HPD_PORT_TC1) - HPD_PORT_TC1) * 4)) |
3452 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2)(8 << (((HPD_PORT_TC2) - HPD_PORT_TC1) * 4)) |
3453 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3)(8 << (((HPD_PORT_TC3) - HPD_PORT_TC1) * 4)) |
3454 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4)(8 << (((HPD_PORT_TC4) - HPD_PORT_TC1) * 4)) |
3455 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5)(8 << (((HPD_PORT_TC5) - HPD_PORT_TC1) * 4)) |
3456 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)(8 << (((HPD_PORT_TC6) - HPD_PORT_TC1) * 4)));
3457 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3458 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL((const i915_reg_t){ .reg = (0x44030) }), hotplug);
3459}
3460
3461static void gen11_hpd_irq_setup(struct drm_i915_privateinteldrm_softc *dev_priv)
3462{
3463 u32 hotplug_irqs, enabled_irqs;
3464 u32 val;
3465
3466 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3467 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3468
3469 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR((const i915_reg_t){ .reg = (0x44474) }));
3470 val &= ~hotplug_irqs;
3471 val |= ~enabled_irqs & hotplug_irqs;
3472 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR((const i915_reg_t){ .reg = (0x44474) }), val);
3473 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR)((void)intel_uncore_read_notrace(&dev_priv->uncore, ((
const i915_reg_t){ .reg = (0x44474) })))
;
3474
3475 gen11_tc_hpd_detection_setup(dev_priv);
3476 gen11_tbt_hpd_detection_setup(dev_priv);
3477
3478 if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) >= PCH_ICP)
3479 icp_hpd_irq_setup(dev_priv);
3480}
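The IMR read-modify-write at lines 3469-3471 follows the driver's usual convention that a set IMR bit masks an interrupt: the first step unmasks every hotplug bit this function owns, the second re-masks the ones that were not requested. A small worked illustration (the values are made up for the example, not taken from any real register state):

/*
 * Illustrative only: two hotplug bits, TC1 = bit 0 and TC2 = bit 1.
 *
 *	hotplug_irqs = 0x03;	bits owned by this setup function
 *	enabled_irqs = 0x01;	only TC1 was requested
 *	val          = 0xf0;	unrelated IMR bits, preserved throughout
 *
 *	val &= ~hotplug_irqs;			-> 0xf0 (both briefly unmasked)
 *	val |= ~enabled_irqs & hotplug_irqs;	-> 0xf2 (TC2 re-masked, TC1 live)
 */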
3481
3482static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3483 enum hpd_pin pin)
3484{
3485 switch (pin) {
3486 case HPD_PORT_A:
3487 return PORTA_HOTPLUG_ENABLE;
3488 case HPD_PORT_B:
3489 return PORTB_HOTPLUG_ENABLE;
3490 case HPD_PORT_C:
3491 return PORTC_HOTPLUG_ENABLE;
3492 case HPD_PORT_D:
3493 return PORTD_HOTPLUG_ENABLE;
3494 default:
3495 return 0;
3496 }
3497}
3498
3499static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3500 enum hpd_pin pin)
3501{
3502 switch (pin) {
3503 case HPD_PORT_E:
3504 return PORTE_HOTPLUG_ENABLE;
3505 default:
3506 return 0;
3507 }
3508}
3509
3510static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3511{
3512 u32 val, hotplug;
3513
3514 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3515 if (HAS_PCH_CNP(dev_priv)) {
3516 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3517 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3518 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3519 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3520 }
3521
3522 /* Enable digital hotplug on the PCH */
3523 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3524 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3525 PORTB_HOTPLUG_ENABLE |
3526 PORTC_HOTPLUG_ENABLE |
3527 PORTD_HOTPLUG_ENABLE);
3528 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3529 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3530
3531 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3532 hotplug &= ~PORTE_HOTPLUG_ENABLE;
3533 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3534 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3535}
3536
3537static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3538{
3539 u32 hotplug_irqs, enabled_irqs;
3540
3541 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3542 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3543
3544 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3545 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3546
3547 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3548
3549 spt_hpd_detection_setup(dev_priv);
3550}
3551
3552static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3553 enum hpd_pin pin)
3554{
3555 switch (pin) {
3556 case HPD_PORT_A:
3557 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3558 DIGITAL_PORTA_PULSE_DURATION_2ms;
3559 default:
3560 return 0;
3561 }
3562}
3563
3564static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3565{
3566 u32 hotplug;
3567
3568 /*
3569 * Enable digital hotplug on the CPU, and configure the DP short pulse
3570 * duration to 2ms (which is the minimum in the Display Port spec)
3571 * The pulse duration bits are reserved on HSW+.
3572 */
3573 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3574 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3575 DIGITAL_PORTA_PULSE_DURATION_MASK);
3576 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3577 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3578}
3579
3580static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3581{
3582 u32 hotplug_irqs, enabled_irqs;
3583
3584 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3585 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3586
3587 if (DISPLAY_VER(dev_priv) >= 8)
3588 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3589 else
3590 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3591
3592 ilk_hpd_detection_setup(dev_priv);
3593
3594 ibx_hpd_irq_setup(dev_priv);
3595}
3596
3597static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3598 enum hpd_pin pin)
3599{
3600 u32 hotplug;
3601
3602 switch (pin) {
3603 case HPD_PORT_A:
3604 hotplug = PORTA_HOTPLUG_ENABLE;
3605 if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3606 hotplug |= BXT_DDIA_HPD_INVERT;
3607 return hotplug;
3608 case HPD_PORT_B:
3609 hotplug = PORTB_HOTPLUG_ENABLE;
3610 if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3611 hotplug |= BXT_DDIB_HPD_INVERT;
3612 return hotplug;
3613 case HPD_PORT_C:
3614 hotplug = PORTC_HOTPLUG_ENABLE;
3615 if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3616 hotplug |= BXT_DDIC_HPD_INVERT;
3617 return hotplug;
3618 default:
3619 return 0;
3620 }
3621}
3622
3623static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3624{
3625 u32 hotplug;
3626
3627 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3628 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3629 PORTB_HOTPLUG_ENABLE |
3630 PORTC_HOTPLUG_ENABLE |
3631 BXT_DDIA_HPD_INVERT |
3632 BXT_DDIB_HPD_INVERT |
3633 BXT_DDIC_HPD_INVERT);
3634 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3635 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3636}
3637
3638static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3639{
3640 u32 hotplug_irqs, enabled_irqs;
3641
3642 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3643 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3644
3645 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3646
3647 bxt_hpd_detection_setup(dev_priv);
3648}
3649
3650/*
3651 * SDEIER is also touched by the interrupt handler to work around missed PCH
3652 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3653 * instead we unconditionally enable all PCH interrupt sources here, but then
3654 * only unmask them as needed with SDEIMR.
3655 *
3656 * Note that we currently do this after installing the interrupt handler,
3657 * but before we enable the master interrupt. That should be sufficient
3658 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3659 * interrupts could still race.
3660 */
3661static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3662{
3663 struct intel_uncore *uncore = &dev_priv->uncore;
3664 u32 mask;
3665
3666 if (HAS_PCH_NOP(dev_priv))
3667 return;
3668
3669 if (HAS_PCH_IBX(dev_priv))
3670 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3671 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3672 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3673 else
3674 mask = SDE_GMBUS_CPT;
3675
3676 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3677}
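For context, GEN3_IRQ_RESET and GEN3_IRQ_INIT wrap two helpers defined earlier in this file; their bodies below are reproduced from the upstream i915 sources for reference only (they are not part of the numbered listing). They encode the ordering the comment above relies on: IIR is drained and asserted clean before IER and IMR are written, and a posting read flushes each write.

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

So ibx_irq_postinstall() above writes SDEIER = 0xffffffff once (all sources enabled) and thereafter gates delivery purely through SDEIMR, which is exactly the split the SDEIER comment describes.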
3678
3679static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3680{
3681 struct intel_uncore *uncore = &dev_priv->uncore;
3682 u32 display_mask, extra_mask;
3683
3684 if (GRAPHICS_VER(dev_priv) >= 7) {
3685 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3686 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3687 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3688 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3689 DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3690 DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3691 DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3692 DE_DP_A_HOTPLUG_IVB);
3693 } else {
3694 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3695 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3696 DE_PIPEA_CRC_DONE | DE_POISON);
3697 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3698 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3699 DE_PLANE_FLIP_DONE(PLANE_A) |
3700 DE_PLANE_FLIP_DONE(PLANE_B) |
3701 DE_DP_A_HOTPLUG);
3702 }
3703
3704 if (IS_HASWELL(dev_priv)) {
3705 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3706 display_mask |= DE_EDP_PSR_INT_HSW;
3707 }
3708
3709 if (IS_IRONLAKE_M(dev_priv))
3710 extra_mask |= DE_PCU_EVENT;
3711
3712 dev_priv->irq_mask = ~display_mask;
3713
3714 ibx_irq_postinstall(dev_priv);
3715
3716 gen5_gt_irq_postinstall(to_gt(dev_priv));
3717
3718 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3719 display_mask | extra_mask);
3720}
3721
3722void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3723{
3724 lockdep_assert_held(&dev_priv->irq_lock);
3725
3726 if (dev_priv->display_irqs_enabled)
3727 return;
3728
3729 dev_priv->display_irqs_enabled = true;
3730
3731 if (intel_irqs_enabled(dev_priv)) {
3732 vlv_display_irq_reset(dev_priv);
3733 vlv_display_irq_postinstall(dev_priv);
3734 }
3735}
3736
3737void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3738{
3739 lockdep_assert_held(&dev_priv->irq_lock);
3740
3741 if (!dev_priv->display_irqs_enabled)
3742 return;
3743
3744 dev_priv->display_irqs_enabled = false;
3745
3746 if (intel_irqs_enabled(dev_priv))
3747 vlv_display_irq_reset(dev_priv);
3748}
3749
3750
3751static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3752{
3753 gen5_gt_irq_postinstall(to_gt(dev_priv));
3754
3755 spin_lock_irq(&dev_priv->irq_lock);
3756 if (dev_priv->display_irqs_enabled)
3757 vlv_display_irq_postinstall(dev_priv);
3758 spin_unlock_irq(&dev_priv->irq_lock);
3759
3760 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3761 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3762}
3763
3764static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3765{
3766 struct intel_uncore *uncore = &dev_priv->uncore;
3767
3768 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3769 GEN8_PIPE_CDCLK_CRC_DONE;
3770 u32 de_pipe_enables;
3771 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3772 u32 de_port_enables;
3773 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3774 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3775 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3776 enum pipe pipe;
3777
3778 if (!HAS_DISPLAY(dev_priv))
3779 return;
3780
3781 if (DISPLAY_VER(dev_priv) <= 10)
3782 de_misc_masked |= GEN8_DE_MISC_GSE;
3783
3784 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3785 de_port_masked |= BXT_DE_PORT_GMBUS;
3786
3787 if (DISPLAY_VER(dev_priv) >= 11) {
3788 enum port port;
3789
3790 if (intel_bios_is_dsi_present(dev_priv, &port))
3791 de_port_masked |= DSI0_TE | DSI1_TE;
3792 }
3793
3794 de_pipe_enables = de_pipe_masked |
3795 GEN8_PIPE_VBLANK |
3796 gen8_de_pipe_underrun_mask(dev_priv) |
3797 gen8_de_pipe_flip_done_mask(dev_priv);
3798
3799 de_port_enables = de_port_masked;
3800 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3801 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3802 else if (IS_BROADWELL(dev_priv))
3803 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3804
3805 if (DISPLAY_VER(dev_priv) >= 12) {
3806 enum transcoder trans;
3807
3808 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3809 enum intel_display_power_domain domain;
3810
3811 domain = POWER_DOMAIN_TRANSCODER(trans);
3812 if (!intel_display_power_is_enabled(dev_priv, domain))
3813 continue;
3814
3815 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3816 }
3817 } else {
3818 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3819 }
3820
3821 for_each_pipe(dev_priv, pipe) {
3822 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3823
3824 if (intel_display_power_is_enabled(dev_priv,
3825 POWER_DOMAIN_PIPE(pipe)))
3826 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3827 dev_priv->de_irq_mask[pipe],
3828 de_pipe_enables);
3829 }
3830
3831 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3832 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3833
3834 if (DISPLAY_VER(dev_priv) >= 11) {
3835 u32 de_hpd_masked = 0;
3836 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3837 GEN11_DE_TBT_HOTPLUG_MASK;
3838
3839 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3840 de_hpd_enables);
3841 }
3842}
3843
3844static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3845{
3846 struct intel_uncore *uncore = &dev_priv->uncore;
3847 u32 mask = SDE_GMBUS_ICP;
3848
3849 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3850}
3851
3852static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3853{
3854 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3855 icp_irq_postinstall(dev_priv);
3856 else if (HAS_PCH_SPLIT(dev_priv))
3857 ibx_irq_postinstall(dev_priv);
3858
3859 gen8_gt_irq_postinstall(to_gt(dev_priv));
3860 gen8_de_irq_postinstall(dev_priv);
3861
3862 gen8_master_intr_enable(dev_priv->uncore.regs);
3863}
3864
3865static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3866{
3867 if (!HAS_DISPLAY(dev_priv))
3868 return;
3869
3870 gen8_de_irq_postinstall(dev_priv);
3871
3872 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3873 GEN11_DISPLAY_IRQ_ENABLE);
3874}
3875
3876static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3877{
3878 struct intel_gt *gt = to_gt(dev_priv);
3879 struct intel_uncore *uncore = gt->uncore;
3880 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3881
3882 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3883 icp_irq_postinstall(dev_priv);
3884
3885 gen11_gt_irq_postinstall(gt);
3886 gen11_de_irq_postinstall(dev_priv);
3887
3888 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3889
3890 gen11_master_intr_enable(uncore->regs);
3891 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3892}
3893
3894static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3895{
3896 struct intel_gt *gt = to_gt(dev_priv);
3897 struct intel_uncore *uncore = gt->uncore;
3898 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3899
3900 gen11_gt_irq_postinstall(gt);
3901
3902 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3903
3904 if (HAS_DISPLAY(dev_priv)) {
3905 icp_irq_postinstall(dev_priv);
3906 gen8_de_irq_postinstall(dev_priv);
3907 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3908 GEN11_DISPLAY_IRQ_ENABLE);
3909 }
3910
3911 dg1_master_intr_enable(uncore->regs);
3912 intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3913}
3914
3915static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3916{
3917 gen8_gt_irq_postinstall(to_gt(dev_priv));
3918
3919 spin_lock_irq(&dev_priv->irq_lock);
3920 if (dev_priv->display_irqs_enabled)
3921 vlv_display_irq_postinstall(dev_priv);
3922 spin_unlock_irq(&dev_priv->irq_lock);
3923
3924 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3925 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3926}
3927
3928static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3929{
3930 struct intel_uncore *uncore = &dev_priv->uncore;
3931
3932 i9xx_pipestat_irq_reset(dev_priv);
3933
3934 GEN2_IRQ_RESET(uncore);
3935 dev_priv->irq_mask = ~0u;
3936}
3937
3938static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3939{
3940 struct intel_uncore *uncore = &dev_priv->uncore;
3941 u16 enable_mask;
3942
3943 intel_uncore_write16(uncore,
3944 EMR,
3945 ~(I915_ERROR_PAGE_TABLE |
3946 I915_ERROR_MEMORY_REFRESH));
3947
3948 /* Unmask the interrupts that we always want on. */
3949 dev_priv->irq_mask =
3950 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3951 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3952 I915_MASTER_ERROR_INTERRUPT);
3953
3954 enable_mask =
3955 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3956 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3957 I915_MASTER_ERROR_INTERRUPT |
3958 I915_USER_INTERRUPT;
3959
3960 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3961
3962 /* Interrupt setup is already guaranteed to be single-threaded, this is
3963 * just to make the assert_spin_locked check happy. */
3964 spin_lock_irq(&dev_priv->irq_lock);
3965 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3966 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3967 spin_unlock_irq(&dev_priv->irq_lock);
3968}
3969
3970static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3971 u16 *eir, u16 *eir_stuck)
3972{
3973 struct intel_uncore *uncore = &i915->uncore;
3974 u16 emr;
3975
3976 *eir = intel_uncore_read16(uncore, EIR);
3977
3978 if (*eir)
3979 intel_uncore_write16(uncore, EIR, *eir);
3980
3981 *eir_stuck = intel_uncore_read16(uncore, EIR);
3982 if (*eir_stuck == 0)
3983 return;
3984
3985 /*
3986 * Toggle all EMR bits to make sure we get an edge
3987 * in the ISR master error bit if we don't clear
3988 * all the EIR bits. Otherwise the edge triggered
3989 * IIR on i965/g4x wouldn't notice that an interrupt
3990 * is still pending. Also some EIR bits can't be
3991 * cleared except by handling the underlying error
3992 * (or by a GPU reset) so we mask any bit that
3993 * remains set.
3994 */
3995 emr = intel_uncore_read16(uncore, EMR);
3996 intel_uncore_write16(uncore, EMR, 0xffff);
3997 intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3998}
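A concrete walk-through of the toggle above, using hypothetical values:

/*
 * Hypothetical example of the sequence at lines 3995-3997:
 *
 *	*eir_stuck = 0x0002	a memory-refresh error that the write-back
 *				at line 3979 could not clear
 *	emr        = 0x0000	nothing was masked beforehand
 *
 *	write EMR = 0xffff		every error masked, so the OR of
 *					unmasked EIR bits feeding the ISR
 *					master error bit drops to 0
 *	write EMR = 0x0000 | 0x0002	only the stuck bit stays masked;
 *					any fresh error now produces a new
 *					0->1 edge that the edge-triggered
 *					IIR can latch
 */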
3999
4000static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4001 u16 eir, u16 eir_stuck)
4002{
4003 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4004
4005 if (eir_stuck)
4006 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
4007 eir_stuck);
4008}
4009
4010static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4011 u32 *eir, u32 *eir_stuck)
4012{
4013 u32 emr;
4014
4015 *eir = intel_uncore_read(&dev_priv->uncore, EIR);
4016
4017 intel_uncore_write(&dev_priv->uncore, EIR, *eir);
4018
4019 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
4020 if (*eir_stuck == 0)
4021 return;
4022
4023 /*
4024 * Toggle all EMR bits to make sure we get an edge
4025 * in the ISR master error bit if we don't clear
4026 * all the EIR bits. Otherwise the edge triggered
4027 * IIR on i965/g4x wouldn't notice that an interrupt
4028 * is still pending. Also some EIR bits can't be
4029 * cleared except by handling the underlying error
4030 * (or by a GPU reset) so we mask any bit that
4031 * remains set.
4032 */
4033 emr = intel_uncore_read(&dev_priv->uncore, EMR);
4034 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
4035 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
4036}
4037
4038static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4039 u32 eir, u32 eir_stuck)
4040{
4041 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4042
4043 if (eir_stuck)
4044 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
4045 eir_stuck);
4046}
4047
4048static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4049{
4050 struct drm_i915_private *dev_priv = arg;
4051 irqreturn_t ret = IRQ_NONE;
4052
4053 if (!intel_irqs_enabled(dev_priv))
4054 return IRQ_NONE;
4055
4056 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4057 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4058
4059 do {
4060 u32 pipe_stats[I915_MAX_PIPES] = {};
4061 u16 eir = 0, eir_stuck = 0;
4062 u16 iir;
4063
4064 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4065 if (iir == 0)
4066 break;
4067
4068 ret = IRQ_HANDLED;
4069
4070 /* Call regardless, as some status bits might not be
4071 * signalled in iir */
4072 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4073
4074 if (iir & I915_MASTER_ERROR_INTERRUPT)
4075 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4076
4077 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4078
4079 if (iir & I915_USER_INTERRUPT)
4080 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4081
4082 if (iir & I915_MASTER_ERROR_INTERRUPT)
4083 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4084
4085 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4086 } while (0);
4087
4088 pmu_irq_stats(dev_priv, ret);
4089
4090 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4091
4092 return ret;
4093}
4094
4095static void i915_irq_reset(struct drm_i915_private *dev_priv)
4096{
4097 struct intel_uncore *uncore = &dev_priv->uncore;
4098
4099 if (I915_HAS_HOTPLUG(dev_priv)) {
4100 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4101 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4102 }
4103
4104 i9xx_pipestat_irq_reset(dev_priv);
4105
4106 GEN3_IRQ_RESET(uncore, GEN2_);
4107 dev_priv->irq_mask = ~0u;
4108}
4109
4110static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4111{
4112 struct intel_uncore *uncore = &dev_priv->uncore;
4113 u32 enable_mask;
4114
4115 intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4116 I915_ERROR_MEMORY_REFRESH));
4117
4118 /* Unmask the interrupts that we always want on. */
4119 dev_priv->irq_mask =
4120 ~(I915_ASLE_INTERRUPT |
4121 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4122 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4123 I915_MASTER_ERROR_INTERRUPT);
4124
4125 enable_mask =
4126 I915_ASLE_INTERRUPT |
4127 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4128 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4129 I915_MASTER_ERROR_INTERRUPT |
4130 I915_USER_INTERRUPT;
4131
4132 if (I915_HAS_HOTPLUG(dev_priv)) {
4133 /* Enable in IER... */
4134 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4135 /* and unmask in IMR */
4136 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4137 }
4138
4139 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4140
4141 /* Interrupt setup is already guaranteed to be single-threaded, this is
4142 * just to make the assert_spin_locked check happy. */
4143 spin_lock_irq(&dev_priv->irq_lock);
4144 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4145 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4146 spin_unlock_irq(&dev_priv->irq_lock);
4147
4148 i915_enable_asle_pipestat(dev_priv);
4149}
4150
4151static irqreturn_t i915_irq_handler(int irq, void *arg)
4152{
4153 struct drm_i915_private *dev_priv = arg;
4154 irqreturn_t ret = IRQ_NONE;
4155
4156 if (!intel_irqs_enabled(dev_priv))
4157 return IRQ_NONE;
4158
4159 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4160 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4161
4162 do {
4163 u32 pipe_stats[I915_MAX_PIPES] = {};
4164 u32 eir = 0, eir_stuck = 0;
4165 u32 hotplug_status = 0;
4166 u32 iir;
4167
4168 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4169 if (iir == 0)
4170 break;
4171
4172 ret = IRQ_HANDLED;
4173
4174 if (I915_HAS_HOTPLUG(dev_priv) &&
4175 iir & I915_DISPLAY_PORT_INTERRUPT)
4176 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4177
4178 /* Call regardless, as some status bits might not be
4179 * signalled in iir */
4180 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4181
4182 if (iir & I915_MASTER_ERROR_INTERRUPT)
4183 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4184
4185 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4186
4187 if (iir & I915_USER_INTERRUPT)
4188 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4189
4190 if (iir & I915_MASTER_ERROR_INTERRUPT)
4191 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4192
4193 if (hotplug_status)
4194 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4195
4196 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4197 } while (0);
4198
4199 pmu_irq_stats(dev_priv, ret);
4200
4201 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4202
4203 return ret;
4204}
4205
4206static void i965_irq_reset(struct drm_i915_private *dev_priv)
4207{
4208 struct intel_uncore *uncore = &dev_priv->uncore;
4209
4210 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4211 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4212
4213 i9xx_pipestat_irq_reset(dev_priv);
4214
4215 GEN3_IRQ_RESET(uncore, GEN2_);
4216 dev_priv->irq_mask = ~0u;
4217}
4218
4219static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4220{
4221 struct intel_uncore *uncore = &dev_priv->uncore;
4222 u32 enable_mask;
4223 u32 error_mask;
4224
4225 /*
4226 * Enable some error detection, note the instruction error mask
4227 * bit is reserved, so we leave it masked.
4228 */
4229 if (IS_G4X(dev_priv)) {
4230 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4231 GM45_ERROR_MEM_PRIV |
4232 GM45_ERROR_CP_PRIV |
4233 I915_ERROR_MEMORY_REFRESH);
4234 } else {
4235 error_mask = ~(I915_ERROR_PAGE_TABLE |
4236 I915_ERROR_MEMORY_REFRESH);
4237 }
4238 intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4239
4240 /* Unmask the interrupts that we always want on. */
4241 dev_priv->irq_mask =
4242 ~(I915_ASLE_INTERRUPT |
4243 I915_DISPLAY_PORT_INTERRUPT |
4244 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4245 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4246 I915_MASTER_ERROR_INTERRUPT);
4247
4248 enable_mask =
4249 I915_ASLE_INTERRUPT |
4250 I915_DISPLAY_PORT_INTERRUPT |
4251 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4252 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4253 I915_MASTER_ERROR_INTERRUPT |
4254 I915_USER_INTERRUPT;
4255
4256 if (IS_G4X(dev_priv))
4257 enable_mask |= I915_BSD_USER_INTERRUPT;
4258
4259 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4260
4261 /* Interrupt setup is already guaranteed to be single-threaded, this is
4262 * just to make the assert_spin_locked check happy. */
4263 spin_lock_irq(&dev_priv->irq_lock);
4264 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4265 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4266 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4267 spin_unlock_irq(&dev_priv->irq_lock);
4268
4269 i915_enable_asle_pipestat(dev_priv);
4270}
4271
4272static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4273{
4274 u32 hotplug_en;
4275
4276 lockdep_assert_held(&dev_priv->irq_lock);
4277
4278 /* Note HDMI and DP share hotplug bits */
4279 /* enable bits are the same for all generations */
4280 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4281 /* Programming the CRT detection parameters tends
4282 to generate a spurious hotplug event about three
4283 seconds later. So just do it once.
4284 */
4285 if (IS_G4X(dev_priv))
4286 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4287 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4288
4289 /* Ignore TV since it's buggy */
4290 i915_hotplug_interrupt_update_locked(dev_priv,
4291 HOTPLUG_INT_EN_MASK |
4292 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4293 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4294 hotplug_en);
4295}
4296
4297static irqreturn_t i965_irq_handler(int irq, void *arg)
4298{
4299 struct drm_i915_private *dev_priv = arg;
4300 irqreturn_t ret = IRQ_NONE;
4301
4302 if (!intel_irqs_enabled(dev_priv))
4303 return IRQ_NONE;
4304
4305 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4306 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4307
4308 do {
4309 u32 pipe_stats[I915_MAX_PIPES] = {};
4310 u32 eir = 0, eir_stuck = 0;
4311 u32 hotplug_status = 0;
4312 u32 iir;
4313
4314 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4315 if (iir == 0)
4316 break;
4317
4318 ret = IRQ_HANDLED;
4319
4320 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4321 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4322
4323 /* Call regardless, as some status bits might not be
4324 * signalled in iir */
4325 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4326
4327 if (iir & I915_MASTER_ERROR_INTERRUPT)
4328 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4329
4330 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4331
4332 if (iir & I915_USER_INTERRUPT)
4333 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4334 iir);
4335
4336 if (iir & I915_BSD_USER_INTERRUPT)
4337 intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4338 iir >> 25);
4339
4340 if (iir & I915_MASTER_ERROR_INTERRUPT)
4341 i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4342
4343 if (hotplug_status)
4344 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4345
4346 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4347 } while (0);
4348
4349 pmu_irq_stats(dev_priv, IRQ_HANDLED);
4350
4351 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4352
4353 return ret;
4354}
4355
4356struct intel_hotplug_funcs {
4357 void (*hpd_irq_setup)(struct drm_i915_private *i915);
4358};
4359
4360#define HPD_FUNCS(platform) \
4361static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4362 .hpd_irq_setup = platform##_hpd_irq_setup, \
4363}
4364
4365HPD_FUNCS(i915);
4366HPD_FUNCS(dg1);
4367HPD_FUNCS(gen11);
4368HPD_FUNCS(bxt);
4369HPD_FUNCS(icp);
4370HPD_FUNCS(spt);
4371HPD_FUNCS(ilk);
4372#undef HPD_FUNCS
4373
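Each HPD_FUNCS() invocation above stamps out one per-platform vtable. For example, HPD_FUNCS(i915) expands to:

static const struct intel_hotplug_funcs i915_hpd_funcs = {
	.hpd_irq_setup = i915_hpd_irq_setup,
};

These are the objects that intel_irq_init() below installs into dev_priv->display.funcs.hotplug.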
4374void intel_hpd_irq_setup(struct drm_i915_privateinteldrm_softc *i915)
4375{
4376 if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4377 i915->display.funcs.hotplug->hpd_irq_setup(i915);
4378}
4379
4380/**
4381 * intel_irq_init - initializes irq support
4382 * @dev_priv: i915 device instance
4383 *
4384 * This function initializes all the irq support including work items, timers
4385 * and all the vtables. It does not setup the interrupt itself though.
4386 */
4387void intel_irq_init(struct drm_i915_privateinteldrm_softc *dev_priv)
4388{
4389 struct drm_device *dev = &dev_priv->drm;
4390 int i;
4391
4392 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4393 for (i = 0; i < MAX_L3_SLICES2; ++i)
4394 dev_priv->l3_parity.remap_info[i] = NULL((void *)0);
4395
4396 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
4397 if (HAS_GT_UC(dev_priv)((&(dev_priv)->__info)->has_gt_uc) && GRAPHICS_VER(dev_priv)((&(dev_priv)->__runtime)->graphics.ip.ver) < 11)
4398 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST(1UL << (15)) << 16;
4399
4400 if (!HAS_DISPLAY(dev_priv)((&(dev_priv)->__runtime)->pipe_mask != 0))
4401 return;
4402
4403 intel_hpd_init_pins(dev_priv);
4404
4405 intel_hpd_init_work(dev_priv);
4406
4407 dev->vblank_disable_immediate = true1;
4408
4409 /* Most platforms treat the display irq block as an always-on
4410 * power domain. vlv/chv can disable it at runtime and need
4411 * special care to avoid writing any of the display block registers
4412 * outside of the power domain. We defer setting up the display irqs
4413 * in this case to the runtime pm.
4414 */
4415 dev_priv->display_irqs_enabled = true1;
4416 if (IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) || IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW))
4417 dev_priv->display_irqs_enabled = false0;
4418
4419 dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD50;
4420 /* If we have MST support, we want to avoid doing short HPD IRQ storm
4421 * detection, as short HPD storms will occur as a natural part of
4422 * sideband messaging with MST.
4423 * On older platforms however, IRQ storms can occur with both long and
4424 * short pulses, as seen on some G4x systems.
4425 */
4426 dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv)((&(dev_priv)->__info)->display.has_dp_mst);
4427
4428 if (HAS_GMCH(dev_priv)((&(dev_priv)->__info)->display.has_gmch)) {
4429 if (I915_HAS_HOTPLUG(dev_priv)((&(dev_priv)->__info)->display.has_hotplug))
4430 dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4431 } else {
4432 if (HAS_PCH_DG2(dev_priv)(((dev_priv)->pch_type) == PCH_DG2))
4433 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4434 else if (HAS_PCH_DG1(dev_priv)(((dev_priv)->pch_type) == PCH_DG1))
4435 dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4436 else if (DISPLAY_VER(dev_priv)((&(dev_priv)->__runtime)->display.ip.ver) >= 11)
4437 dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4438 else if (IS_GEMINILAKE(dev_priv)IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) || IS_BROXTON(dev_priv)IS_PLATFORM(dev_priv, INTEL_BROXTON))
4439 dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4440 else if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) >= PCH_ICP)
4441 dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4442 else if (INTEL_PCH_TYPE(dev_priv)((dev_priv)->pch_type) >= PCH_SPT)
4443 dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4444 else
4445 dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4446 }
4447}
4448
4449/**
4450 * intel_irq_fini - deinitializes IRQ support
4451 * @i915: i915 device instance
4452 *
4453 * This function deinitializes all the IRQ support.
4454 */
4455void intel_irq_fini(struct drm_i915_privateinteldrm_softc *i915)
4456{
4457 int i;
4458
4459 for (i = 0; i < MAX_L3_SLICES2; ++i)
4460 kfree(i915->l3_parity.remap_info[i]);
4461}
4462
4463static irq_handler_t intel_irq_handler(struct drm_i915_privateinteldrm_softc *dev_priv)
4464{
4465 if (HAS_GMCH(dev_priv)((&(dev_priv)->__info)->display.has_gmch)) {
4466 if (IS_CHERRYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW))
4467 return cherryview_irq_handler;
4468 else if (IS_VALLEYVIEW(dev_priv)IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW))
4469 return valleyview_irq_handler;
4470 else if (GRAPHICS_VER(dev_priv)((&(dev_priv)->__runtime)->graphics.ip.ver) == 4)
4471 return i965_irq_handler;
4472 else if (GRAPHICS_VER(dev_priv)((&(dev_priv)->__runtime)->graphics.ip.ver) == 3)
4473 return i915_irq_handler;
4474 else
4475 return i8xx_irq_handler;
4476 } else {
4477 if (GRAPHICS_VER_FULL(dev_priv)(((&(dev_priv)->__runtime)->graphics.ip.ver) <<
8 | ((&(dev_priv)->__runtime)->graphics.ip.rel))
>= IP_VER(12, 10)((12) << 8 | (10)))
4478 return dg1_irq_handler;
4479 else if (GRAPHICS_VER(dev_priv)((&(dev_priv)->__runtime)->graphics.ip.ver) >= 11)
4480 return gen11_irq_handler;
4481 else if (GRAPHICS_VER(dev_priv)((&(dev_priv)->__runtime)->graphics.ip.ver) >= 8)
4482 return gen8_irq_handler;
4483 else
4484 return ilk_irq_handler;
4485 }
4486}
4487
4488 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4489{
4490 if (HAS_GMCH(dev_priv)) {
4491 if (IS_CHERRYVIEW(dev_priv))
4492 cherryview_irq_reset(dev_priv);
4493 else if (IS_VALLEYVIEW(dev_priv))
4494 valleyview_irq_reset(dev_priv);
4495 else if (GRAPHICS_VER(dev_priv) == 4)
4496 i965_irq_reset(dev_priv);
4497 else if (GRAPHICS_VER(dev_priv) == 3)
4498 i915_irq_reset(dev_priv);
4499 else
4500 i8xx_irq_reset(dev_priv);
4501 } else {
4502 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4503 dg1_irq_reset(dev_priv);
4504 else if (GRAPHICS_VER(dev_priv) >= 11)
4505 gen11_irq_reset(dev_priv);
4506 else if (GRAPHICS_VER(dev_priv) >= 8)
4507 gen8_irq_reset(dev_priv);
4508 else
4509 ilk_irq_reset(dev_priv);
4510 }
4511}
4512
4513 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4514{
4515 if (HAS_GMCH(dev_priv)) {
4516 if (IS_CHERRYVIEW(dev_priv))
4517 cherryview_irq_postinstall(dev_priv);
4518 else if (IS_VALLEYVIEW(dev_priv))
4519 valleyview_irq_postinstall(dev_priv);
4520 else if (GRAPHICS_VER(dev_priv) == 4)
4521 i965_irq_postinstall(dev_priv);
4522 else if (GRAPHICS_VER(dev_priv) == 3)
4523 i915_irq_postinstall(dev_priv);
4524 else
4525 i8xx_irq_postinstall(dev_priv);
4526 } else {
4527 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4528 dg1_irq_postinstall(dev_priv);
4529 else if (GRAPHICS_VER(dev_priv) >= 11)
4530 gen11_irq_postinstall(dev_priv);
4531 else if (GRAPHICS_VER(dev_priv) >= 8)
4532 gen8_irq_postinstall(dev_priv);
4533 else
4534 ilk_irq_postinstall(dev_priv);
4535 }
4536}
4537
4538/**
4539 * intel_irq_install - enables the hardware interrupt
4540 * @dev_priv: i915 device instance
4541 *
4542 * This function enables hardware interrupt handling but leaves hotplug
4543 * handling disabled. It is called after intel_irq_init().
4544 *
4545 * In the driver load and resume code we need working interrupts in a few places
4546 * but don't want to deal with the hassle of concurrent probe and hotplug
4547 * workers. Hence the split into this two-stage approach.
4548 */
4549 int intel_irq_install(struct drm_i915_private *dev_priv)
4550{
4551 int irq = dev_priv->drm.pdev->irq;
Value stored to 'irq' during its initialization is never read
4552 int ret;
4553
4554 /*
4555 * We enable some interrupt sources in our postinstall hooks, so mark
4556 * interrupts as enabled _before_ actually enabling them to avoid
4557 * special cases in our ordering checks.
4558 */
4559 dev_priv->runtime_pm.irqs_enabled = true;
4560
4561 dev_priv->irq_enabled = true;
4562
4563 intel_irq_reset(dev_priv);
4564
4565 ret = request_irq(irq, intel_irq_handler(dev_priv),
4566 IRQF_SHARED, DRIVER_NAME, dev_priv);
4567 if (ret < 0) {
4568 dev_priv->irq_enabled = false;
4569 return ret;
4570 }
4571#ifdef __OpenBSD__
4572 dev_priv->irq_handler = intel_irq_handler(dev_priv);
4573#endif
4574
4575 intel_irq_postinstall(dev_priv);
4576
4577 return ret;
4578}
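
The kernel-doc above prescribes a fixed ordering at load time: intel_irq_init() first, intel_irq_install() next, hotplug bring-up last. A hedged sketch of what a caller's probe path might look like under that ordering; my_driver_load is a hypothetical wrapper, not a function from this file, and error handling is simplified:

/* Hypothetical in-tree probe excerpt showing the two-stage ordering. */
static int my_driver_load(struct drm_i915_private *i915)
{
	int ret;

	intel_irq_init(i915);		/* set up state, pick hotplug funcs */

	ret = intel_irq_install(i915);	/* reset, request_irq, postinstall */
	if (ret < 0)
		return ret;

	/*
	 * Only now, with interrupts working but hotplug still quiet,
	 * is it safe to bring up outputs and enable hotplug handling.
	 */
	intel_hpd_init(i915);

	return 0;
}
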
4579
4580/**
4581 * intel_irq_uninstall - finalizes all irq handling
4582 * @dev_priv: i915 device instance
4583 *
4584 * This stops interrupt and hotplug handling, then unregisters and frees all
4585 * resources acquired in the init functions.
4586 */
4587 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4588{
4589 int irq = dev_priv->drm.pdev->irq;
4590
4591 /*
4592 * FIXME we can get called twice during driver probe
4593 * error handling as well as during driver remove due to
4594 * intel_modeset_driver_remove() calling us out of sequence.
4595 * Would be nice if it didn't do that...
4596 */
4597 if (!dev_priv->irq_enabled)
4598 return;
4599
4600 dev_priv->irq_enabled = false;
4601
4602 intel_irq_reset(dev_priv);
4603
4604 free_irq(irq, dev_priv);
4605#ifdef __OpenBSD__
4606 dev_priv->irq_handler = NULL;
4607#endif
4608
4609 intel_hpd_cancel_work(dev_priv);
4610 dev_priv->runtime_pm.irqs_enabled = false;
4611}
4612
4613/**
4614 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4615 * @dev_priv: i915 device instance
4616 *
4617 * This function is used to disable interrupts at runtime, both in the runtime
4618 * pm and the system suspend/resume code.
4619 */
4620 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4621{
4622 intel_irq_reset(dev_priv);
4623 dev_priv->runtime_pm.irqs_enabled = false;
4624 intel_synchronize_irq(dev_priv);
4625}
4626
4627/**
4628 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4629 * @dev_priv: i915 device instance
4630 *
4631 * This function is used to enable interrupts at runtime, both in the runtime
4632 * pm and the system suspend/resume code.
4633 */
4634 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4635{
4636 dev_priv->runtime_pm.irqs_enabled = true;
4637 intel_irq_reset(dev_priv);
4638 intel_irq_postinstall(dev_priv);
4639}
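
These two helpers are deliberate mirror images: disable resets the hardware, clears irqs_enabled, then drains any in-flight handler; enable sets irqs_enabled first (matching the ordering-check rationale in the comment at line 4555) and re-arms via reset plus postinstall. A hedged sketch of how suspend/resume code might pair them; my_suspend and my_resume are hypothetical wrappers, not functions from this file:

/* Hypothetical suspend/resume excerpt pairing the two helpers above. */
static int my_suspend(struct drm_i915_private *i915)
{
	/* Quiesce the hardware and drain any handler still in flight. */
	intel_runtime_pm_disable_interrupts(i915);
	/* ... power down, save state ... */
	return 0;
}

static int my_resume(struct drm_i915_private *i915)
{
	/* ... restore state, power up ... */
	/* Re-arm from a clean slate: reset, then postinstall. */
	intel_runtime_pm_enable_interrupts(i915);
	return 0;
}
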
4640
4641 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4642{
4643 return dev_priv->runtime_pm.irqs_enabled;
4644}
4645
4646 void intel_synchronize_irq(struct drm_i915_private *i915)
4647{
4648#ifdef __linux__
4649 synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4650#else
4651 intr_barrier(i915->irqh);
4652#endif
4653}
4654
4655 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4656{
4657#ifdef __linux__
4658 synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4659#else
4660 intr_barrier(i915->irqh);
4661#endif
4662}
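
On Linux these helpers map to synchronize_irq()/synchronize_hardirq(); on OpenBSD both map to intr_barrier(). Either way the guarantee is the same: the call returns only after any handler instance already running for that interrupt has finished, which is what makes it safe to free handler-visible state afterwards, as intel_runtime_pm_disable_interrupts() relies on at line 4624. A hedged sketch of that teardown pattern; my_teardown and struct my_shared_state are hypothetical names:

/* Hypothetical in-file teardown excerpt relying on the barrier above. */
struct my_shared_state {
	void *buf;	/* data the IRQ handler may dereference */
};

static void my_teardown(struct drm_i915_private *i915,
			struct my_shared_state *st)
{
	intel_irq_reset(i915);		/* stop new interrupts */
	intel_synchronize_irq(i915);	/* wait out any running handler */

	/* Only now is it safe to free data the handler may have touched. */
	kfree(st->buf);
	st->buf = NULL;
}
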