Bug Summary

File: dev/pci/drm/i915/intel_uncore.c
Warning: line 2306, column 3
Value stored to 'mmio_size' is never read
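
The diagnostic comes from clang's deadcode.DeadStores checker, enabled through -analyzer-checker=deadcode in the invocation below: it fires when a value assigned to a variable is overwritten, or the variable goes out of scope, before the stored value is ever read. Line 2306 itself lies beyond this excerpt, so the following is only a minimal sketch of the pattern the checker flags, with invented names and sizes:

#include <stdint.h>

static uint32_t guess_mmio_size(int graphics_ver)
{
        uint32_t mmio_size;

        mmio_size = 2 * 1024 * 1024;    /* dead store: every path below overwrites it */
        if (graphics_ver >= 11)
                mmio_size = 4 * 1024 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
        return mmio_size;
}

The usual remedy is to delete the redundant first assignment or fold it into a single initialization; the store is harmless at runtime but usually marks leftover code.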

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name intel_uncore.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/intel_uncore.c
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <drm/drm_managed.h>
25#include <linux/pm_runtime.h>
26
27#include "gt/intel_engine_regs.h"
28#include "gt/intel_gt_regs.h"
29
30#include "i915_drv.h"
31#include "i915_iosf_mbi.h"
32#include "i915_trace.h"
33#include "i915_vgpu.h"
34#include "intel_pm.h"
35
36#define FORCEWAKE_ACK_TIMEOUT_MS 50
37#define GT_FIFO_TIMEOUT_MS 10
38
39#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
40
41static void
42fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
43{
44 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
45}
46
47void
48intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
49{
50 mtx_init(&i915->mmio_debug.lock, IPL_TTY);
51 i915->mmio_debug.unclaimed_mmio_check = 1;
52
53 i915->uncore.debug = &i915->mmio_debug;
54}
55
56static void mmio_debug_suspend(struct intel_uncore *uncore)
57{
58 if (!uncore->debug)
59 return;
60
61 spin_lock(&uncore->debug->lock);
62
63 /* Save and disable mmio debugging for the user bypass */
64 if (!uncore->debug->suspend_count++) {
65 uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
66 uncore->debug->unclaimed_mmio_check = 0;
67 }
68
69 spin_unlock(&uncore->debug->lock);
70}
71
72static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
73
74static void mmio_debug_resume(struct intel_uncore *uncore)
75{
76 if (!uncore->debug)
77 return;
78
79 spin_lock(&uncore->debug->lock);
80
81 if (!--uncore->debug->suspend_count)
82 uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
83
84 if (check_for_unclaimed_mmio(uncore))
85 drm_info(&uncore->i915->drm,
86 "Invalid mmio detected during user access\n");
87
88 spin_unlock(&uncore->debug->lock);
89}
90
91static const char * const forcewake_domain_names[] = {
92 "render",
93 "gt",
94 "media",
95 "vdbox0",
96 "vdbox1",
97 "vdbox2",
98 "vdbox3",
99 "vdbox4",
100 "vdbox5",
101 "vdbox6",
102 "vdbox7",
103 "vebox0",
104 "vebox1",
105 "vebox2",
106 "vebox3",
107};
108
109const char *
110intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
111{
112 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
113
114 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
115 return forcewake_domain_names[id];
116
117 WARN_ON(id);
118
119 return "unknown";
120}
121
122#define fw_ack(d) readl((d)->reg_ack)
123#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
124#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
125
126static inline void
127fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
128{
129 /*
130 * We don't really know if the powerwell for the forcewake domain we are
131 * trying to reset here does exist at this point (engines could be fused
132 * off in ICL+), so no waiting for acks
133 */
134 /* WaRsClearFWBitsAtReset */
135 if (GRAPHICS_VER(d->uncore->i915) >= 12)
136 fw_clear(d, 0xefff);
137 else
138 fw_clear(d, 0xffff);
139}
140
141static inline void
142fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
143{
144 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
145 d->uncore->fw_domains_timer |= d->mask;
146 d->wake_count++;
147#ifdef __linux__
148 hrtimer_start_range_ns(&d->timer,
149 NSEC_PER_MSEC,
150 NSEC_PER_MSEC,
151 HRTIMER_MODE_REL);
152#else
153 timeout_add_msec(&d->timer, 1);
154#endif
155}
156
157static inline int
158__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
159 const u32 ack,
160 const u32 value)
161{
162 return wait_for_atomic((fw_ack(d) & ack) == value,
163 FORCEWAKE_ACK_TIMEOUT_MS);
164}
165
166static inline int
167wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
168 const u32 ack)
169{
170 return __wait_for_ack(d, ack, 0);
171}
172
173static inline int
174wait_ack_set(const struct intel_uncore_forcewake_domain *d,
175 const u32 ack)
176{
177 return __wait_for_ack(d, ack, ack);
178}
179
180static inline void
181fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
182{
183 if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
184 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
185 intel_uncore_forcewake_domain_to_str(d->id));
186 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
187 }
188}
189
190enum ack_type {
191 ACK_CLEAR = 0,
192 ACK_SET
193};
194
195static int
196fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
197 const enum ack_type type)
198{
199 const u32 ack_bit = FORCEWAKE_KERNEL;
200 const u32 value = type == ACK_SET ? ack_bit : 0;
201 unsigned int pass;
202 bool ack_detected;
203
204 /*
205 * There is a possibility of driver's wake request colliding
206 * with hardware's own wake requests and that can cause
207 * hardware to not deliver the driver's ack message.
208 *
209 * Use a fallback bit toggle to kick the gpu state machine
210 * in the hope that the original ack will be delivered along with
211 * the fallback ack.
212 *
213 * This workaround is described in HSDES #1604254524 and it's known as:
214 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
215 * although the name is a bit misleading.
216 */
217
218 pass = 1;
219 do {
220 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
221
222 fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
223 /* Give gt some time to relax before the polling frenzy */
224 udelay(10 * pass);
225 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
226
227 ack_detected = (fw_ack(d) & ack_bit) == value;
228
229 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
230 } while (!ack_detected && pass++ < 10);
231
232 DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
233 intel_uncore_forcewake_domain_to_str(d->id),
234 type == ACK_SET ? "set" : "clear",
235 fw_ack(d),
236 pass);
237
238 return ack_detected ? 0 : -ETIMEDOUT;
239}
240
241static inline void
242fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
243{
244 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
245 return;
246
247 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
248 fw_domain_wait_ack_clear(d);
249}
250
251static inline void
252fw_domain_get(const struct intel_uncore_forcewake_domain *d)
253{
254 fw_set(d, FORCEWAKE_KERNEL);
255}
256
257static inline void
258fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
259{
260 if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
261 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
262 intel_uncore_forcewake_domain_to_str(d->id));
263 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
264 }
265}
266
267static inline void
268fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
269{
270 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
271 return;
272
273 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
274 fw_domain_wait_ack_set(d);
275}
276
277static inline void
278fw_domain_put(const struct intel_uncore_forcewake_domain *d)
279{
280 fw_clear(d, FORCEWAKE_KERNEL);
281}
282
283static void
284fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
285{
286 struct intel_uncore_forcewake_domain *d;
287 unsigned int tmp;
288
289 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
290
291 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
292 fw_domain_wait_ack_clear(d);
293 fw_domain_get(d);
294 }
295
296 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
297 fw_domain_wait_ack_set(d);
298
299 uncore->fw_domains_active |= fw_domains;
300}
301
302static void
303fw_domains_get_with_fallback(struct intel_uncore *uncore,
304 enum forcewake_domains fw_domains)
305{
306 struct intel_uncore_forcewake_domain *d;
307 unsigned int tmp;
308
309 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
310
311 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
312 fw_domain_wait_ack_clear_fallback(d);
313 fw_domain_get(d);
314 }
315
316 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
317 fw_domain_wait_ack_set_fallback(d);
318
319 uncore->fw_domains_active |= fw_domains;
320}
321
322static void
323fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
324{
325 struct intel_uncore_forcewake_domain *d;
326 unsigned int tmp;
327
328 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
329
330 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
331 fw_domain_put(d);
332
333 uncore->fw_domains_active &= ~fw_domains;
334}
335
336static void
337fw_domains_reset(struct intel_uncore *uncore,
338 enum forcewake_domains fw_domains)
339{
340 struct intel_uncore_forcewake_domain *d;
341 unsigned int tmp;
342
343 if (!fw_domains)
344 return;
345
346 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
347
348 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
349 fw_domain_reset(d);
350}
351
352static inline u32 gt_thread_status(struct intel_uncore *uncore)
353{
354 u32 val;
355
356 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
357 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
358
359 return val;
360}
361
362static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
363{
364 /*
365 * w/a for a sporadic read returning 0 by waiting for the GT
366 * thread to wake up.
367 */
368 drm_WARN_ONCE(&uncore->i915->drm,
369 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
370 "GT thread status wait timed out\n");
371}
372
373static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
374 enum forcewake_domains fw_domains)
375{
376 fw_domains_get_normal(uncore, fw_domains);
377
378 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
379 __gen6_gt_wait_for_thread_c0(uncore);
380}
381
382static inline u32 fifo_free_entries(struct intel_uncore *uncore)
383{
384 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
385
386 return count & GT_FIFO_FREE_ENTRIES_MASK;
387}
388
389static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
390{
391 u32 n;
392
393 /* On VLV, FIFO will be shared by both SW and HW.
394 * So, we need to read the FREE_ENTRIES every time */
395 if (IS_VALLEYVIEW(uncore->i915))
396 n = fifo_free_entries(uncore);
397 else
398 n = uncore->fifo_count;
399
400 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
401 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
402 GT_FIFO_NUM_RESERVED_ENTRIES,
403 GT_FIFO_TIMEOUT_MS)) {
404 drm_dbg(&uncore->i915->drm,
405 "GT_FIFO timeout, entries: %u\n", n);
407 }
408 }
409
410 uncore->fifo_count = n - 1;
411}
412
413#ifdef __linux__
414
415static enum hrtimer_restart
416intel_uncore_fw_release_timer(struct hrtimer *timer)
417{
418 struct intel_uncore_forcewake_domain *domain =
419 container_of(timer, struct intel_uncore_forcewake_domain, timer);
420 struct intel_uncore *uncore = domain->uncore;
421 unsigned long irqflags;
422
423 assert_rpm_device_not_suspended(uncore->rpm);
424
425 if (xchg(&domain->active, false))
426 return HRTIMER_RESTART;
427
428 spin_lock_irqsave(&uncore->lock, irqflags);
429
430 uncore->fw_domains_timer &= ~domain->mask;
431
432 GEM_BUG_ON(!domain->wake_count);
433 if (--domain->wake_count == 0)
434 fw_domains_put(uncore, domain->mask);
435
436 spin_unlock_irqrestore(&uncore->lock, irqflags);
437
438 return HRTIMER_NORESTART;
439}
440
441#else
442
443void
444intel_uncore_fw_release_timer(void *arg)
445{
446 struct intel_uncore_forcewake_domain *domain = arg;
447 struct intel_uncore *uncore = domain->uncore;
448 unsigned long irqflags;
449
450 assert_rpm_device_not_suspended(uncore->rpm);
451
452 if (xchg(&domain->active, false))
453 return;
454
455 spin_lock_irqsave(&uncore->lock, irqflags);
456
457 uncore->fw_domains_timer &= ~domain->mask;
458
459 GEM_BUG_ON(!domain->wake_count);
460 if (--domain->wake_count == 0)
461 fw_domains_put(uncore, domain->mask);
462
463 spin_unlock_irqrestore(&uncore->lock, irqflags);
464}
465
466#endif
467
468/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
469static unsigned int
470intel_uncore_forcewake_reset(struct intel_uncore *uncore)
471{
472 unsigned long irqflags;
473 struct intel_uncore_forcewake_domain *domain;
474 int retry_count = 100;
475 enum forcewake_domains fw, active_domains;
476
477 iosf_mbi_assert_punit_acquired();
478
479 /* Hold uncore.lock across reset to prevent any register access
480 * with forcewake not set correctly. Wait until all pending
481 * timers are run before holding.
482 */
483 while (1) {
484 unsigned int tmp;
485
486 active_domains = 0;
487
488 for_each_fw_domain(domain, uncore, tmp) {
489 smp_store_mb(domain->active, false);
490 if (hrtimer_cancel(&domain->timer) == 0)
491 continue;
492
493 intel_uncore_fw_release_timer(&domain->timer);
494 }
495
496 spin_lock_irqsave(&uncore->lock, irqflags);
497
498 for_each_fw_domain(domain, uncore, tmp) {
499 if (hrtimer_active(&domain->timer))
500 active_domains |= domain->mask;
501 }
502
503 if (active_domains == 0)
504 break;
505
506 if (--retry_count == 0) {
507 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
508 break;
509 }
510
511 spin_unlock_irqrestore(&uncore->lock, irqflags);
512 cond_resched();
513 }
514
515 drm_WARN_ON(&uncore->i915->drm, active_domains);
516
517 fw = uncore->fw_domains_active;
518 if (fw)
519 fw_domains_put(uncore, fw);
520
521 fw_domains_reset(uncore, uncore->fw_domains);
522 assert_forcewakes_inactive(uncore);
523
524 spin_unlock_irqrestore(&uncore->lock, irqflags);
525
526 return fw; /* track the lost user forcewake domains */
527}
528
529static bool
530fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
531{
532 u32 dbg;
533
534 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
535 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
536 return false;
537
538 /*
539 * Bugs in PCI programming (or failing hardware) can occasionally cause
540 * us to lose access to the MMIO BAR. When this happens, register
541 * reads will come back with 0xFFFFFFFF for every register and things
542 * go bad very quickly. Let's try to detect that special case and at
543 * least try to print a more informative message about what has
544 * happened.
545 *
546 * During normal operation the FPGA_DBG register has several unused
547 * bits that will always read back as 0's so we can use them as canaries
548 * to recognize when MMIO accesses are just busted.
549 */
550 if (unlikely(dbg == ~0))
551 drm_err(&uncore->i915->drm,
552 "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
553
554 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
555
556 return true;
557}
558
559static bool
560vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
561{
562 u32 cer;
563
564 cer = __raw_uncore_read32(uncore, CLAIM_ER);
565 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
566 return false;
567
568 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
569
570 return true;
571}
572
573static bool
574gen6_check_for_fifo_debug(struct intel_uncore *uncore)
575{
576 u32 fifodbg;
577
578 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
579
580 if (unlikely(fifodbg)) {
581 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
582 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
583 }
584
585 return fifodbg;
586}
587
588static bool
589check_for_unclaimed_mmio(struct intel_uncore *uncore)
590{
591 bool ret = false;
592
593 lockdep_assert_held(&uncore->debug->lock);
594
595 if (uncore->debug->suspend_count)
596 return false;
597
598 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
599 ret |= fpga_check_for_unclaimed_mmio(uncore);
600
601 if (intel_uncore_has_dbg_unclaimed(uncore))
602 ret |= vlv_check_for_unclaimed_mmio(uncore);
603
604 if (intel_uncore_has_fifo(uncore))
605 ret |= gen6_check_for_fifo_debug(uncore);
606
607 return ret;
608}
609
610static void forcewake_early_sanitize(struct intel_uncore *uncore,
611 unsigned int restore_forcewake)
612{
613 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
614
615 /* WaDisableShadowRegForCpd:chv */
616 if (IS_CHERRYVIEW(uncore->i915)) {
617 __raw_uncore_write32(uncore, GTFIFOCTL,
618 __raw_uncore_read32(uncore, GTFIFOCTL) |
619 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
620 GT_FIFO_CTL_RC6_POLICY_STALL);
621 }
622
623 iosf_mbi_punit_acquire();
624 intel_uncore_forcewake_reset(uncore);
625 if (restore_forcewake) {
626 spin_lock_irq(&uncore->lock);
627 fw_domains_get(uncore, restore_forcewake);
628
629 if (intel_uncore_has_fifo(uncore))
630 uncore->fifo_count = fifo_free_entries(uncore);
631 spin_unlock_irq(&uncore->lock);
632 }
633 iosf_mbi_punit_release();
634}
635
636void intel_uncore_suspend(struct intel_uncore *uncore)
637{
638 if (!intel_uncore_has_forcewake(uncore))
639 return;
640
641 iosf_mbi_punit_acquire();
642 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
643 &uncore->pmic_bus_access_nb);
644 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
645 iosf_mbi_punit_release();
646}
647
648void intel_uncore_resume_early(struct intel_uncore *uncore)
649{
650 unsigned int restore_forcewake;
651
652 if (intel_uncore_unclaimed_mmio(uncore))
653 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
654
655 if (!intel_uncore_has_forcewake(uncore))
656 return;
657
658 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
659 forcewake_early_sanitize(uncore, restore_forcewake);
660
661 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
662}
663
664void intel_uncore_runtime_resume(struct intel_uncore *uncore)
665{
666 if (!intel_uncore_has_forcewake(uncore))
667 return;
668
669 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
670}
671
672static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
673 enum forcewake_domains fw_domains)
674{
675 struct intel_uncore_forcewake_domain *domain;
676 unsigned int tmp;
677
678 fw_domains &= uncore->fw_domains;
679
680 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
681 if (domain->wake_count++) {
682 fw_domains &= ~domain->mask;
683 domain->active = true;
684 }
685 }
686
687 if (fw_domains)
688 fw_domains_get(uncore, fw_domains);
689}
690
691/**
692 * intel_uncore_forcewake_get - grab forcewake domain references
693 * @uncore: the intel_uncore structure
694 * @fw_domains: forcewake domains to get reference on
695 *
696 * This function can be used to get GT's forcewake domain references.
697 * Normal register access will handle the forcewake domains automatically.
698 * However if some sequence requires the GT to not power down a particular
699 * forcewake domains this function should be called at the beginning of the
700 * sequence. And subsequently the reference should be dropped by symmetric
701 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
702 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
703 */
704void intel_uncore_forcewake_get(struct intel_uncore *uncore,
705 enum forcewake_domains fw_domains)
706{
707 unsigned long irqflags;
708
709 if (!uncore->fw_get_funcs)
710 return;
711
712 assert_rpm_wakelock_held(uncore->rpm);
713
714 spin_lock_irqsave(&uncore->lock, irqflags);
715 __intel_uncore_forcewake_get(uncore, fw_domains);
716 spin_unlock_irqrestore(&uncore->lock, irqflags);
717}
718
719/**
720 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
721 * @uncore: the intel_uncore structure
722 *
723 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
724 * the GT powerwell and in the process disable our debugging for the
725 * duration of userspace's bypass.
726 */
727void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
728{
729 spin_lock_irq(&uncore->lock);
730 if (!uncore->user_forcewake_count++) {
731 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
732 mmio_debug_suspend(uncore);
733 }
734 spin_unlock_irq(&uncore->lock);
735}
736
737/**
738 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
739 * @uncore: the intel_uncore structure
740 *
741 * This function complements intel_uncore_forcewake_user_get() and releases
742 * the GT powerwell taken on behalf of the userspace bypass.
743 */
744void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
745{
746 spin_lock_irq(&uncore->lock);
747 if (!--uncore->user_forcewake_count) {
748 mmio_debug_resume(uncore);
749 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
750 }
751 spin_unlock_irq(&uncore->lock);
752}
753
754/**
755 * intel_uncore_forcewake_get__locked - grab forcewake domain references
756 * @uncore: the intel_uncore structure
757 * @fw_domains: forcewake domains to get reference on
758 *
759 * See intel_uncore_forcewake_get(). This variant places the onus
760 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
761 */
762void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
763 enum forcewake_domains fw_domains)
764{
765 lockdep_assert_held(&uncore->lock);
766
767 if (!uncore->fw_get_funcs)
768 return;
769
770 __intel_uncore_forcewake_get(uncore, fw_domains);
771}
772
773static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
774 enum forcewake_domains fw_domains,
775 bool delayed)
776{
777 struct intel_uncore_forcewake_domain *domain;
778 unsigned int tmp;
779
780 fw_domains &= uncore->fw_domains;
781
782 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
783 GEM_BUG_ON(!domain->wake_count);
784
785 if (--domain->wake_count) {
786 domain->active = true;
787 continue;
788 }
789
790 if (delayed &&
791 !(domain->uncore->fw_domains_timer & domain->mask))
792 fw_domain_arm_timer(domain);
793 else
794 fw_domains_put(uncore, domain->mask);
795 }
796}
797
798/**
799 * intel_uncore_forcewake_put - release a forcewake domain reference
800 * @uncore: the intel_uncore structure
801 * @fw_domains: forcewake domains to put references
802 *
803 * This function drops the device-level forcewakes for specified
804 * domains obtained by intel_uncore_forcewake_get().
805 */
806void intel_uncore_forcewake_put(struct intel_uncore *uncore,
807 enum forcewake_domains fw_domains)
808{
809 unsigned long irqflags;
810
811 if (!uncore->fw_get_funcs)
812 return;
813
814 spin_lock_irqsave(&uncore->lock, irqflags);
815 __intel_uncore_forcewake_put(uncore, fw_domains, false);
816 spin_unlock_irqrestore(&uncore->lock, irqflags);
817}
818
819void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
820 enum forcewake_domains fw_domains)
821{
822 unsigned long irqflags;
823
824 if (!uncore->fw_get_funcs)
825 return;
826
827 spin_lock_irqsave(&uncore->lock, irqflags);
828 __intel_uncore_forcewake_put(uncore, fw_domains, true);
829 spin_unlock_irqrestore(&uncore->lock, irqflags);
830}
831
832/**
833 * intel_uncore_forcewake_flush - flush the delayed release
834 * @uncore: the intel_uncore structure
835 * @fw_domains: forcewake domains to flush
836 */
837void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
838 enum forcewake_domains fw_domains)
839{
840 struct intel_uncore_forcewake_domain *domain;
841 unsigned int tmp;
842
843 if (!uncore->fw_get_funcs)
844 return;
845
846 fw_domains &= uncore->fw_domains;
847 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
848 WRITE_ONCE(domain->active, false);
849 if (hrtimer_cancel(&domain->timer))
850 intel_uncore_fw_release_timer(&domain->timer);
851 }
852}
853
854/**
855 * intel_uncore_forcewake_put__locked - grab forcewake domain references
856 * @uncore: the intel_uncore structure
857 * @fw_domains: forcewake domains to get reference on
858 *
859 * See intel_uncore_forcewake_put(). This variant places the onus
860 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
861 */
862void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
863 enum forcewake_domains fw_domains)
864{
865 lockdep_assert_held(&uncore->lock);
866
867 if (!uncore->fw_get_funcs)
868 return;
869
870 __intel_uncore_forcewake_put(uncore, fw_domains, false);
871}
872
873void assert_forcewakes_inactive(struct intel_uncore *uncore)
874{
875 if (!uncore->fw_get_funcs)
876 return;
877
878 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
879 "Expected all fw_domains to be inactive, but %08x are still on\n",
880 uncore->fw_domains_active);
881}
882
883void assert_forcewakes_active(struct intel_uncore *uncore,
884 enum forcewake_domains fw_domains)
885{
886 struct intel_uncore_forcewake_domain *domain;
887 unsigned int tmp;
888
889 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
890 return;
891
892 if (!uncore->fw_get_funcs)
893 return;
894
895 spin_lock_irq(&uncore->lock);
896
897 assert_rpm_wakelock_held(uncore->rpm);
898
899 fw_domains &= uncore->fw_domains;
900 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
901 "Expected %08x fw_domains to be active, but %08x are off\n",
902 fw_domains, fw_domains & ~uncore->fw_domains_active);
903
904 /*
905 * Check that the caller has an explicit wakeref and we don't mistake
906 * it for the auto wakeref.
907 */
908 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
909 unsigned int actual = READ_ONCE(domain->wake_count);
910 unsigned int expect = 1;
911
912 if (uncore->fw_domains_timer & domain->mask)
913 expect++; /* pending automatic release */
914
915 if (drm_WARN(&uncore->i915->drm, actual < expect,
916 "Expected domain %d to be held awake by caller, count=%d\n",
917 domain->id, actual))
918 break;
919 }
920
921 spin_unlock_irq(&uncore->lock);
922}
923
924/* We give fast paths for the really cool registers */
925#define NEEDS_FORCE_WAKE(reg) ({ \
926 u32 __reg = (reg); \
927 __reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
928})
929
930static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
931{
932 if (offset < entry->start)
933 return -1;
934 else if (offset > entry->end)
935 return 1;
936 else
937 return 0;
938}
939
940/* Copied and "macroized" from lib/bsearch.c */
941#define BSEARCH(key, base, num, cmp) ({ \
942 unsigned int start__ = 0, end__ = (num); \
943 typeof(base) result__ = NULL; \
944 while (start__ < end__) { \
945 unsigned int mid__ = start__ + (end__ - start__) / 2; \
946 int ret__ = (cmp)((key), (base) + mid__); \
947 if (ret__ < 0) { \
948 end__ = mid__; \
949 } else if (ret__ > 0) { \
950 start__ = mid__ + 1; \
951 } else { \
952 result__ = (base) + mid__; \
953 break; \
954 } \
955 } \
956 result__; \
957})
958
959static enum forcewake_domains
960find_fw_domain(struct intel_uncore *uncore, u32 offset)
961{
962 const struct intel_forcewake_range *entry;
963
964 if (IS_GSI_REG(offset))
965 offset += uncore->gsi_offset;
966
967 entry = BSEARCH(offset,
968 uncore->fw_domains_table,
969 uncore->fw_domains_table_entries,
970 fw_range_cmp);
971
972 if (!entry)
973 return 0;
974
975 /*
976 * The list of FW domains depends on the SKU in gen11+ so we
977 * can't determine it statically. We use FORCEWAKE_ALL and
978 * translate it here to the list of available domains.
979 */
980 if (entry->domains == FORCEWAKE_ALL)
981 return uncore->fw_domains;
982
983 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
984 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
985 entry->domains & ~uncore->fw_domains, offset);
986
987 return entry->domains;
988}
989
990/*
991 * Shadowed register tables describe special register ranges that i915 is
992 * allowed to write to without acquiring forcewake. If these registers' power
993 * wells are down, the hardware will save values written by i915 to a shadow
994 * copy and automatically transfer them into the real register the next time
995 * the power well is woken up. Shadowing only applies to writes; forcewake
996 * must still be acquired when reading from registers in these ranges.
997 *
998 * The documentation for shadowed registers is somewhat spotty on older
999 * platforms. However, missing registers from these lists is non-fatal; it just
1000 * means we'll wake up the hardware for some register accesses where we didn't
1001 * really need to.
1002 *
1003 * The ranges listed in these tables must be sorted by offset.
1004 *
1005 * When adding new tables here, please also add them to
1006 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
1007 * scanned for obvious mistakes or typos by the selftests.
1008 */
1009
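/*
 * Editor's note: an illustrative sketch, not part of this file. The sort
 * requirement above is what makes the binary search in is_shadowed()
 * below valid; a check in the spirit of intel_shadow_table_check() could
 * look like this (the helper name is hypothetical).
 */
#if 0
static bool example_shadow_table_sorted(const struct i915_range *table,
					unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (table[i].start > table[i].end)
			return false; /* inverted range */
		if (i > 0 && table[i].start <= table[i - 1].end)
			return false; /* out of order or overlapping */
	}
	return true;
}
#endif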
1010static const struct i915_range gen8_shadowed_regs[] = {
1011 { .start = 0x2030, .end = 0x2030 },
1012 { .start = 0xA008, .end = 0xA00C },
1013 { .start = 0x12030, .end = 0x12030 },
1014 { .start = 0x1a030, .end = 0x1a030 },
1015 { .start = 0x22030, .end = 0x22030 },
1016};
1017
1018static const struct i915_range gen11_shadowed_regs[] = {
1019 { .start = 0x2030, .end = 0x2030 },
1020 { .start = 0x2550, .end = 0x2550 },
1021 { .start = 0xA008, .end = 0xA00C },
1022 { .start = 0x22030, .end = 0x22030 },
1023 { .start = 0x22230, .end = 0x22230 },
1024 { .start = 0x22510, .end = 0x22550 },
1025 { .start = 0x1C0030, .end = 0x1C0030 },
1026 { .start = 0x1C0230, .end = 0x1C0230 },
1027 { .start = 0x1C0510, .end = 0x1C0550 },
1028 { .start = 0x1C4030, .end = 0x1C4030 },
1029 { .start = 0x1C4230, .end = 0x1C4230 },
1030 { .start = 0x1C4510, .end = 0x1C4550 },
1031 { .start = 0x1C8030, .end = 0x1C8030 },
1032 { .start = 0x1C8230, .end = 0x1C8230 },
1033 { .start = 0x1C8510, .end = 0x1C8550 },
1034 { .start = 0x1D0030, .end = 0x1D0030 },
1035 { .start = 0x1D0230, .end = 0x1D0230 },
1036 { .start = 0x1D0510, .end = 0x1D0550 },
1037 { .start = 0x1D4030, .end = 0x1D4030 },
1038 { .start = 0x1D4230, .end = 0x1D4230 },
1039 { .start = 0x1D4510, .end = 0x1D4550 },
1040 { .start = 0x1D8030, .end = 0x1D8030 },
1041 { .start = 0x1D8230, .end = 0x1D8230 },
1042 { .start = 0x1D8510, .end = 0x1D8550 },
1043};
1044
1045static const struct i915_range gen12_shadowed_regs[] = {
1046 { .start = 0x2030, .end = 0x2030 },
1047 { .start = 0x2510, .end = 0x2550 },
1048 { .start = 0xA008, .end = 0xA00C },
1049 { .start = 0xA188, .end = 0xA188 },
1050 { .start = 0xA278, .end = 0xA278 },
1051 { .start = 0xA540, .end = 0xA56C },
1052 { .start = 0xC4C8, .end = 0xC4C8 },
1053 { .start = 0xC4D4, .end = 0xC4D4 },
1054 { .start = 0xC600, .end = 0xC600 },
1055 { .start = 0x22030, .end = 0x22030 },
1056 { .start = 0x22510, .end = 0x22550 },
1057 { .start = 0x1C0030, .end = 0x1C0030 },
1058 { .start = 0x1C0510, .end = 0x1C0550 },
1059 { .start = 0x1C4030, .end = 0x1C4030 },
1060 { .start = 0x1C4510, .end = 0x1C4550 },
1061 { .start = 0x1C8030, .end = 0x1C8030 },
1062 { .start = 0x1C8510, .end = 0x1C8550 },
1063 { .start = 0x1D0030, .end = 0x1D0030 },
1064 { .start = 0x1D0510, .end = 0x1D0550 },
1065 { .start = 0x1D4030, .end = 0x1D4030 },
1066 { .start = 0x1D4510, .end = 0x1D4550 },
1067 { .start = 0x1D8030, .end = 0x1D8030 },
1068 { .start = 0x1D8510, .end = 0x1D8550 },
1069
1070 /*
1071 * The rest of these ranges are specific to Xe_HP and beyond, but
1072 * are reserved/unused ranges on earlier gen12 platforms, so they can
1073 * be safely added to the gen12 table.
1074 */
1075 { .start = 0x1E0030, .end = 0x1E0030 },
1076 { .start = 0x1E0510, .end = 0x1E0550 },
1077 { .start = 0x1E4030, .end = 0x1E4030 },
1078 { .start = 0x1E4510, .end = 0x1E4550 },
1079 { .start = 0x1E8030, .end = 0x1E8030 },
1080 { .start = 0x1E8510, .end = 0x1E8550 },
1081 { .start = 0x1F0030, .end = 0x1F0030 },
1082 { .start = 0x1F0510, .end = 0x1F0550 },
1083 { .start = 0x1F4030, .end = 0x1F4030 },
1084 { .start = 0x1F4510, .end = 0x1F4550 },
1085 { .start = 0x1F8030, .end = 0x1F8030 },
1086 { .start = 0x1F8510, .end = 0x1F8550 },
1087};
1088
1089static const struct i915_range dg2_shadowed_regs[] = {
1090 { .start = 0x2030, .end = 0x2030 },
1091 { .start = 0x2510, .end = 0x2550 },
1092 { .start = 0xA008, .end = 0xA00C },
1093 { .start = 0xA188, .end = 0xA188 },
1094 { .start = 0xA278, .end = 0xA278 },
1095 { .start = 0xA540, .end = 0xA56C },
1096 { .start = 0xC4C8, .end = 0xC4C8 },
1097 { .start = 0xC4E0, .end = 0xC4E0 },
1098 { .start = 0xC600, .end = 0xC600 },
1099 { .start = 0xC658, .end = 0xC658 },
1100 { .start = 0x22030, .end = 0x22030 },
1101 { .start = 0x22510, .end = 0x22550 },
1102 { .start = 0x1C0030, .end = 0x1C0030 },
1103 { .start = 0x1C0510, .end = 0x1C0550 },
1104 { .start = 0x1C4030, .end = 0x1C4030 },
1105 { .start = 0x1C4510, .end = 0x1C4550 },
1106 { .start = 0x1C8030, .end = 0x1C8030 },
1107 { .start = 0x1C8510, .end = 0x1C8550 },
1108 { .start = 0x1D0030, .end = 0x1D0030 },
1109 { .start = 0x1D0510, .end = 0x1D0550 },
1110 { .start = 0x1D4030, .end = 0x1D4030 },
1111 { .start = 0x1D4510, .end = 0x1D4550 },
1112 { .start = 0x1D8030, .end = 0x1D8030 },
1113 { .start = 0x1D8510, .end = 0x1D8550 },
1114 { .start = 0x1E0030, .end = 0x1E0030 },
1115 { .start = 0x1E0510, .end = 0x1E0550 },
1116 { .start = 0x1E4030, .end = 0x1E4030 },
1117 { .start = 0x1E4510, .end = 0x1E4550 },
1118 { .start = 0x1E8030, .end = 0x1E8030 },
1119 { .start = 0x1E8510, .end = 0x1E8550 },
1120 { .start = 0x1F0030, .end = 0x1F0030 },
1121 { .start = 0x1F0510, .end = 0x1F0550 },
1122 { .start = 0x1F4030, .end = 0x1F4030 },
1123 { .start = 0x1F4510, .end = 0x1F4550 },
1124 { .start = 0x1F8030, .end = 0x1F8030 },
1125 { .start = 0x1F8510, .end = 0x1F8550 },
1126};
1127
1128static const struct i915_range pvc_shadowed_regs[] = {
1129 { .start = 0x2030, .end = 0x2030 },
1130 { .start = 0x2510, .end = 0x2550 },
1131 { .start = 0xA008, .end = 0xA00C },
1132 { .start = 0xA188, .end = 0xA188 },
1133 { .start = 0xA278, .end = 0xA278 },
1134 { .start = 0xA540, .end = 0xA56C },
1135 { .start = 0xC4C8, .end = 0xC4C8 },
1136 { .start = 0xC4E0, .end = 0xC4E0 },
1137 { .start = 0xC600, .end = 0xC600 },
1138 { .start = 0xC658, .end = 0xC658 },
1139 { .start = 0x22030, .end = 0x22030 },
1140 { .start = 0x22510, .end = 0x22550 },
1141 { .start = 0x1C0030, .end = 0x1C0030 },
1142 { .start = 0x1C0510, .end = 0x1C0550 },
1143 { .start = 0x1C4030, .end = 0x1C4030 },
1144 { .start = 0x1C4510, .end = 0x1C4550 },
1145 { .start = 0x1C8030, .end = 0x1C8030 },
1146 { .start = 0x1C8510, .end = 0x1C8550 },
1147 { .start = 0x1D0030, .end = 0x1D0030 },
1148 { .start = 0x1D0510, .end = 0x1D0550 },
1149 { .start = 0x1D4030, .end = 0x1D4030 },
1150 { .start = 0x1D4510, .end = 0x1D4550 },
1151 { .start = 0x1D8030, .end = 0x1D8030 },
1152 { .start = 0x1D8510, .end = 0x1D8550 },
1153 { .start = 0x1E0030, .end = 0x1E0030 },
1154 { .start = 0x1E0510, .end = 0x1E0550 },
1155 { .start = 0x1E4030, .end = 0x1E4030 },
1156 { .start = 0x1E4510, .end = 0x1E4550 },
1157 { .start = 0x1E8030, .end = 0x1E8030 },
1158 { .start = 0x1E8510, .end = 0x1E8550 },
1159 { .start = 0x1F0030, .end = 0x1F0030 },
1160 { .start = 0x1F0510, .end = 0x1F0550 },
1161 { .start = 0x1F4030, .end = 0x1F4030 },
1162 { .start = 0x1F4510, .end = 0x1F4550 },
1163 { .start = 0x1F8030, .end = 0x1F8030 },
1164 { .start = 0x1F8510, .end = 0x1F8550 },
1165};
1166
1167static int mmio_range_cmp(u32 key, const struct i915_range *range)
1168{
1169 if (key < range->start)
1170 return -1;
1171 else if (key > range->end)
1172 return 1;
1173 else
1174 return 0;
1175}
1176
1177static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1178{
1179 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1180 return false;
1181
1182 if (IS_GSI_REG(offset))
1183 offset += uncore->gsi_offset;
1184
1185 return BSEARCH(offset,
1186 uncore->shadowed_reg_table,
1187 uncore->shadowed_reg_table_entries,
1188 mmio_range_cmp);
1189}
1190
1191static enum forcewake_domains
1192gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1193{
1194 return FORCEWAKE_RENDER;
1195}
1196
1197#define __fwtable_reg_read_fw_domains(uncore, offset) \
1198({ \
1199 enum forcewake_domains __fwd = 0; \
1200 if (NEEDS_FORCE_WAKE((offset))) \
1201 __fwd = find_fw_domain(uncore, offset); \
1202 __fwd; \
1203})
1204
1205#define __fwtable_reg_write_fw_domains(uncore, offset) \
1206({ \
1207 enum forcewake_domains __fwd = 0; \
1208 const u32 __offset = (offset); \
1209 if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1210 __fwd = find_fw_domain(uncore, __offset); \
1211 __fwd; \
1212})
1213
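/*
 * Editor's note: a simplified illustrative sketch, not part of this file.
 * A fwtable write helper uses the macro above so that only the domains a
 * register actually needs are woken, and shadowed registers skip
 * forcewake entirely. The real write paths additionally take uncore->lock
 * and use the auto-releasing forcewake helpers.
 */
#if 0
static void example_fwtable_write(struct intel_uncore *uncore,
				  i915_reg_t reg, u32 val)
{
	enum forcewake_domains fw =
		__fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));

	if (fw)
		intel_uncore_forcewake_get(uncore, fw);
	intel_uncore_write_fw(uncore, reg, val); /* raw write, fw held */
	if (fw)
		intel_uncore_forcewake_put(uncore, fw);
}
#endif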
1214#define GEN_FW_RANGE(s, e, d) \
1215 { .start = (s), .end = (e), .domains = (d) }
1216
1217/*
1218 * All platforms' forcewake tables below must be sorted by offset ranges.
1219 * Furthermore, newly added forcewake tables should be "watertight" and have
1220 * no gaps between ranges.
1221 *
1222 * When there are multiple consecutive ranges listed in the bspec with
1223 * the same forcewake domain, it is customary to combine them into a single
1224 * row in the tables below to keep the tables small and lookups fast.
1225 * Likewise, reserved/unused ranges may be combined with the preceding and/or
1226 * following ranges since the driver will never be making MMIO accesses in
1227 * those ranges.
1228 *
1229 * For example, if the bspec were to list:
1230 *
1231 * ...
1232 * 0x1000 - 0x1fff: GT
1233 * 0x2000 - 0x2cff: GT
1234 * 0x2d00 - 0x2fff: unused/reserved
1235 * 0x3000 - 0xffff: GT
1236 * ...
1237 *
1238 * these could all be represented by a single line in the code:
1239 *
1240 * GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1241 *
1242 * When adding new forcewake tables here, please also add them to
1243 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1244 * scanned for obvious mistakes or typos by the selftests.
1245 */
1246
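/*
 * Editor's note: an illustrative sketch, not part of this file. The
 * "watertight" property above means each range must begin exactly where
 * the previous one ends; a check along the lines of the mock selftests
 * could be (the helper name is hypothetical):
 */
#if 0
static bool example_table_is_watertight(const struct intel_forcewake_range *t,
					unsigned int num)
{
	unsigned int i;

	for (i = 1; i < num; i++)
		if (t[i].start != t[i - 1].end + 1)
			return false; /* gap or overlap between ranges */
	return true;
}
#endif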
1247static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1248 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1249};
1250
1251static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1252 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1253 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1254 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1255 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1256 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1257 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1258 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1259};
1260
1261static const struct intel_forcewake_range __chv_fw_ranges[] = {
1262 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1263 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1264 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1265 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1266 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1267 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1268 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1269 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1270 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1271 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1272 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1273 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1274 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1275 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1276 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1277 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1278};
1279
1280static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1281 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1282 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1283 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1284 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1285 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1286 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1287 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1288 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1289 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1290 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1291 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1292 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1293 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1294 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1295 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1296 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1297 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1298 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1299 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1300 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1301 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1302 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1303 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1304 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1305 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1306 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1307 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1308 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1309 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1310 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1311 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1312 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1313};
1314
1315static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1316 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1317 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1318 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1319 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1320 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1321 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1322 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1323 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1324 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1325 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1326 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1327 GEN_FW_RANGE(0x8800, 0x8bff, 0),
1328 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1329 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1330 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1331 GEN_FW_RANGE(0x9560, 0x95ff, 0),
1332 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1333 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1334 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1335 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1336 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1337 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1338 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1339 GEN_FW_RANGE(0x24000, 0x2407f, 0),
1340 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1341 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1342 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1343 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1344 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1345 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1346 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1347 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1348 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1349 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1350 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1351};
1352
1353static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1354 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1355 0x0 - 0xaff: reserved
1356 0xb00 - 0x1fff: always on */
1357 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1358 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1359 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1360 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1361 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1362 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1363 0x4000 - 0x48ff: gt
1364 0x4900 - 0x51ff: reserved */
1365 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1366 0x5200 - 0x53ff: render
1367 0x5400 - 0x54ff: reserved
1368 0x5500 - 0x7fff: render */
1369 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1370 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1371 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1372 0x8160 - 0x817f: reserved
1373 0x8180 - 0x81ff: always on */
1374 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1375 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1376 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1377 0x8500 - 0x87ff: gt
1378 0x8800 - 0x8fff: reserved
1379 0x9000 - 0x947f: gt
1380 0x9480 - 0x94cf: reserved */
1381 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1382 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1383 0x9560 - 0x95ff: always on
1384 0x9600 - 0x97ff: reserved */
1385 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1386 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1387 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1388 0xb400 - 0xbf7f: gt
1389 0xb480 - 0xbfff: reserved
1390 0xc000 - 0xcfff: gt */
1391 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1392 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1393 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1394 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1395 0xdc00 - 0xddff: render
1396 0xde00 - 0xde7f: reserved
1397 0xde80 - 0xe8ff: render
1398 0xe900 - 0xefff: reserved */
1399 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1400 0xf000 - 0xffff: gt
1401 0x10000 - 0x147ff: reserved */
1402 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1403 0x14800 - 0x14fff: render
1404 0x15000 - 0x16dff: reserved
1405 0x16e00 - 0x1bfff: render
1406 0x1c000 - 0x1ffff: reserved */
1407 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1408 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1409 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1410 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1411 0x24000 - 0x2407f: always on
1412 0x24080 - 0x2417f: reserved */
1413 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1414 0x24180 - 0x241ff: gt
1415 0x24200 - 0x249ff: reserved */
1416 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1417 0x24a00 - 0x24a7f: render
1418 0x24a80 - 0x251ff: reserved */
1419 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1420 0x25200 - 0x252ff: gt
1421 0x25300 - 0x255ff: reserved */
1422 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1423 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1424 0x25680 - 0x256ff: VD2
1425 0x25700 - 0x259ff: reserved */
1426 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1427 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1428 0x25a80 - 0x25aff: VD2
1429 0x25b00 - 0x2ffff: reserved */
1430 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1431 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1432 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1433 0x1c0000 - 0x1c2bff: VD0
1434 0x1c2c00 - 0x1c2cff: reserved
1435 0x1c2d00 - 0x1c2dff: VD0
1436 0x1c2e00 - 0x1c3eff: reserved
1437 0x1c3f00 - 0x1c3fff: VD0 */
1438 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1439 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1440 0x1c8000 - 0x1ca0ff: VE0
1441 0x1ca100 - 0x1cbeff: reserved
1442 0x1cbf00 - 0x1cbfff: VE0 */
1443 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1444 0x1cc000 - 0x1ccfff: VD0
1445 0x1cd000 - 0x1cffff: reserved */
1446 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1447 0x1d0000 - 0x1d2bff: VD2
1448 0x1d2c00 - 0x1d2cff: reserved
1449 0x1d2d00 - 0x1d2dff: VD2
1450 0x1d2e00 - 0x1d3eff: reserved
1451 0x1d3f00 - 0x1d3fff: VD2 */
1452};
1453
1454/*
1455 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
1456 * switching it from the GT domain to the render domain.
1457 */
1458#define XEHP_FWRANGES(FW_RANGE_D800) \
1459 GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
1460 0x0 - 0xaff: reserved \
1461 0xb00 - 0x1fff: always on */ \
1462 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
1463 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
1464 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
1465 0x4b00 - 0x4fff: reserved \
1466 0x5000 - 0x51ff: always on */ \
1467 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
1468 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
1469 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
1470 GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
1471 0x8160 - 0x817f: reserved \
1472 0x8180 - 0x81ff: always on */ \
1473 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
1474 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
1475 GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
1476 0x8500 - 0x87ff: gt \
1477 0x8800 - 0x8c7f: reserved \
1478 0x8c80 - 0x8cff: gt (DG2 only) */ \
1479 GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
1480 0x8d00 - 0x8dff: render (DG2 only) \
1481 0x8e00 - 0x8fff: reserved */ \
1482 GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
1483 0x9000 - 0x947f: gt \
1484 0x9480 - 0x94cf: reserved */ \
1485 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
1486 GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
1487 0x9560 - 0x95ff: always on \
1488 0x9600 - 0x967f: reserved */ \
1489 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
1490 0x9680 - 0x96ff: render (DG2 only) \
1491 0x9700 - 0x97ff: reserved */ \
1492 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
1493 0x9800 - 0xb4ff: gt \
1494 0xb500 - 0xbfff: reserved \
1495 0xc000 - 0xcfff: gt */ \
1496 GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
1497 GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
1498 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
1499 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
1500 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
1501 0xdd00 - 0xddff: gt \
1502 0xde00 - 0xde7f: reserved */ \
1503 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
1504 0xde80 - 0xdfff: render \
1505 0xe000 - 0xe0ff: reserved \
1506 0xe100 - 0xe8ff: render */ \
1507 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
1508 0xe900 - 0xe9ff: gt \
1509 0xea00 - 0xefff: reserved \
1510 0xf000 - 0xffff: gt */ \
1511 GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
1512 0x10000 - 0x11fff: reserved \
1513 0x12000 - 0x127ff: always on \
1514 0x12800 - 0x12fff: reserved */ \
1515 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
1516 GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1517 0x13200 - 0x133ff: VD2 (DG2 only) \
1518 0x13400 - 0x13fff: reserved */ \
1519 GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
1520 GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
1521 GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
1522 GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
1523 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
1524 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
1525 0x15000 - 0x15fff: gt (DG2 only) \
1526 0x16000 - 0x16dff: reserved */ \
1527 GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
1528 GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1529 0x20000 - 0x20fff: VD0 (XEHPSDV only) \
1530 0x21000 - 0x21fff: reserved */ \
1531 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
1532 GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
1533 0x24000 - 0x2407f: always on \
1534 0x24080 - 0x2417f: reserved */ \
1535 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
1536 0x24180 - 0x241ff: gt \
1537 0x24200 - 0x249ff: reserved */ \
1538 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
1539 0x24a00 - 0x24a7f: render \
1540 0x24a80 - 0x251ff: reserved */ \
1541 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
1542 0x25200 - 0x252ff: gt \
1543 0x25300 - 0x25fff: reserved */ \
1544 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
1545 0x26000 - 0x27fff: render \
1546 0x28000 - 0x29fff: reserved \
1547 0x2a000 - 0x2ffff: undocumented */ \
1548 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
1549 GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
1550 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
1551 0x1c0000 - 0x1c2bff: VD0 \
1552 0x1c2c00 - 0x1c2cff: reserved \
1553 0x1c2d00 - 0x1c2dff: VD0 \
1554 0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
1555 0x1c3f00 - 0x1c3fff: VD0 */ \
1556 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
1557 0x1c4000 - 0x1c6bff: VD1 \
1558 0x1c6c00 - 0x1c6cff: reserved \
1559 0x1c6d00 - 0x1c6dff: VD1 \
1560 0x1c6e00 - 0x1c7fff: reserved */ \
1561 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
1562 0x1c8000 - 0x1ca0ff: VE0 \
1563 0x1ca100 - 0x1cbfff: reserved */ \
1564 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
1565 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
1566 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
1567 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
1568 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
1569 0x1d0000 - 0x1d2bff: VD2 \
1570 0x1d2c00 - 0x1d2cff: reserved \
1571 0x1d2d00 - 0x1d2dff: VD2 \
1572 0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
1573 0x1d3e00 - 0x1d3eff: reserved \
1574 0x1d3f00 - 0x1d3fff: VD2 */ \
1575 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
1576 0x1d4000 - 0x1d6bff: VD3 \
1577 0x1d6c00 - 0x1d6cff: reserved \
1578 0x1d6d00 - 0x1d6dff: VD3 \
1579 0x1d6e00 - 0x1d7fff: reserved */ \
1580 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
1581 0x1d8000 - 0x1da0ff: VE1 \
1582 0x1da100 - 0x1dffff: reserved */ \
1583 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
1584 0x1e0000 - 0x1e2bff: VD4 \
1585 0x1e2c00 - 0x1e2cff: reserved \
1586 0x1e2d00 - 0x1e2dff: VD4 \
1587 0x1e2e00 - 0x1e3eff: reserved \
1588 0x1e3f00 - 0x1e3fff: VD4 */ \
1589 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
1590 0x1e4000 - 0x1e6bff: VD5 \
1591 0x1e6c00 - 0x1e6cff: reserved \
1592 0x1e6d00 - 0x1e6dff: VD5 \
1593 0x1e6e00 - 0x1e7fff: reserved */ \
1594 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
1595 0x1e8000 - 0x1ea0ff: VE2 \
1596 0x1ea100 - 0x1effff: reserved */ \
1597 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
1598 0x1f0000 - 0x1f2bff: VD6 \
1599 0x1f2c00 - 0x1f2cff: reserved \
1600 0x1f2d00 - 0x1f2dff: VD6 \
1601 0x1f2e00 - 0x1f3eff: reserved \
1602 0x1f3f00 - 0x1f3fff: VD6 */ \
1603 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
1604 0x1f4000 - 0x1f6bff: VD7 \
1605 0x1f6c00 - 0x1f6cff: reserved \
1606 0x1f6d00 - 0x1f6dff: VD7 \
1607 0x1f6e00 - 0x1f7fff: reserved */ \
1608 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1609
1610static const struct intel_forcewake_range __xehp_fw_ranges[] = {
1611 XEHP_FWRANGES(FORCEWAKE_GT)
1612};
1613
1614static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1615 XEHP_FWRANGES(FORCEWAKE_RENDER)
1616};
1617
1618static const struct intel_forcewake_range __pvc_fw_ranges[] = {
1619 GEN_FW_RANGE(0x0, 0xaff, 0),
1620 GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1621 GEN_FW_RANGE(0xc00, 0xfff, 0),
1622 GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1623 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1624 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1625 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1626 GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
1627  0x4000 - 0x4aff: gt
1628  0x4b00 - 0x4fff: reserved
1629  0x5000 - 0x51ff: gt
1630  0x5200 - 0x52ff: reserved
1631  0x5300 - 0x53ff: gt
1632  0x5400 - 0x7fff: reserved
1633  0x8000 - 0x813f: gt */
1634 GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
1635 GEN_FW_RANGE(0x8180, 0x81ff, 0),
1636 GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1637  0x8200 - 0x82ff: gt
1638  0x8300 - 0x84ff: reserved
1639  0x8500 - 0x887f: gt
1640  0x8880 - 0x8a7f: reserved
1641  0x8a80 - 0x8aff: gt
1642  0x8b00 - 0x8fff: reserved
1643  0x9000 - 0x947f: gt
1644  0x9480 - 0x94cf: reserved */
1645 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1646 GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1647  0x9560 - 0x95ff: always on
1648  0x9600 - 0x967f: reserved */
1649 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1650  0x9680 - 0x96ff: render
1651  0x9700 - 0x97ff: reserved */
1652 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1653  0x9800 - 0xb4ff: gt
1654  0xb500 - 0xbfff: reserved
1655  0xc000 - 0xcfff: gt */
1656 GEN_FW_RANGE(0xd000, 0xd3ff, 0),
1657 GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
1658 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1659 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1660  0xdd00 - 0xddff: gt
1661  0xde00 - 0xde7f: reserved */
1662 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1663  0xde80 - 0xdeff: render
1664  0xdf00 - 0xe1ff: reserved
1665  0xe200 - 0xe7ff: render
1666  0xe800 - 0xe8ff: reserved */
1667 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
1668  0xe900 - 0xe9ff: gt
1669  0xea00 - 0xebff: reserved
1670  0xec00 - 0xffff: gt
1671  0x10000 - 0x11fff: reserved */
1672 GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
1673  0x12000 - 0x127ff: always on
1674  0x12800 - 0x12fff: reserved */
1675 GEN_FW_RANGE(0x13000, 0x23fff, FORCEWAKE_GT), /*
1676  0x13000 - 0x135ff: gt
1677  0x13600 - 0x147ff: reserved
1678  0x14800 - 0x153ff: gt
1679  0x15400 - 0x19fff: reserved
1680  0x1a000 - 0x1ffff: gt
1681  0x20000 - 0x21fff: reserved
1682  0x22000 - 0x23fff: gt */
1683 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1684  0x24000 - 0x2407f: always on
1685  0x24080 - 0x2417f: reserved */
1686 GEN_FW_RANGE(0x24180, 0x3ffff, FORCEWAKE_GT), /*
1687  0x24180 - 0x241ff: gt
1688  0x24200 - 0x251ff: reserved
1689  0x25200 - 0x252ff: gt
1690  0x25300 - 0x25fff: reserved
1691  0x26000 - 0x27fff: gt
1692  0x28000 - 0x2ffff: reserved
1693  0x30000 - 0x3ffff: gt */
1694 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1695 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1696  0x1c0000 - 0x1c2bff: VD0
1697  0x1c2c00 - 0x1c2cff: reserved
1698  0x1c2d00 - 0x1c2dff: VD0
1699  0x1c2e00 - 0x1c3eff: reserved
1700  0x1c3f00 - 0x1c3fff: VD0 */
1701 GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
1702  0x1c4000 - 0x1c6aff: VD1
1703  0x1c6b00 - 0x1c7eff: reserved
1704  0x1c7f00 - 0x1c7fff: VD1
1705  0x1c8000 - 0x1cffff: reserved */
1706 GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1707  0x1d0000 - 0x1d2aff: VD2
1708  0x1d2b00 - 0x1d3eff: reserved
1709  0x1d3f00 - 0x1d3fff: VD2
1710  0x1d4000 - 0x23ffff: reserved */
1711 GEN_FW_RANGE(0x240000, 0x3dffff, 0),
1712 GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
1713};
1714
1715static void
1716ilk_dummy_write(struct intel_uncore *uncore)
1717{
1718 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1719 * the chip from rc6 before touching it for real. MI_MODE is masked,
1720 * hence harmless to write 0 into. */
1721 __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1722}
1723
1724static void
1725__unclaimed_reg_debug(struct intel_uncore *uncore,
1726        const i915_reg_t reg,
1727        const bool read)
1728{
1729 if (drm_WARN(&uncore->i915->drm,
1730       check_for_unclaimed_mmio(uncore),
1731       "Unclaimed %s register 0x%x\n",
1732       read ? "read from" : "write to",
1733       i915_mmio_reg_offset(reg)))
1734  /* Only report the first N failures */
1735  uncore->i915->params.mmio_debug--;
1736}
1737
1738static void
1739__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1740          const i915_reg_t reg,
1741          const bool read)
1742{
1743 if (check_for_unclaimed_mmio(uncore))
1744  drm_dbg(&uncore->i915->drm,
1745   "Unclaimed access detected before %s register 0x%x\n",
1746   read ? "read from" : "write to",
1747   i915_mmio_reg_offset(reg));
1748}
1749
1750static inline void
1751unclaimed_reg_debug(struct intel_uncore *uncore,
1752      const i915_reg_t reg,
1753      const bool read,
1754      const bool before)
1755{
1756 if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1757  return;
1758
1759 /* interrupts are disabled and re-enabled around uncore->lock usage */
1760 lockdep_assert_held(&uncore->lock);
1761
1762 if (before) {
1763  spin_lock(&uncore->debug->lock);
1764  __unclaimed_previous_reg_debug(uncore, reg, read);
1765 } else {
1766  __unclaimed_reg_debug(uncore, reg, read);
1767  spin_unlock(&uncore->debug->lock);
1768 }
1769}
1770
1771#define __vgpu_read(x) \
1772static u##x \
1773vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1774 u##x val = __raw_uncore_read##x(uncore, reg); \
1775 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1776 return val; \
1777}
1778__vgpu_read(8)
1779__vgpu_read(16)
1780__vgpu_read(32)
1781__vgpu_read(64)
1782
1783#define GEN2_READ_HEADER(x) \
1784 u##x val = 0; \
1785 assert_rpm_wakelock_held(uncore->rpm);
1786
1787#define GEN2_READ_FOOTER \
1788 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1789 return val
1790
1791#define __gen2_read(x) \
1792static u##x \
1793gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1794 GEN2_READ_HEADER(x); \
1795 val = __raw_uncore_read##x(uncore, reg); \
1796 GEN2_READ_FOOTER; \
1797}
1798
1799#define __gen5_read(x) \
1800static u##x \
1801gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1802 GEN2_READ_HEADER(x); \
1803 ilk_dummy_write(uncore); \
1804 val = __raw_uncore_read##x(uncore, reg); \
1805 GEN2_READ_FOOTER; \
1806}
1807
1808__gen5_read(8)
1809__gen5_read(16)
1810__gen5_read(32)
1811__gen5_read(64)
1812__gen2_read(8)
1813__gen2_read(16)
1814__gen2_read(32)
1815__gen2_read(64)
1816
1817#undef __gen5_read
1818#undef __gen2_read
1819
1820#undef GEN2_READ_FOOTER
1821#undef GEN2_READ_HEADER
1822
1823#define GEN6_READ_HEADER(x) \
1824 u32 offset = i915_mmio_reg_offset(reg); \
1825 unsigned long irqflags; \
1826 u##x val = 0; \
1827 assert_rpm_wakelock_held(uncore->rpm); \
1828 spin_lock_irqsave(&uncore->lock, irqflags); \
1829 unclaimed_reg_debug(uncore, reg, true, true)
1830
1831#define GEN6_READ_FOOTER \
1832 unclaimed_reg_debug(uncore, reg, true, false); \
1833 spin_unlock_irqrestore(&uncore->lock, irqflags); \
1834 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1835 return val
1836
1837static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1838          enum forcewake_domains fw_domains)
1839{
1840 struct intel_uncore_forcewake_domain *domain;
1841 unsigned int tmp;
1842
1843 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1844
1845 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1846  fw_domain_arm_timer(domain);
1847
1848 fw_domains_get(uncore, fw_domains);
1849}
1850
1851static inline void __force_wake_auto(struct intel_uncore *uncore,
1852 enum forcewake_domains fw_domains)
1853{
1854 GEM_BUG_ON(!fw_domains);
1855
1856 /* Turn on all requested but inactive supported forcewake domains. */
1857 fw_domains &= uncore->fw_domains;
1858 fw_domains &= ~uncore->fw_domains_active;
1859
1860 if (fw_domains)
1861 ___force_wake_auto(uncore, fw_domains);
1862}
1863
1864#define __gen_fwtable_read(x) \
1865static u##x \
1866fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1867{ \
1868 enum forcewake_domains fw_engine; \
1869 GEN6_READ_HEADER(x); \
1870 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1871 if (fw_engine) \
1872  __force_wake_auto(uncore, fw_engine); \
1873 val = __raw_uncore_read##x(uncore, reg); \
1874 GEN6_READ_FOOTER; \
1875}
1876
1877static enum forcewake_domains
1878fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1879 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1880}
1881
1882__gen_fwtable_read(8)
1883__gen_fwtable_read(16)
1884__gen_fwtable_read(32)
1885__gen_fwtable_read(64)
1886
1887#undef __gen_fwtable_read
1888#undef GEN6_READ_FOOTER
1889#undef GEN6_READ_HEADER
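
The fwtable accessors above consult the platform's sorted forcewake range table to decide which domains to wake before touching a register. Below is a minimal standalone sketch of that lookup, assuming a linear scan and a toy table; the driver's own find_fw_domain() and the real tables differ in detail.

/* Standalone sketch; the entry layout mirrors struct intel_forcewake_range. */
#include <stdint.h>
#include <stdio.h>

enum { FW_NONE = 0, FW_RENDER = 1 << 0, FW_GT = 1 << 1 };

struct fw_range { uint32_t start, end, domains; };

/* toy table in the same sorted, non-overlapping form as __pvc_fw_ranges */
static const struct fw_range table[] = {
 { 0x0000, 0x0aff, FW_NONE },
 { 0x0b00, 0x0bff, FW_GT },
 { 0x2000, 0x26ff, FW_RENDER },
};

static uint32_t lookup_fw_domains(uint32_t offset)
{
 for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
  if (offset >= table[i].start && offset <= table[i].end)
   return table[i].domains;
 return FW_NONE; /* offsets outside every range need no forcewake */
}

int main(void)
{
 printf("0x0b40 -> %u\n", lookup_fw_domains(0x0b40)); /* FW_GT */
 printf("0x1000 -> %u\n", lookup_fw_domains(0x1000)); /* FW_NONE */
 return 0;
}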
1890
1891#define GEN2_WRITE_HEADER \
1892 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1893 assert_rpm_wakelock_held(uncore->rpm); \
1894
1895#define GEN2_WRITE_FOOTER
1896
1897#define __gen2_write(x) \
1898static void \
1899gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1900 GEN2_WRITE_HEADER; \
1901 __raw_uncore_write##x(uncore, reg, val); \
1902 GEN2_WRITE_FOOTER; \
1903}
1904
1905#define __gen5_write(x) \
1906static void \
1907gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1908 GEN2_WRITE_HEADER; \
1909 ilk_dummy_write(uncore); \
1910 __raw_uncore_write##x(uncore, reg, val); \
1911 GEN2_WRITE_FOOTER; \
1912}
1913
1914__gen5_write(8)
1915__gen5_write(16)
1916__gen5_write(32)
1917__gen2_write(8)
1918__gen2_write(16)
1919__gen2_write(32)
1920
1921#undef __gen5_write
1922#undef __gen2_write
1923
1924#undef GEN2_WRITE_FOOTER
1925#undef GEN2_WRITE_HEADER
1926
1927#define GEN6_WRITE_HEADER \
1928 u32 offset = i915_mmio_reg_offset(reg); \
1929 unsigned long irqflags; \
1930 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1931 assert_rpm_wakelock_held(uncore->rpm); \
1932 spin_lock_irqsave(&uncore->lock, irqflags); \
1933 unclaimed_reg_debug(uncore, reg, false, true)
1934
1935#define GEN6_WRITE_FOOTER \
1936 unclaimed_reg_debug(uncore, reg, false, false); \
1937 spin_unlock_irqrestore(&uncore->lock, irqflags)
1938
1939#define __gen6_write(x) \
1940static void \
1941gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1942 GEN6_WRITE_HEADER; \
1943 if (NEEDS_FORCE_WAKE(offset)) \
1944  __gen6_gt_wait_for_fifo(uncore); \
1945 __raw_uncore_write##x(uncore, reg, val); \
1946 GEN6_WRITE_FOOTER; \
1947}
1948__gen6_write(8)
1949__gen6_write(16)
1950__gen6_write(32)
1951
1952#define __gen_fwtable_write(x) \
1953static void \
1954fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1955 enum forcewake_domains fw_engine; \
1956 GEN6_WRITE_HEADER; \
1957 fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
1958 if (fw_engine) \
1959  __force_wake_auto(uncore, fw_engine); \
1960 __raw_uncore_write##x(uncore, reg, val); \
1961 GEN6_WRITE_FOOTER; \
1962}
1963
1964static enum forcewake_domains
1965fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1966{
1967 return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
1968}
1969
1970__gen_fwtable_write(8)
1971__gen_fwtable_write(16)
1972__gen_fwtable_write(32)
1973
1974#undef __gen_fwtable_write
1975#undef GEN6_WRITE_FOOTER
1976#undef GEN6_WRITE_HEADER
1977
1978#define __vgpu_write(x) \
1979static void \
1980vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1981 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1982 __raw_uncore_write##x(uncore, reg, val); \
1983}
1984__vgpu_write(8)
1985__vgpu_write(16)
1986__vgpu_write(32)
1987
1988#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
1989do { \
1990 (uncore)->funcs.mmio_writeb = x##_write8; \
1991 (uncore)->funcs.mmio_writew = x##_write16; \
1992 (uncore)->funcs.mmio_writel = x##_write32; \
1993} while (0)
1994
1995#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
1996do { \
1997 (uncore)->funcs.mmio_readb = x##_read8; \
1998 (uncore)->funcs.mmio_readw = x##_read16; \
1999 (uncore)->funcs.mmio_readl = x##_read32; \
2000 (uncore)->funcs.mmio_readq = x##_read64; \
2001} while (0)
2002
2003#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2004do { \
2005 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2006 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2007} while (0)
2008
2009#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2010do { \
2011 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2012 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2013} while (0)
2014
2015static int __fw_domain_init(struct intel_uncore *uncore,
2016       enum forcewake_domain_id domain_id,
2017       i915_reg_t reg_set,
2018       i915_reg_t reg_ack)
2019{
2020 struct intel_uncore_forcewake_domain *d;
2021
2022 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2023 GEM_BUG_ON(uncore->fw_domain[domain_id]);
2024
2025 if (i915_inject_probe_failure(uncore->i915))
2026  return -ENOMEM;
2027
2028 d = kzalloc(sizeof(*d), GFP_KERNEL);
2029 if (!d)
2030  return -ENOMEM;
2031
2032 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2033 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2034
2035 d->uncore = uncore;
2036 d->wake_count = 0;
2037 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2038 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2039
2040 d->id = domain_id;
2041
2042 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2043 BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2044 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2045 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2046 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2047 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2048 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2049 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2050 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2051 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2052 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2053 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2054 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2055 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2056 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2057
2058 d->mask = BIT(domain_id);
2059
2060#ifdef __linux__
2061 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2062 d->timer.function = intel_uncore_fw_release_timer;
2063#else
2064 timeout_set(&d->timer, intel_uncore_fw_release_timer, d);
2065#endif
2066
2067 uncore->fw_domains |= BIT(domain_id);
2068
2069 fw_domain_reset(d);
2070
2071 uncore->fw_domain[domain_id] = d;
2072
2073 return 0;
2074}
2075
2076static void fw_domain_fini(struct intel_uncore *uncore,
2077      enum forcewake_domain_id domain_id)
2078{
2079 struct intel_uncore_forcewake_domain *d;
2080
2081 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2082
2083 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2084 if (!d)
2085  return;
2086
2087 uncore->fw_domains &= ~BIT(domain_id);
2088 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2089 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2090 kfree(d);
2091}
2092
2093static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2094{
2095 struct intel_uncore_forcewake_domain *d;
2096 int tmp;
2097
2098 for_each_fw_domain(d, uncore, tmp)
2099  fw_domain_fini(uncore, d->id);
2100}
2101
2102static const struct intel_uncore_fw_get uncore_get_fallback = {
2103 .force_wake_get = fw_domains_get_with_fallback
2104};
2105
2106static const struct intel_uncore_fw_get uncore_get_normal = {
2107 .force_wake_get = fw_domains_get_normal,
2108};
2109
2110static const struct intel_uncore_fw_get uncore_get_thread_status = {
2111 .force_wake_get = fw_domains_get_with_thread_status
2112};
2113
2114static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2115{
2116 struct drm_i915_private *i915 = uncore->i915;
2117 int ret = 0;
2118
2119 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2120
2121#define fw_domain_init(uncore__, id__, set__, ack__) \
2122 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
2123
2124 if (GRAPHICS_VER(i915) >= 11) {
2125  /* we'll prune the domains of missing engines later */
2126  intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask;
2127  int i;
2128
2129  uncore->fw_get_funcs = &uncore_get_fallback;
2130  fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2131          FORCEWAKE_RENDER_GEN9,
2132          FORCEWAKE_ACK_RENDER_GEN9);
2133  fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2134          FORCEWAKE_GT_GEN9,
2135          FORCEWAKE_ACK_GT_GEN9);
2136
2137  for (i = 0; i < I915_MAX_VCS; i++) {
2138   if (!__HAS_ENGINE(emask, _VCS(i)))
2139    continue;
2140
2141   fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2142           FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2143           FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2144  }
2145  for (i = 0; i < I915_MAX_VECS; i++) {
2146   if (!__HAS_ENGINE(emask, _VECS(i)))
2147    continue;
2148
2149   fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2150           FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2151           FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2152  }
2153 } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2154  uncore->fw_get_funcs = &uncore_get_fallback;
2155  fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2156          FORCEWAKE_RENDER_GEN9,
2157          FORCEWAKE_ACK_RENDER_GEN9);
2158  fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2159          FORCEWAKE_GT_GEN9,
2160          FORCEWAKE_ACK_GT_GEN9);
2161  fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2162          FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2163 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2164  uncore->fw_get_funcs = &uncore_get_normal;
2165  fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2166          FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2167  fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2168          FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2169 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2170  uncore->fw_get_funcs = &uncore_get_thread_status;
2171  fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2172          FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2173 } else if (IS_IVYBRIDGE(i915)) {
2174  u32 ecobus;
2175
2176  /* IVB configs may use multi-threaded forcewake */
2177
2178  /* A small trick here - if the bios hasn't configured
2179   * MT forcewake, and if the device is in RC6, then
2180   * force_wake_mt_get will not wake the device and the
2181   * ECOBUS read will return zero. Which will be
2182   * (correctly) interpreted by the test below as MT
2183   * forcewake being disabled.
2184   */
2185  uncore->fw_get_funcs = &uncore_get_thread_status;
2186
2187  /* We need to init first for ECOBUS access and then
2188   * determine later if we want to reinit, in case MT access is
2189   * not working. In this stage we don't know which flavour this
2190   * ivb is, so it is better to reset also the gen6 fw registers
2191   * before the ecobus check.
2192   */
2193
2194  __raw_uncore_write32(uncore, FORCEWAKE, 0);
2195  __raw_posting_read(uncore, ECOBUS);
2196
2197  ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2198          FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2199  if (ret)
2200   goto out;
2201
2202  spin_lock_irq(&uncore->lock);
2203  fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2204  ecobus = __raw_uncore_read32(uncore, ECOBUS);
2205  fw_domains_put(uncore, FORCEWAKE_RENDER);
2206  spin_unlock_irq(&uncore->lock);
2207
2208  if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2209   drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2210   drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2211   fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2212   fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2213           FORCEWAKE, FORCEWAKE_ACK);
2214  }
2215 } else if (GRAPHICS_VER(i915) == 6) {
2216  uncore->fw_get_funcs = &uncore_get_thread_status;
2217  fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2218          FORCEWAKE, FORCEWAKE_ACK);
2219 }
2220
2221#undef fw_domain_init
2222
2223 /* All future platforms are expected to require complex power gating */
2224 drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2225
2226out:
2227 if (ret)
2228  intel_uncore_fw_domains_fini(uncore);
2229
2230 return ret;
2231}
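
The fw_domain_init() wrapper above relies on the GNU "a ?: b" extension to keep only the first error: once ret is nonzero, the right-hand side is skipped and later init calls become no-ops. A minimal standalone sketch of the idiom, with step() and RUN() as illustrative stand-ins rather than driver names:

#include <stdio.h>

static int step(int n)
{
 return n == 2 ? -12 : 0; /* pretend step 2 fails with -ENOMEM */
}

int main(void)
{
 int ret = 0;
#define RUN(n) (ret ?: (ret = step(n)))
 RUN(1);  /* ret stays 0 */
 RUN(2);  /* ret becomes -12 */
 RUN(3);  /* skipped: ret is already nonzero */
#undef RUN
 printf("first error: %d\n", ret); /* prints -12 */
 return 0;
}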
2232
2233#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2234{ \
2235 (uncore)->fw_domains_table = \
2236     (struct intel_forcewake_range *)(d); \
2237 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2238}
2239
2240#define ASSIGN_SHADOW_TABLE(uncore, d) \
2241{ \
2242 (uncore)->shadowed_reg_table = d; \
2243 (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2244}
2245
2246static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2247      unsigned long action, void *data)
2248{
2249 struct intel_uncore *uncore = container_of(nb,
2250       struct intel_uncore, pmic_bus_access_nb);
2251
2252 switch (action) {
2253 case MBI_PMIC_BUS_ACCESS_BEGIN:
2254  /*
2255   * forcewake all now to make sure that we don't need to do a
2256   * forcewake later which on systems where this notifier gets
2257   * called requires the punit to access the shared pmic i2c
2258   * bus, which will be busy after this notification, leading to:
2259   * "render: timed out waiting for forcewake ack request."
2260   * errors.
2261   *
2262   * The notifier is unregistered during intel_runtime_suspend(),
2263   * so it's ok to access the HW here without holding a RPM
2264   * wake reference -> disable wakeref asserts for the time of
2265   * the access.
2266   */
2267  disable_rpm_wakeref_asserts(uncore->rpm);
2268  intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2269  enable_rpm_wakeref_asserts(uncore->rpm);
2270  break;
2271 case MBI_PMIC_BUS_ACCESS_END:
2272  intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2273  break;
2274 }
2275
2276 return NOTIFY_OK;
2277}
2278
2279static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2280{
2281#ifdef __linux__
2282 iounmap(regs);
2283#endif
2284}
2285
2286int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2287{
2288 struct drm_i915_private *i915 = uncore->i915;
2289 int mmio_size;
2290
2291 /*
2292  * Before gen4, the registers and the GTT are behind different BARs.
2293  * However, from gen4 onwards, the registers and the GTT are shared
2294  * in the same BAR, so we want to restrict this ioremap to avoid
2295  * clobbering the GTT, which we want to map with ioremap_wc instead.
2296  * Fortunately, the register BAR remains the same size for all the
2297  * earlier generations up to Ironlake.
2298  * For dgfx chips the register range is expanded to 4MB, and this larger
2299  * range is also used for integrated gpus beginning with Meteor Lake.
2300  */
2301 if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2302  mmio_size = 4 * 1024 * 1024;
2303 else if (GRAPHICS_VER(i915) >= 5)
2304  mmio_size = 2 * 1024 * 1024;
2305 else
2306  mmio_size = 512 * 1024;
Value stored to 'mmio_size' is never read (on this build the only reader, the ioremap() call below, is compiled out under #ifdef __linux__)
2307#ifdef __linux__
2308 uncore->regs = ioremap(phys_addr, mmio_size);
2309 if (uncore->regs == NULL) {
2310  drm_err(&i915->drm, "failed to map registers\n");
2311  return -EIO;
2312 }
2313#endif
2314
2315 return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
2316}
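
On this build every read of mmio_size sits inside #ifdef __linux__, so all three stores above are dead and the analyzer flags the one on its path, line 2306. A standalone sketch of the pattern and of one hypothetical fix, consuming the value on the non-Linux path as well (not committed code):

#include <stdio.h>

static int setup_mmio(int gen, int is_dgfx)
{
 int mmio_size;

 /* same sizing ladder as intel_uncore_setup_mmio() above */
 if (is_dgfx)
  mmio_size = 4 * 1024 * 1024;
 else if (gen >= 5)
  mmio_size = 2 * 1024 * 1024;
 else
  mmio_size = 512 * 1024;

#ifdef __linux__
 printf("would ioremap %d bytes\n", mmio_size); /* the only reader */
#else
 (void)mmio_size; /* consume the store; silences the dead-store warning */
#endif
 return 0;
}

int main(void)
{
 return setup_mmio(4, 0); /* gen4 integrated: 512 KB register BAR */
}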
2317
2318void intel_uncore_init_early(struct intel_uncore *uncore,
2319        struct intel_gt *gt)
2320{
2321 mtx_init(&uncore->lock, IPL_TTY);
2322 uncore->i915 = gt->i915;
2323 uncore->gt = gt;
2324 uncore->rpm = &gt->i915->runtime_pm;
2325}
2326
2327static void uncore_raw_init(struct intel_uncore *uncore)
2328{
2329 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2330
2331 if (intel_vgpu_active(uncore->i915)) {
2332  ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2333  ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2334 } else if (GRAPHICS_VER(uncore->i915) == 5) {
2335  ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2336  ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2337 } else {
2338  ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2339  ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2340 }
2341}
2342
2343static int uncore_forcewake_init(struct intel_uncore *uncore)
2344{
2345 struct drm_i915_private *i915 = uncore->i915;
2346 int ret;
2347
2348 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2349
2350 ret = intel_uncore_fw_domains_init(uncore);
2351 if (ret)
2352  return ret;
2353 forcewake_early_sanitize(uncore, 0);
2354
2355 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2356
2357 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
2358  ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
2359  ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
2360  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2361 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2362  ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2363  ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2364  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2365 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
2366  ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
2367  ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2368  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2369 } else if (GRAPHICS_VER(i915) >= 12) {
2370  ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2371  ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2372  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2373 } else if (GRAPHICS_VER(i915) == 11) {
2374  ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2375  ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2376  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2377 } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2378  ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2379  ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2380  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2381 } else if (IS_CHERRYVIEW(i915)) {
2382  ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2383  ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2384  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2385 } else if (GRAPHICS_VER(i915) == 8) {
2386  ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2387  ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2388  ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2389 } else if (IS_VALLEYVIEW(i915)) {
2390  ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2391  ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2392 } else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2393  ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2394  ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2395 }
2396
2397 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2398 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2399
2400 return 0;
2401}
2402
2403int intel_uncore_init_mmio(struct intel_uncore *uncore)
2404{
2405 struct drm_i915_private *i915 = uncore->i915;
2406 int ret;
2407
2408 /*
2409  * The boot firmware initializes local memory and assesses its health.
2410  * If memory training fails, the punit will have been instructed to
2411  * keep the GT powered down; we won't be able to communicate with it
2412  * and we should not continue with driver initialization.
2413  */
2414 if (IS_DGFX(i915) &&
2415     !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2416  drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2417  return -ENODEV;
2418 }
2419
2420 if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2421  uncore->flags |= UNCORE_HAS_FORCEWAKE;
2422
2423 if (!intel_uncore_has_forcewake(uncore)) {
2424  uncore_raw_init(uncore);
2425 } else {
2426  ret = uncore_forcewake_init(uncore);
2427  if (ret)
2428   return ret;
2429 }
2430
2431 /* make sure fw funcs are set if and only if we have fw */
2432 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2433 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2434 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2435
2436 if (HAS_FPGA_DBG_UNCLAIMED(i915))
2437  uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2438
2439 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2440  uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2441
2442 if (IS_GRAPHICS_VER(i915, 6, 7))
2443  uncore->flags |= UNCORE_HAS_FIFO;
2444
2445 /* clear out unclaimed reg detection bit */
2446 if (intel_uncore_unclaimed_mmio(uncore))
2447  drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2448
2449 return 0;
2450}
2451
2452/*
2453 * We might have detected that some engines are fused off after we initialized
2454 * the forcewake domains. Prune them, to make sure they only reference existing
2455 * engines.
2456 */
2457void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2458       struct intel_gt *gt)
2459{
2460 enum forcewake_domains fw_domains = uncore->fw_domains;
2461 enum forcewake_domain_id domain_id;
2462 int i;
2463
2464 if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2465  return;
2466
2467 for (i = 0; i < I915_MAX_VCS; i++) {
2468  domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2469
2470  if (HAS_ENGINE(gt, _VCS(i)))
2471   continue;
2472
2473  /*
2474   * Starting with XeHP, the power well for an even-numbered
2475   * VDBOX is also used for shared units within the
2476   * media slice such as SFC. So even if the engine
2477   * itself is fused off, we still need to initialize
2478   * the forcewake domain if any of the other engines
2479   * in the same media slice are present.
2480   */
2481  if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
2482   if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2483    continue;
2484
2485   if (HAS_ENGINE(gt, _VECS(i / 2)))
2486    continue;
2487  }
2488
2489  if (fw_domains & BIT(domain_id))
2490   fw_domain_fini(uncore, domain_id);
2491 }
2492
2493 for (i = 0; i < I915_MAX_VECS; i++) {
2494  domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2495
2496  if (HAS_ENGINE(gt, _VECS(i)))
2497   continue;
2498
2499  if (fw_domains & BIT(domain_id))
2500   fw_domain_fini(uncore, domain_id);
2501 }
2502}
2503
2504/* Called via drm-managed action */
2505void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2506{
2507 struct intel_uncore *uncore = data;
2508
2509 if (intel_uncore_has_forcewake(uncore)) {
2510 iosf_mbi_punit_acquire();
2511 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2512 &uncore->pmic_bus_access_nb);
2513 intel_uncore_forcewake_reset(uncore);
2514 intel_uncore_fw_domains_fini(uncore);
2515 iosf_mbi_punit_release();
2516 }
2517}
2518
2519/**
2520 * __intel_wait_for_register_fw - wait until register matches expected state
2521 * @uncore: the struct intel_uncore
2522 * @reg: the register to read
2523 * @mask: mask to apply to register value
2524 * @value: expected value
2525 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2526 * @slow_timeout_ms: slow timeout in millisecond
2527 * @out_value: optional placeholder to hold register value
2528 *
2529 * This routine waits until the target register @reg contains the expected
2530 * @value after applying the @mask, i.e. it waits until ::
2531 *
2532 * (intel_uncore_read_fw(uncore, reg) & mask) == value
2533 *
2534 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2535 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2536 * must not be larger than 20,000 microseconds.
2537 *
2538 * Note that this routine assumes the caller holds forcewake asserted; it is
2539 * not suitable for very long waits. See intel_wait_for_register() if you
2540 * wish to wait without holding forcewake for the duration (i.e. you expect
2541 * the wait to be slow).
2542 *
2543 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2544 */
2545int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2546 i915_reg_t reg,
2547 u32 mask,
2548 u32 value,
2549 unsigned int fast_timeout_us,
2550 unsigned int slow_timeout_ms,
2551 u32 *out_value)
2552{
2553 u32 reg_value = 0;
2554#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2555 int ret;
2556
2557 /* Catch any overuse of this function */
2558 might_sleep_if(slow_timeout_ms);
2559 GEM_BUG_ON(fast_timeout_us > 20000);
2560 GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2561
2562 ret = -ETIMEDOUT;
2563 if (fast_timeout_us && fast_timeout_us <= 20000)
2564 ret = _wait_for_atomic(done, fast_timeout_us, 0);
2565 if (ret && slow_timeout_ms)
2566 ret = wait_for(done, slow_timeout_ms);
2567
2568 if (out_value)
2569 *out_value = reg_value;
2570
2571 return ret;
2572#undef done
2573}
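
A hypothetical caller, polling an ACK bit from atomic context with forcewake already held; MY_CTRL_REG and MY_CTRL_ACK are made-up names for illustration only:

	u32 ctrl;
	int err;

	/* Spin for at most 100us; slow_timeout_ms is 0, so this never sleeps. */
	err = __intel_wait_for_register_fw(uncore, MY_CTRL_REG,
					   MY_CTRL_ACK, MY_CTRL_ACK,
					   100, 0, &ctrl);
	if (err == -ETIMEDOUT)
		drm_err(&uncore->i915->drm,
			"ack timed out, ctrl=%08x\n", ctrl);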
2574
2575/**
2576 * __intel_wait_for_register - wait until register matches expected state
2577 * @uncore: the struct intel_uncore
2578 * @reg: the register to read
2579 * @mask: mask to apply to register value
2580 * @value: expected value
2581 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2582 * @slow_timeout_ms: slow timeout in milliseconds
2583 * @out_value: optional placeholder to hold the register value
2584 *
2585 * This routine waits until the target register @reg contains the expected
2586 * @value after applying the @mask, i.e. it waits until ::
2587 *
2588 * (intel_uncore_read(uncore, reg) & mask) == value
2589 *
2590 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2591 *
2592 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2593 */
2594int __intel_wait_for_register(struct intel_uncore *uncore,
2595 i915_reg_t reg,
2596 u32 mask,
2597 u32 value,
2598 unsigned int fast_timeout_us,
2599 unsigned int slow_timeout_ms,
2600 u32 *out_value)
2601{
2602 unsigned fw =
2603 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2604 u32 reg_value;
2605 int ret;
2606
2607 might_sleep_if(slow_timeout_ms);
2608
2609 spin_lock_irq(&uncore->lock);
2610 intel_uncore_forcewake_get__locked(uncore, fw);
2611
2612 ret = __intel_wait_for_register_fw(uncore,
2613 reg, mask, value,
2614 fast_timeout_us, 0, &reg_value);
2615
2616 intel_uncore_forcewake_put__locked(uncore, fw);
2617 spin_unlock_irq(&uncore->lock);
2618
2619 if (ret && slow_timeout_ms)
2620 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2621 reg),
2622 (reg_value & mask) == value,
2623 slow_timeout_ms * 1000, 10, 1000);
2624
2625 /* just trace the final value */
2626 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2627
2628 if (out_value)
2629 *out_value = reg_value;
2630
2631 return ret;
2632}
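
The two-phase structure is deliberate: the fast poll runs under the uncore lock with forcewake held, while the slow path sleeps between reads without pinning forcewake, letting intel_uncore_read_notrace() wake the hardware per access. A hypothetical call allowing such a long wait (MY_STATUS_REG and MY_STATUS_READY are illustrative names):

	/* Try 2us under forcewake first, then sleep-wait up to 500ms without it. */
	err = __intel_wait_for_register(uncore, MY_STATUS_REG,
					MY_STATUS_READY, MY_STATUS_READY,
					2, 500, NULL);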
2633
2634bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2635{
2636 bool ret;
2637
2638 if (!uncore->debug)
2639 return false;
2640
2641 spin_lock_irq(&uncore->debug->lock);
2642 ret = check_for_unclaimed_mmio(uncore);
2643 spin_unlock_irq(&uncore->debug->lock);
2644
2645 return ret;
2646}
2647
2648bool
2649intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2650{
2651 bool ret = false;
2652
2653 if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2655
2656 spin_lock_irq(&uncore->debug->lock);
2657
2658 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2659 goto out;
2660
2661 if (unlikely(check_for_unclaimed_mmio(uncore))) {
2662 if (!uncore->i915->params.mmio_debug) {
2663 drm_dbg(&uncore->i915->drm,
2664 "Unclaimed register detected, "
2665 "enabling oneshot unclaimed register reporting. "
2666 "Please use i915.mmio_debug=N for more information.\n");
2667 uncore->i915->params.mmio_debug++;
2668 }
2669 uncore->debug->unclaimed_mmio_check--;
2670 ret = true;
2671 }
2672
2673out:
2674 spin_unlock_irq(&uncore->debug->lock);
2675
2676 return ret;
2677}
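
One plausible caller pattern, sketched: arm the oneshot detection before a write that might land on an unclaimed address, then check afterwards (reg and val are placeholders, not from this file):

	intel_uncore_arm_unclaimed_mmio_detection(uncore);
	intel_uncore_write(uncore, reg, val);
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"write to reg %x was unclaimed\n",
			i915_mmio_reg_offset(reg));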
2678
2679/**
2680 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2681 * a register
2682 * @uncore: pointer to struct intel_uncore
2683 * @reg: register in question
2684 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2685 *
2686 * Returns the set of forcewake domains that must be taken (with, for example,
2687 * intel_uncore_forcewake_get()) for the specified register to be accessible in
2688 * the specified mode (read, write or read/write) with raw mmio accessors.
2689 *
2690 * NOTE: On Gen6 and Gen7, the write forcewake domain (FORCEWAKE_RENDER)
2691 * requires callers to do FIFO management on their own or risk losing writes.
2692 */
2693enum forcewake_domains
2694intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2695 i915_reg_t reg, unsigned int op)
2696{
2697 enum forcewake_domains fw_domains = 0;
2698
2699 drm_WARN_ON(&uncore->i915->drm, !op);
2700
2701 if (!intel_uncore_has_forcewake(uncore))
2702 return 0;
2703
2704 if (op & FW_REG_READ)
2705 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2706
2707 if (op & FW_REG_WRITE)
2708 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2709
2710 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2711
2712 return fw_domains;
2713}
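
Typical usage, as a sketch: query the required domains once, then bracket a batch of raw accesses with a single get/put pair so the hardware is woken only once; MY_ENABLE_BIT is an illustrative name:

	enum forcewake_domains fw;
	u32 val;

	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);

	intel_uncore_forcewake_get(uncore, fw);
	val = intel_uncore_read_fw(uncore, reg);
	intel_uncore_write_fw(uncore, reg, val | MY_ENABLE_BIT);
	intel_uncore_forcewake_put(uncore, fw);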
2714
2715#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2716#include "selftests/mock_uncore.c"
2717#include "selftests/intel_uncore.c"
2718#endif