Bug Summary

File:    dev/pci/drm/i915/intel_uncore.c
Warning: line 1765, column 3
Value stored to 'mmio_size' is never read
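
This is a deadcode.DeadStores diagnostic: the value assigned to the local variable 'mmio_size' at line 1765 is never read afterwards. The flagged line falls outside the excerpt below (the listing here ends around line 1001 of the file), so the following is only a minimal sketch of the pattern this checker reports, with a hypothetical function name and values, not the actual code at intel_uncore.c:1765:

/* Hypothetical illustration only; not the flagged i915 code. */
static unsigned int guess_mmio_size(void)
{
	unsigned int mmio_size;

	mmio_size = 2 * 1024 * 1024;	/* dead store: this value is never read ... */
	mmio_size = 4 * 1024 * 1024;	/* ... because it is unconditionally overwritten here */

	return mmio_size;
}

The usual fix is either to drop the first assignment or to actually use the stored value before overwriting it; either way the store the analyzer flagged disappears.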

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name intel_uncore.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/intel_uncore.c
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <linux/pm_runtime.h>
25#include <asm/iosf_mbi.h>
26
27#include "i915_drv.h"
28#include "i915_trace.h"
29#include "i915_vgpu.h"
30#include "intel_pm.h"
31
32#define FORCEWAKE_ACK_TIMEOUT_MS50 50
33#define GT_FIFO_TIMEOUT_MS10 10
34
35#define __raw_posting_read(...)((void)__raw_uncore_read32(...)) ((void)__raw_uncore_read32(__VA_ARGS__))
36
37void
38intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
39{
40 mtx_init(&mmio_debug->lock, IPL_TTY)do { (void)(((void *)0)); (void)(0); __mtx_init((&mmio_debug
->lock), ((((0x9)) > 0x0 && ((0x9)) < 0x9) ?
0x9 : ((0x9)))); } while (0)
;
41 mmio_debug->unclaimed_mmio_check = 1;
42}
43
44static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
45{
46 lockdep_assert_held(&mmio_debug->lock)do { (void)(&mmio_debug->lock); } while(0);
47
48 /* Save and disable mmio debugging for the user bypass */
49 if (!mmio_debug->suspend_count++) {
50 mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
51 mmio_debug->unclaimed_mmio_check = 0;
52 }
53}
54
55static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
56{
57 lockdep_assert_held(&mmio_debug->lock)do { (void)(&mmio_debug->lock); } while(0);
58
59 if (!--mmio_debug->suspend_count)
60 mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
61}
62
63static const char * const forcewake_domain_names[] = {
64 "render",
65 "blitter",
66 "media",
67 "vdbox0",
68 "vdbox1",
69 "vdbox2",
70 "vdbox3",
71 "vebox0",
72 "vebox1",
73};
74
75const char *
76intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
77{
78 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT)extern char _ctassert[(!((sizeof((forcewake_domain_names)) / sizeof
((forcewake_domain_names)[0])) != FW_DOMAIN_ID_COUNT)) ? 1 : -
1 ] __attribute__((__unused__))
;
79
80 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
81 return forcewake_domain_names[id];
82
83 WARN_ON(id)({ int __ret = !!((id)); if (__ret) printf("%s", "WARN_ON(" "id"
")"); __builtin_expect(!!(__ret), 0); })
;
84
85 return "unknown";
86}
87
88#define fw_ack(d)ioread32((d)->reg_ack) readl((d)->reg_ack)ioread32((d)->reg_ack)
89#define fw_set(d, val)iowrite32(({ typeof((val)) _a = ((val)); ({ if (__builtin_constant_p
(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while
(0); if (__builtin_constant_p(_a) && __builtin_constant_p
(_a)) do { } while (0); ((_a) << 16 | (_a)); }); }), (d
)->reg_set)
writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)iowrite32(({ typeof((val)) _a = ((val)); ({ if (__builtin_constant_p
(_a)) do { } while (0); if (__builtin_constant_p(_a)) do { } while
(0); if (__builtin_constant_p(_a) && __builtin_constant_p
(_a)) do { } while (0); ((_a) << 16 | (_a)); }); }), (d
)->reg_set)
90#define fw_clear(d, val)iowrite32((({ if (__builtin_constant_p(((val)))) do { } while
(0); if (__builtin_constant_p(0)) do { } while (0); if (__builtin_constant_p
(((val))) && __builtin_constant_p(0)) do { } while (0
); ((((val))) << 16 | (0)); })), (d)->reg_set)
writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)iowrite32((({ if (__builtin_constant_p(((val)))) do { } while
(0); if (__builtin_constant_p(0)) do { } while (0); if (__builtin_constant_p
(((val))) && __builtin_constant_p(0)) do { } while (0
); ((((val))) << 16 | (0)); })), (d)->reg_set)
91
92static inline void
93fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
94{
95 /*
96 * We don't really know if the powerwell for the forcewake domain we are
97 * trying to reset here does exist at this point (engines could be fused
98 * off in ICL+), so no waiting for acks
99 */
100 /* WaRsClearFWBitsAtReset:bdw,skl */
101 fw_clear(d, 0xffff)iowrite32((({ if (__builtin_constant_p(((0xffff)))) do { } while
(0); if (__builtin_constant_p(0)) do { } while (0); if (__builtin_constant_p
(((0xffff))) && __builtin_constant_p(0)) do { } while
(0); ((((0xffff))) << 16 | (0)); })), (d)->reg_set)
;
102}
103
104static inline void
105fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
106{
107 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask)((void)0);
108 d->uncore->fw_domains_timer |= d->mask;
109 d->wake_count++;
110#ifdef __linux__
111 hrtimer_start_range_ns(&d->timer,
112 NSEC_PER_MSEC1000000L,
113 NSEC_PER_MSEC1000000L,
114 HRTIMER_MODE_REL1);
115#else
116 timeout_add_msec(&d->timer, 1);
117#endif
118}
119
120static inline int
121__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
122 const u32 ack,
123 const u32 value)
124{
125 return wait_for_atomic((fw_ack(d) & ack) == value,({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((((ioread32((d)->
reg_ack) & ack) == value))) { ret = 0; break; } if (now -
base >= timeout) { ret = -60; break; } cpu_relax(); if (!
(1)) { ; if (__builtin_expect(!!(cpu != (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid)), 0)) { timeout
-= now - base; cpu = (({struct cpu_info *__ci; asm volatile(
"movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct
cpu_info, ci_self))); __ci;})->ci_cpuid); base = local_clock
(); } } } ret; }); })
126 FORCEWAKE_ACK_TIMEOUT_MS)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((((ioread32((d)->
reg_ack) & ack) == value))) { ret = 0; break; } if (now -
base >= timeout) { ret = -60; break; } cpu_relax(); if (!
(1)) { ; if (__builtin_expect(!!(cpu != (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid)), 0)) { timeout
-= now - base; cpu = (({struct cpu_info *__ci; asm volatile(
"movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct
cpu_info, ci_self))); __ci;})->ci_cpuid); base = local_clock
(); } } } ret; }); })
;
127}
128
129static inline int
130wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
131 const u32 ack)
132{
133 return __wait_for_ack(d, ack, 0);
134}
135
136static inline int
137wait_ack_set(const struct intel_uncore_forcewake_domain *d,
138 const u32 ack)
139{
140 return __wait_for_ack(d, ack, ack);
141}
142
143static inline void
144fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
145{
146 if (wait_ack_clear(d, FORCEWAKE_KERNEL(1UL << (0)))) {
147 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",__drm_err("%s: timed out waiting for forcewake ack to clear.\n"
, intel_uncore_forcewake_domain_to_str(d->id))
148 intel_uncore_forcewake_domain_to_str(d->id))__drm_err("%s: timed out waiting for forcewake ack to clear.\n"
, intel_uncore_forcewake_domain_to_str(d->id))
;
149 add_taint_for_CI(d->uncore->i915, TAINT_WARN1); /* CI now unreliable */
150 }
151}
152
153enum ack_type {
154 ACK_CLEAR = 0,
155 ACK_SET
156};
157
158static int
159fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
160 const enum ack_type type)
161{
162 const u32 ack_bit = FORCEWAKE_KERNEL(1UL << (0));
163 const u32 value = type == ACK_SET ? ack_bit : 0;
164 unsigned int pass;
165 bool_Bool ack_detected;
166
167 /*
168 * There is a possibility of driver's wake request colliding
169 * with hardware's own wake requests and that can cause
170 * hardware to not deliver the driver's ack message.
171 *
172 * Use a fallback bit toggle to kick the gpu state machine
173 * in the hope that the original ack will be delivered along with
174 * the fallback ack.
175 *
176 * This workaround is described in HSDES #1604254524 and it's known as:
177 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
178 * although the name is a bit misleading.
179 */
180
181 pass = 1;
182 do {
183 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK(1UL << (15)));
184
185 fw_set(d, FORCEWAKE_KERNEL_FALLBACK)iowrite32(({ typeof(((1UL << (15)))) _a = (((1UL <<
(15)))); ({ if (__builtin_constant_p(_a)) do { } while (0); if
(__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p
(_a) && __builtin_constant_p(_a)) do { } while (0); (
(_a) << 16 | (_a)); }); }), (d)->reg_set)
;
186 /* Give gt some time to relax before the polling frenzy */
187 udelay(10 * pass);
188 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK(1UL << (15)));
189
190 ack_detected = (fw_ack(d)ioread32((d)->reg_ack) & ack_bit) == value;
191
192 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK)iowrite32((({ if (__builtin_constant_p((((1UL << (15)))
))) do { } while (0); if (__builtin_constant_p(0)) do { } while
(0); if (__builtin_constant_p((((1UL << (15))))) &&
__builtin_constant_p(0)) do { } while (0); (((((1UL <<
(15))))) << 16 | (0)); })), (d)->reg_set)
;
193 } while (!ack_detected && pass++ < 10);
194
195 DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",__drm_dbg(DRM_UT_DRIVER, "%s had to use fallback to %s ack, 0x%x (passes %u)\n"
, intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET
? "set" : "clear", ioread32((d)->reg_ack), pass)
196 intel_uncore_forcewake_domain_to_str(d->id),__drm_dbg(DRM_UT_DRIVER, "%s had to use fallback to %s ack, 0x%x (passes %u)\n"
, intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET
? "set" : "clear", ioread32((d)->reg_ack), pass)
197 type == ACK_SET ? "set" : "clear",__drm_dbg(DRM_UT_DRIVER, "%s had to use fallback to %s ack, 0x%x (passes %u)\n"
, intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET
? "set" : "clear", ioread32((d)->reg_ack), pass)
198 fw_ack(d),__drm_dbg(DRM_UT_DRIVER, "%s had to use fallback to %s ack, 0x%x (passes %u)\n"
, intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET
? "set" : "clear", ioread32((d)->reg_ack), pass)
199 pass)__drm_dbg(DRM_UT_DRIVER, "%s had to use fallback to %s ack, 0x%x (passes %u)\n"
, intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET
? "set" : "clear", ioread32((d)->reg_ack), pass)
;
200
201 return ack_detected ? 0 : -ETIMEDOUT60;
202}
203
204static inline void
205fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
206{
207 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL))__builtin_expect(!!(!wait_ack_clear(d, (1UL << (0)))), 1
)
)
208 return;
209
210 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
211 fw_domain_wait_ack_clear(d);
212}
213
214static inline void
215fw_domain_get(const struct intel_uncore_forcewake_domain *d)
216{
217 fw_set(d, FORCEWAKE_KERNEL)iowrite32(({ typeof(((1UL << (0)))) _a = (((1UL <<
(0)))); ({ if (__builtin_constant_p(_a)) do { } while (0); if
(__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p
(_a) && __builtin_constant_p(_a)) do { } while (0); (
(_a) << 16 | (_a)); }); }), (d)->reg_set)
;
218}
219
220static inline void
221fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
222{
223 if (wait_ack_set(d, FORCEWAKE_KERNEL(1UL << (0)))) {
224 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",__drm_err("%s: timed out waiting for forcewake ack request.\n"
, intel_uncore_forcewake_domain_to_str(d->id))
225 intel_uncore_forcewake_domain_to_str(d->id))__drm_err("%s: timed out waiting for forcewake ack request.\n"
, intel_uncore_forcewake_domain_to_str(d->id))
;
226 add_taint_for_CI(d->uncore->i915, TAINT_WARN1); /* CI now unreliable */
227 }
228}
229
230static inline void
231fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
232{
233 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL))__builtin_expect(!!(!wait_ack_set(d, (1UL << (0)))), 1))
234 return;
235
236 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
237 fw_domain_wait_ack_set(d);
238}
239
240static inline void
241fw_domain_put(const struct intel_uncore_forcewake_domain *d)
242{
243 fw_clear(d, FORCEWAKE_KERNEL)iowrite32((({ if (__builtin_constant_p((((1UL << (0))))
)) do { } while (0); if (__builtin_constant_p(0)) do { } while
(0); if (__builtin_constant_p((((1UL << (0))))) &&
__builtin_constant_p(0)) do { } while (0); (((((1UL <<
(0))))) << 16 | (0)); })), (d)->reg_set)
;
244}
245
246static void
247fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
248{
249 struct intel_uncore_forcewake_domain *d;
250 unsigned int tmp;
251
252 GEM_BUG_ON(fw_domains & ~uncore->fw_domains)((void)0);
253
254 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
255 fw_domain_wait_ack_clear(d);
256 fw_domain_get(d);
257 }
258
259 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
260 fw_domain_wait_ack_set(d);
261
262 uncore->fw_domains_active |= fw_domains;
263}
264
265static void
266fw_domains_get_with_fallback(struct intel_uncore *uncore,
267 enum forcewake_domains fw_domains)
268{
269 struct intel_uncore_forcewake_domain *d;
270 unsigned int tmp;
271
272 GEM_BUG_ON(fw_domains & ~uncore->fw_domains)((void)0);
273
274 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
275 fw_domain_wait_ack_clear_fallback(d);
276 fw_domain_get(d);
277 }
278
279 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
280 fw_domain_wait_ack_set_fallback(d);
281
282 uncore->fw_domains_active |= fw_domains;
283}
284
285static void
286fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
287{
288 struct intel_uncore_forcewake_domain *d;
289 unsigned int tmp;
290
291 GEM_BUG_ON(fw_domains & ~uncore->fw_domains)((void)0);
292
293 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
294 fw_domain_put(d);
295
296 uncore->fw_domains_active &= ~fw_domains;
297}
298
299static void
300fw_domains_reset(struct intel_uncore *uncore,
301 enum forcewake_domains fw_domains)
302{
303 struct intel_uncore_forcewake_domain *d;
304 unsigned int tmp;
305
306 if (!fw_domains)
307 return;
308
309 GEM_BUG_ON(fw_domains & ~uncore->fw_domains)((void)0);
310
311 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(d = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
312 fw_domain_reset(d);
313}
314
315static inline u32 gt_thread_status(struct intel_uncore *uncore)
316{
317 u32 val;
318
319 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG((const i915_reg_t){ .reg = (0x13805c) }));
320 val &= GEN6_GT_THREAD_STATUS_CORE_MASK0x7;
321
322 return val;
323}
324
325static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
326{
327 /*
328 * w/a for a sporadic read returning 0 by waiting for the GT
329 * thread to wake up.
330 */
331 drm_WARN_ONCE(&uncore->i915->drm,({ static int __warned; int __ret = !!(({ extern char _ctassert
[(!(!__builtin_constant_p(5000))) ? 1 : -1 ] __attribute__((__unused__
)); extern char _ctassert[(!((5000) > 50000)) ? 1 : -1 ] __attribute__
((__unused__)); ({ int cpu, ret, timeout = ((5000)) * 1000; u64
base; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((gt_thread_status(uncore
) == 0)) { ret = 0; break; } if (now - base >= timeout) { ret
= -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })); if (__ret && !__warned) { printf("%s %s: " "GT thread status wait timed out\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
); __warned = 1; } __builtin_expect(!!(__ret), 0); })
332 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),({ static int __warned; int __ret = !!(({ extern char _ctassert
[(!(!__builtin_constant_p(5000))) ? 1 : -1 ] __attribute__((__unused__
)); extern char _ctassert[(!((5000) > 50000)) ? 1 : -1 ] __attribute__
((__unused__)); ({ int cpu, ret, timeout = ((5000)) * 1000; u64
base; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((gt_thread_status(uncore
) == 0)) { ret = 0; break; } if (now - base >= timeout) { ret
= -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })); if (__ret && !__warned) { printf("%s %s: " "GT thread status wait timed out\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
); __warned = 1; } __builtin_expect(!!(__ret), 0); })
333 "GT thread status wait timed out\n")({ static int __warned; int __ret = !!(({ extern char _ctassert
[(!(!__builtin_constant_p(5000))) ? 1 : -1 ] __attribute__((__unused__
)); extern char _ctassert[(!((5000) > 50000)) ? 1 : -1 ] __attribute__
((__unused__)); ({ int cpu, ret, timeout = ((5000)) * 1000; u64
base; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((gt_thread_status(uncore
) == 0)) { ret = 0; break; } if (now - base >= timeout) { ret
= -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })); if (__ret && !__warned) { printf("%s %s: " "GT thread status wait timed out\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
); __warned = 1; } __builtin_expect(!!(__ret), 0); })
;
334}
335
336static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
337 enum forcewake_domains fw_domains)
338{
339 fw_domains_get(uncore, fw_domains);
340
341 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
342 __gen6_gt_wait_for_thread_c0(uncore);
343}
344
345static inline u32 fifo_free_entries(struct intel_uncore *uncore)
346{
347 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL((const i915_reg_t){ .reg = (0x120008) }));
348
349 return count & GT_FIFO_FREE_ENTRIES_MASK0x7f;
350}
351
352static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
353{
354 u32 n;
355
356 /* On VLV, FIFO will be shared by both SW and HW.
357 * So, we need to read the FREE_ENTRIES everytime */
358 if (IS_VALLEYVIEW(uncore->i915)IS_PLATFORM(uncore->i915, INTEL_VALLEYVIEW))
359 n = fifo_free_entries(uncore);
360 else
361 n = uncore->fifo_count;
362
363 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES20) {
364 if (wait_for_atomic((n = fifo_free_entries(uncore)) >({ extern char _ctassert[(!(!__builtin_constant_p((10) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((10) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((10) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((((n = fifo_free_entries
(uncore)) > 20))) { ret = 0; break; } if (now - base >=
timeout) { ret = -60; break; } cpu_relax(); if (!(1)) { ; if
(__builtin_expect(!!(cpu != (({struct cpu_info *__ci; asm volatile
("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct
cpu_info, ci_self))); __ci;})->ci_cpuid)), 0)) { timeout -=
now - base; cpu = (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
365 GT_FIFO_NUM_RESERVED_ENTRIES,({ extern char _ctassert[(!(!__builtin_constant_p((10) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((10) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((10) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((((n = fifo_free_entries
(uncore)) > 20))) { ret = 0; break; } if (now - base >=
timeout) { ret = -60; break; } cpu_relax(); if (!(1)) { ; if
(__builtin_expect(!!(cpu != (({struct cpu_info *__ci; asm volatile
("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct
cpu_info, ci_self))); __ci;})->ci_cpuid)), 0)) { timeout -=
now - base; cpu = (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
366 GT_FIFO_TIMEOUT_MS)({ extern char _ctassert[(!(!__builtin_constant_p((10) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((10) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((10) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if ((((n = fifo_free_entries
(uncore)) > 20))) { ret = 0; break; } if (now - base >=
timeout) { ret = -60; break; } cpu_relax(); if (!(1)) { ; if
(__builtin_expect(!!(cpu != (({struct cpu_info *__ci; asm volatile
("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct
cpu_info, ci_self))); __ci;})->ci_cpuid)), 0)) { timeout -=
now - base; cpu = (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
) {
367 drm_dbg(&uncore->i915->drm,drm_dev_dbg((&uncore->i915->drm)->dev, DRM_UT_DRIVER
, "GT_FIFO timeout, entries: %u\n", n)
368 "GT_FIFO timeout, entries: %u\n", n)drm_dev_dbg((&uncore->i915->drm)->dev, DRM_UT_DRIVER
, "GT_FIFO timeout, entries: %u\n", n)
;
369 return;
370 }
371 }
372
373 uncore->fifo_count = n - 1;
374}
375
376#ifdef __linux__
377
378static enum hrtimer_restart
379intel_uncore_fw_release_timer(struct hrtimer *timer)
380{
381 struct intel_uncore_forcewake_domain *domain =
382 container_of(timer, struct intel_uncore_forcewake_domain, timer)({ const __typeof( ((struct intel_uncore_forcewake_domain *)0
)->timer ) *__mptr = (timer); (struct intel_uncore_forcewake_domain
*)( (char *)__mptr - __builtin_offsetof(struct intel_uncore_forcewake_domain
, timer) );})
;
383 struct intel_uncore *uncore = domain->uncore;
384 unsigned long irqflags;
385
386 assert_rpm_device_not_suspended(uncore->rpm);
387
388 if (xchg(&domain->active, false)__sync_lock_test_and_set(&domain->active, 0))
389 return HRTIMER_RESTART;
390
391 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
;
392
393 uncore->fw_domains_timer &= ~domain->mask;
394
395 GEM_BUG_ON(!domain->wake_count)((void)0);
396 if (--domain->wake_count == 0)
397 uncore->funcs.force_wake_put(uncore, domain->mask);
398
399 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
400
401 return HRTIMER_NORESTART;
402}
403
404#else
405
406void
407intel_uncore_fw_release_timer(void *arg)
408{
409 struct intel_uncore_forcewake_domain *domain = arg;
410 struct intel_uncore *uncore = domain->uncore;
411 unsigned long irqflags;
412
413 assert_rpm_device_not_suspended(uncore->rpm);
414
415 if (xchg(&domain->active, false)__sync_lock_test_and_set(&domain->active, 0))
416 return;
417
418 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
;
419
420 uncore->fw_domains_timer &= ~domain->mask;
421
422 GEM_BUG_ON(!domain->wake_count)((void)0);
423 if (--domain->wake_count == 0)
424 uncore->funcs.force_wake_put(uncore, domain->mask);
425
426 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
427}
428
429#endif
430
431/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
432static unsigned int
433intel_uncore_forcewake_reset(struct intel_uncore *uncore)
434{
435 unsigned long irqflags;
436 struct intel_uncore_forcewake_domain *domain;
437 int retry_count = 100;
438 enum forcewake_domains fw, active_domains;
439
440 iosf_mbi_assert_punit_acquired();
441
442 /* Hold uncore.lock across reset to prevent any register access
443 * with forcewake not set correctly. Wait until all pending
444 * timers are run before holding.
445 */
446 while (1) {
447 unsigned int tmp;
448
449 active_domains = 0;
450
451 for_each_fw_domain(domain, uncore, tmp)for (tmp = ((uncore)->fw_domains); tmp ;) if (!(domain = (
uncore)->fw_domain[({ int __idx = ffs(tmp) - 1; tmp &=
~(1UL << (__idx)); __idx; })])) {} else
{
452 smp_store_mb(domain->active, false)do { domain->active = 0; do { __asm volatile("mfence" ::: "memory"
); } while (0); } while (0)
;
453 if (hrtimer_cancel(&domain->timer)timeout_del(&domain->timer) == 0)
454 continue;
455
456 intel_uncore_fw_release_timer(&domain->timer);
457 }
458
459 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
;
460
461 for_each_fw_domain(domain, uncore, tmp)for (tmp = ((uncore)->fw_domains); tmp ;) if (!(domain = (
uncore)->fw_domain[({ int __idx = ffs(tmp) - 1; tmp &=
~(1UL << (__idx)); __idx; })])) {} else
{
462 if (hrtimer_active(&domain->timer)((&domain->timer)->to_flags & 0x02))
463 active_domains |= domain->mask;
464 }
465
466 if (active_domains == 0)
467 break;
468
469 if (--retry_count == 0) {
470 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Timed out waiting for forcewake timers to finish\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
471 break;
472 }
473
474 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
475 cond_resched()do { if (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_schedstate.spc_schedflags & 0x0002) yield
(); } while (0)
;
476 }
477
478 drm_WARN_ON(&uncore->i915->drm, active_domains)({ int __ret = !!((active_domains)); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&uncore->i915->drm))->
dev), "", "drm_WARN_ON(" "active_domains" ")"); __builtin_expect
(!!(__ret), 0); })
;
479
480 fw = uncore->fw_domains_active;
481 if (fw)
482 uncore->funcs.force_wake_put(uncore, fw);
483
484 fw_domains_reset(uncore, uncore->fw_domains);
485 assert_forcewakes_inactive(uncore);
486
487 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
488
489 return fw; /* track the lost user forcewake domains */
490}
491
492static bool_Bool
493fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
494{
495 u32 dbg;
496
497 dbg = __raw_uncore_read32(uncore, FPGA_DBG((const i915_reg_t){ .reg = (0x42300) }));
498 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))__builtin_expect(!!(!(dbg & (1 << 31))), 1))
499 return false0;
500
501 __raw_uncore_write32(uncore, FPGA_DBG((const i915_reg_t){ .reg = (0x42300) }), FPGA_DBG_RM_NOCLAIM(1 << 31));
502
503 return true1;
504}
505
506static bool_Bool
507vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
508{
509 u32 cer;
510
511 cer = __raw_uncore_read32(uncore, CLAIM_ER((const i915_reg_t){ .reg = (0x180000 + 0x2028) }));
512 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))__builtin_expect(!!(!(cer & ((1 << 16) | 0xffff))),
1)
)
513 return false0;
514
515 __raw_uncore_write32(uncore, CLAIM_ER((const i915_reg_t){ .reg = (0x180000 + 0x2028) }), CLAIM_ER_CLR(1 << 31));
516
517 return true1;
518}
519
520static bool_Bool
521gen6_check_for_fifo_debug(struct intel_uncore *uncore)
522{
523 u32 fifodbg;
524
525 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG((const i915_reg_t){ .reg = (0x120000) }));
526
527 if (unlikely(fifodbg)__builtin_expect(!!(fifodbg), 0)) {
528 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg)drm_dev_dbg((&uncore->i915->drm)->dev, DRM_UT_DRIVER
, "GTFIFODBG = 0x08%x\n", fifodbg)
;
529 __raw_uncore_write32(uncore, GTFIFODBG((const i915_reg_t){ .reg = (0x120000) }), fifodbg);
530 }
531
532 return fifodbg;
533}
534
535static bool_Bool
536check_for_unclaimed_mmio(struct intel_uncore *uncore)
537{
538 bool_Bool ret = false0;
539
540 lockdep_assert_held(&uncore->debug->lock)do { (void)(&uncore->debug->lock); } while(0);
541
542 if (uncore->debug->suspend_count)
543 return false0;
544
545 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
546 ret |= fpga_check_for_unclaimed_mmio(uncore);
547
548 if (intel_uncore_has_dbg_unclaimed(uncore))
549 ret |= vlv_check_for_unclaimed_mmio(uncore);
550
551 if (intel_uncore_has_fifo(uncore))
552 ret |= gen6_check_for_fifo_debug(uncore);
553
554 return ret;
555}
556
557static void forcewake_early_sanitize(struct intel_uncore *uncore,
558 unsigned int restore_forcewake)
559{
560 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore))((void)0);
561
562 /* WaDisableShadowRegForCpd:chv */
563 if (IS_CHERRYVIEW(uncore->i915)IS_PLATFORM(uncore->i915, INTEL_CHERRYVIEW)) {
564 __raw_uncore_write32(uncore, GTFIFOCTL((const i915_reg_t){ .reg = (0x120008) }),
565 __raw_uncore_read32(uncore, GTFIFOCTL((const i915_reg_t){ .reg = (0x120008) })) |
566 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL(1 << 12) |
567 GT_FIFO_CTL_RC6_POLICY_STALL(1 << 11));
568 }
569
570 iosf_mbi_punit_acquire();
571 intel_uncore_forcewake_reset(uncore);
572 if (restore_forcewake) {
573 spin_lock_irq(&uncore->lock)mtx_enter(&uncore->lock);
574 uncore->funcs.force_wake_get(uncore, restore_forcewake);
575
576 if (intel_uncore_has_fifo(uncore))
577 uncore->fifo_count = fifo_free_entries(uncore);
578 spin_unlock_irq(&uncore->lock)mtx_leave(&uncore->lock);
579 }
580 iosf_mbi_punit_release();
581}
582
583void intel_uncore_suspend(struct intel_uncore *uncore)
584{
585 if (!intel_uncore_has_forcewake(uncore))
586 return;
587
588 iosf_mbi_punit_acquire();
589 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(0
590 &uncore->pmic_bus_access_nb)0;
591 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
592 iosf_mbi_punit_release();
593}
594
595void intel_uncore_resume_early(struct intel_uncore *uncore)
596{
597 unsigned int restore_forcewake;
598
599 if (intel_uncore_unclaimed_mmio(uncore))
600 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n")drm_dev_dbg((&uncore->i915->drm)->dev, DRM_UT_DRIVER
, "unclaimed mmio detected on resume, clearing\n")
;
601
602 if (!intel_uncore_has_forcewake(uncore))
603 return;
604
605 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved)({ typeof(*&uncore->fw_domains_saved) __T = *(&uncore
->fw_domains_saved); *(&uncore->fw_domains_saved) =
(typeof(*&uncore->fw_domains_saved))0; __T; })
;
606 forcewake_early_sanitize(uncore, restore_forcewake);
607
608 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb)0;
609}
610
611void intel_uncore_runtime_resume(struct intel_uncore *uncore)
612{
613 if (!intel_uncore_has_forcewake(uncore))
614 return;
615
616 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb)0;
617}
618
619static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
620 enum forcewake_domains fw_domains)
621{
622 struct intel_uncore_forcewake_domain *domain;
623 unsigned int tmp;
624
625 fw_domains &= uncore->fw_domains;
626
627 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(domain = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
628 if (domain->wake_count++) {
629 fw_domains &= ~domain->mask;
630 domain->active = true1;
631 }
632 }
633
634 if (fw_domains)
635 uncore->funcs.force_wake_get(uncore, fw_domains);
636}
637
638/**
639 * intel_uncore_forcewake_get - grab forcewake domain references
640 * @uncore: the intel_uncore structure
641 * @fw_domains: forcewake domains to get reference on
642 *
643 * This function can be used get GT's forcewake domain references.
644 * Normal register access will handle the forcewake domains automatically.
645 * However if some sequence requires the GT to not power down a particular
646 * forcewake domains this function should be called at the beginning of the
647 * sequence. And subsequently the reference should be dropped by symmetric
648 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
649 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
650 */
651void intel_uncore_forcewake_get(struct intel_uncore *uncore,
652 enum forcewake_domains fw_domains)
653{
654 unsigned long irqflags;
655
656 if (!uncore->funcs.force_wake_get)
657 return;
658
659 assert_rpm_wakelock_held(uncore->rpm);
660
661 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
;
662 __intel_uncore_forcewake_get(uncore, fw_domains);
663 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
664}
665
666/**
667 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
668 * @uncore: the intel_uncore structure
669 *
670 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
671 * the GT powerwell and in the process disable our debugging for the
672 * duration of userspace's bypass.
673 */
674void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
675{
676 spin_lock_irq(&uncore->lock)mtx_enter(&uncore->lock);
677 if (!uncore->user_forcewake_count++) {
678 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
679 spin_lock(&uncore->debug->lock)mtx_enter(&uncore->debug->lock);
680 mmio_debug_suspend(uncore->debug);
681 spin_unlock(&uncore->debug->lock)mtx_leave(&uncore->debug->lock);
682 }
683 spin_unlock_irq(&uncore->lock)mtx_leave(&uncore->lock);
684}
685
686/**
687 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
688 * @uncore: the intel_uncore structure
689 *
690 * This function complements intel_uncore_forcewake_user_get() and releases
691 * the GT powerwell taken on behalf of the userspace bypass.
692 */
693void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
694{
695 spin_lock_irq(&uncore->lock)mtx_enter(&uncore->lock);
696 if (!--uncore->user_forcewake_count) {
697 spin_lock(&uncore->debug->lock)mtx_enter(&uncore->debug->lock);
698 mmio_debug_resume(uncore->debug);
699
700 if (check_for_unclaimed_mmio(uncore))
701 drm_info(&uncore->i915->drm,do { } while(0)
702 "Invalid mmio detected during user access\n")do { } while(0);
703 spin_unlock(&uncore->debug->lock)mtx_leave(&uncore->debug->lock);
704
705 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
706 }
707 spin_unlock_irq(&uncore->lock)mtx_leave(&uncore->lock);
708}
709
710/**
711 * intel_uncore_forcewake_get__locked - grab forcewake domain references
712 * @uncore: the intel_uncore structure
713 * @fw_domains: forcewake domains to get reference on
714 *
715 * See intel_uncore_forcewake_get(). This variant places the onus
716 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
717 */
718void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
719 enum forcewake_domains fw_domains)
720{
721 lockdep_assert_held(&uncore->lock)do { (void)(&uncore->lock); } while(0);
722
723 if (!uncore->funcs.force_wake_get)
724 return;
725
726 __intel_uncore_forcewake_get(uncore, fw_domains);
727}
728
729static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
730 enum forcewake_domains fw_domains)
731{
732 struct intel_uncore_forcewake_domain *domain;
733 unsigned int tmp;
734
735 fw_domains &= uncore->fw_domains;
736
737 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(domain = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
738 GEM_BUG_ON(!domain->wake_count)((void)0);
739
740 if (--domain->wake_count) {
741 domain->active = true1;
742 continue;
743 }
744
745 uncore->funcs.force_wake_put(uncore, domain->mask);
746 }
747}
748
749/**
750 * intel_uncore_forcewake_put - release a forcewake domain reference
751 * @uncore: the intel_uncore structure
752 * @fw_domains: forcewake domains to put references
753 *
754 * This function drops the device-level forcewakes for specified
755 * domains obtained by intel_uncore_forcewake_get().
756 */
757void intel_uncore_forcewake_put(struct intel_uncore *uncore,
758 enum forcewake_domains fw_domains)
759{
760 unsigned long irqflags;
761
762 if (!uncore->funcs.force_wake_put)
763 return;
764
765 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
;
766 __intel_uncore_forcewake_put(uncore, fw_domains);
767 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
;
768}
769
770/**
771 * intel_uncore_forcewake_flush - flush the delayed release
772 * @uncore: the intel_uncore structure
773 * @fw_domains: forcewake domains to flush
774 */
775void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
776 enum forcewake_domains fw_domains)
777{
778 struct intel_uncore_forcewake_domain *domain;
779 unsigned int tmp;
780
781 if (!uncore->funcs.force_wake_put)
782 return;
783
784 fw_domains &= uncore->fw_domains;
785 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(domain = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
786 WRITE_ONCE(domain->active, false)({ typeof(domain->active) __tmp = (0); *(volatile typeof(domain
->active) *)&(domain->active) = __tmp; __tmp; })
;
787 if (hrtimer_cancel(&domain->timer)timeout_del(&domain->timer))
788 intel_uncore_fw_release_timer(&domain->timer);
789 }
790}
791
792/**
793 * intel_uncore_forcewake_put__locked - grab forcewake domain references
794 * @uncore: the intel_uncore structure
795 * @fw_domains: forcewake domains to get reference on
796 *
797 * See intel_uncore_forcewake_put(). This variant places the onus
798 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
799 */
800void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
801 enum forcewake_domains fw_domains)
802{
803 lockdep_assert_held(&uncore->lock)do { (void)(&uncore->lock); } while(0);
804
805 if (!uncore->funcs.force_wake_put)
806 return;
807
808 __intel_uncore_forcewake_put(uncore, fw_domains);
809}
810
811void assert_forcewakes_inactive(struct intel_uncore *uncore)
812{
813 if (!uncore->funcs.force_wake_get)
814 return;
815
816 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,({ int __ret = !!(uncore->fw_domains_active); if (__ret) printf
("%s %s: " "Expected all fw_domains to be inactive, but %08x are still on\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, uncore->fw_domains_active); __builtin_expect(!!(__ret), 0
); })
817 "Expected all fw_domains to be inactive, but %08x are still on\n",({ int __ret = !!(uncore->fw_domains_active); if (__ret) printf
("%s %s: " "Expected all fw_domains to be inactive, but %08x are still on\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, uncore->fw_domains_active); __builtin_expect(!!(__ret), 0
); })
818 uncore->fw_domains_active)({ int __ret = !!(uncore->fw_domains_active); if (__ret) printf
("%s %s: " "Expected all fw_domains to be inactive, but %08x are still on\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, uncore->fw_domains_active); __builtin_expect(!!(__ret), 0
); })
;
819}
820
821void assert_forcewakes_active(struct intel_uncore *uncore,
822 enum forcewake_domains fw_domains)
823{
824 struct intel_uncore_forcewake_domain *domain;
825 unsigned int tmp;
826
827 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)0)
828 return;
829
830 if (!uncore->funcs.force_wake_get)
831 return;
832
833 spin_lock_irq(&uncore->lock)mtx_enter(&uncore->lock);
834
835 assert_rpm_wakelock_held(uncore->rpm);
836
837 fw_domains &= uncore->fw_domains;
838 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,({ int __ret = !!(fw_domains & ~uncore->fw_domains_active
); if (__ret) printf("%s %s: " "Expected %08x fw_domains to be active, but %08x are off\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, fw_domains, fw_domains & ~uncore->fw_domains_active)
; __builtin_expect(!!(__ret), 0); })
839 "Expected %08x fw_domains to be active, but %08x are off\n",({ int __ret = !!(fw_domains & ~uncore->fw_domains_active
); if (__ret) printf("%s %s: " "Expected %08x fw_domains to be active, but %08x are off\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, fw_domains, fw_domains & ~uncore->fw_domains_active)
; __builtin_expect(!!(__ret), 0); })
840 fw_domains, fw_domains & ~uncore->fw_domains_active)({ int __ret = !!(fw_domains & ~uncore->fw_domains_active
); if (__ret) printf("%s %s: " "Expected %08x fw_domains to be active, but %08x are off\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, fw_domains, fw_domains & ~uncore->fw_domains_active)
; __builtin_expect(!!(__ret), 0); })
;
841
842 /*
843 * Check that the caller has an explicit wakeref and we don't mistake
844 * it for the auto wakeref.
845 */
846 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(domain = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
{
847 unsigned int actual = READ_ONCE(domain->wake_count)({ typeof(domain->wake_count) __tmp = *(volatile typeof(domain
->wake_count) *)&(domain->wake_count); membar_datadep_consumer
(); __tmp; })
;
848 unsigned int expect = 1;
849
850 if (uncore->fw_domains_timer & domain->mask)
851 expect++; /* pending automatic release */
852
853 if (drm_WARN(&uncore->i915->drm, actual < expect,({ int __ret = !!(actual < expect); if (__ret) printf("%s %s: "
"Expected domain %d to be held awake by caller, count=%d\n",
dev_driver_string((&uncore->i915->drm)->dev), ""
, domain->id, actual); __builtin_expect(!!(__ret), 0); })
854 "Expected domain %d to be held awake by caller, count=%d\n",({ int __ret = !!(actual < expect); if (__ret) printf("%s %s: "
"Expected domain %d to be held awake by caller, count=%d\n",
dev_driver_string((&uncore->i915->drm)->dev), ""
, domain->id, actual); __builtin_expect(!!(__ret), 0); })
855 domain->id, actual)({ int __ret = !!(actual < expect); if (__ret) printf("%s %s: "
"Expected domain %d to be held awake by caller, count=%d\n",
dev_driver_string((&uncore->i915->drm)->dev), ""
, domain->id, actual); __builtin_expect(!!(__ret), 0); })
)
856 break;
857 }
858
859 spin_unlock_irq(&uncore->lock)mtx_leave(&uncore->lock);
860}
861
862/* We give fast paths for the really cool registers */
863#define NEEDS_FORCE_WAKE(reg)((reg) < 0x40000) ((reg) < 0x40000)
864
865#define __gen6_reg_read_fw_domains(uncore, offset)({ enum forcewake_domains __fwd; if (((offset) < 0x40000))
__fwd = FORCEWAKE_RENDER; else __fwd = 0; __fwd; })
\
866({ \
867 enum forcewake_domains __fwd; \
868 if (NEEDS_FORCE_WAKE(offset)((offset) < 0x40000)) \
869 __fwd = FORCEWAKE_RENDER; \
870 else \
871 __fwd = 0; \
872 __fwd; \
873})
874
875static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
876{
877 if (offset < entry->start)
878 return -1;
879 else if (offset > entry->end)
880 return 1;
881 else
882 return 0;
883}
884
885/* Copied and "macroized" from lib/bsearch.c */
886#define BSEARCH(key, base, num, cmp)({ unsigned int start__ = 0, end__ = (num); typeof(base) result__
= ((void *)0); while (start__ < end__) { unsigned int mid__
= start__ + (end__ - start__) / 2; int ret__ = (cmp)((key), (
base) + mid__); if (ret__ < 0) { end__ = mid__; } else if (
ret__ > 0) { start__ = mid__ + 1; } else { result__ = (base
) + mid__; break; } } result__; })
({ \
887 unsigned int start__ = 0, end__ = (num); \
888 typeof(base) result__ = NULL((void *)0); \
889 while (start__ < end__) { \
890 unsigned int mid__ = start__ + (end__ - start__) / 2; \
891 int ret__ = (cmp)((key), (base) + mid__); \
892 if (ret__ < 0) { \
893 end__ = mid__; \
894 } else if (ret__ > 0) { \
895 start__ = mid__ + 1; \
896 } else { \
897 result__ = (base) + mid__; \
898 break; \
899 } \
900 } \
901 result__; \
902})
903
904static enum forcewake_domains
905find_fw_domain(struct intel_uncore *uncore, u32 offset)
906{
907 const struct intel_forcewake_range *entry;
908
909 entry = BSEARCH(offset,({ unsigned int start__ = 0, end__ = (uncore->fw_domains_table_entries
); typeof(uncore->fw_domains_table) result__ = ((void *)0)
; while (start__ < end__) { unsigned int mid__ = start__ +
(end__ - start__) / 2; int ret__ = (fw_range_cmp)((offset), (
uncore->fw_domains_table) + mid__); if (ret__ < 0) { end__
= mid__; } else if (ret__ > 0) { start__ = mid__ + 1; } else
{ result__ = (uncore->fw_domains_table) + mid__; break; }
} result__; })
910 uncore->fw_domains_table,({ unsigned int start__ = 0, end__ = (uncore->fw_domains_table_entries
); typeof(uncore->fw_domains_table) result__ = ((void *)0)
; while (start__ < end__) { unsigned int mid__ = start__ +
(end__ - start__) / 2; int ret__ = (fw_range_cmp)((offset), (
uncore->fw_domains_table) + mid__); if (ret__ < 0) { end__
= mid__; } else if (ret__ > 0) { start__ = mid__ + 1; } else
{ result__ = (uncore->fw_domains_table) + mid__; break; }
} result__; })
911 uncore->fw_domains_table_entries,({ unsigned int start__ = 0, end__ = (uncore->fw_domains_table_entries
); typeof(uncore->fw_domains_table) result__ = ((void *)0)
; while (start__ < end__) { unsigned int mid__ = start__ +
(end__ - start__) / 2; int ret__ = (fw_range_cmp)((offset), (
uncore->fw_domains_table) + mid__); if (ret__ < 0) { end__
= mid__; } else if (ret__ > 0) { start__ = mid__ + 1; } else
{ result__ = (uncore->fw_domains_table) + mid__; break; }
} result__; })
912 fw_range_cmp)({ unsigned int start__ = 0, end__ = (uncore->fw_domains_table_entries
); typeof(uncore->fw_domains_table) result__ = ((void *)0)
; while (start__ < end__) { unsigned int mid__ = start__ +
(end__ - start__) / 2; int ret__ = (fw_range_cmp)((offset), (
uncore->fw_domains_table) + mid__); if (ret__ < 0) { end__
= mid__; } else if (ret__ > 0) { start__ = mid__ + 1; } else
{ result__ = (uncore->fw_domains_table) + mid__; break; }
} result__; })
;
913
914 if (!entry)
915 return 0;
916
917 /*
918 * The list of FW domains depends on the SKU in gen11+ so we
919 * can't determine it statically. We use FORCEWAKE_ALL and
920 * translate it here to the list of available domains.
921 */
922 if (entry->domains == FORCEWAKE_ALL)
923 return uncore->fw_domains;
924
925 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,({ int __ret = !!(entry->domains & ~uncore->fw_domains
); if (__ret) printf("%s %s: " "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, entry->domains & ~uncore->fw_domains, offset); __builtin_expect
(!!(__ret), 0); })
926 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",({ int __ret = !!(entry->domains & ~uncore->fw_domains
); if (__ret) printf("%s %s: " "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, entry->domains & ~uncore->fw_domains, offset); __builtin_expect
(!!(__ret), 0); })
927 entry->domains & ~uncore->fw_domains, offset)({ int __ret = !!(entry->domains & ~uncore->fw_domains
); if (__ret) printf("%s %s: " "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, entry->domains & ~uncore->fw_domains, offset); __builtin_expect
(!!(__ret), 0); })
;
928
929 return entry->domains;
930}
931
932#define GEN_FW_RANGE(s, e, d){ .start = (s), .end = (e), .domains = (d) } \
933 { .start = (s), .end = (e), .domains = (d) }
934
935/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
936static const struct intel_forcewake_range __vlv_fw_ranges[] = {
937 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER){ .start = (0x2000), .end = (0x3fff), .domains = (FORCEWAKE_RENDER
) }
,
938 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER){ .start = (0x5000), .end = (0x7fff), .domains = (FORCEWAKE_RENDER
) }
,
939 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER){ .start = (0xb000), .end = (0x11fff), .domains = (FORCEWAKE_RENDER
) }
,
940 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA){ .start = (0x12000), .end = (0x13fff), .domains = (FORCEWAKE_MEDIA
) }
,
941 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA){ .start = (0x22000), .end = (0x23fff), .domains = (FORCEWAKE_MEDIA
) }
,
942 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER){ .start = (0x2e000), .end = (0x2ffff), .domains = (FORCEWAKE_RENDER
) }
,
943 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA){ .start = (0x30000), .end = (0x3ffff), .domains = (FORCEWAKE_MEDIA
) }
,
944};
945
946#define __fwtable_reg_read_fw_domains(uncore, offset)({ enum forcewake_domains __fwd = 0; if ((((offset)) < 0x40000
)) __fwd = find_fw_domain(uncore, offset); __fwd; })
\
947({ \
948 enum forcewake_domains __fwd = 0; \
949 if (NEEDS_FORCE_WAKE((offset))(((offset)) < 0x40000)) \
950 __fwd = find_fw_domain(uncore, offset); \
951 __fwd; \
952})
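A worked example of this macro against the __vlv_fw_ranges table above (illustrative values, assuming FORCEWAKE_MEDIA is present in uncore->fw_domains):

/*
 *   __fwtable_reg_read_fw_domains(uncore, 0x12030)
 *       0x12030 < 0x40000, so find_fw_domain() runs and matches the
 *       0x12000-0x13fff entry -> FORCEWAKE_MEDIA
 *   __fwtable_reg_read_fw_domains(uncore, 0x140000)
 *       0x140000 >= 0x40000, NEEDS_FORCE_WAKE() is false -> 0 (no forcewake)
 */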
953
954#define __gen11_fwtable_reg_read_fw_domains(uncore, offset)find_fw_domain(uncore, offset) \
955 find_fw_domain(uncore, offset)
956
957#define __gen12_fwtable_reg_read_fw_domains(uncore, offset)find_fw_domain(uncore, offset) \
958 find_fw_domain(uncore, offset)
959
960/* *Must* be sorted by offset! See intel_shadow_table_check(). */
961static const i915_reg_t gen8_shadowed_regs[] = {
962 RING_TAIL(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x30) }), /* 0x2000 (base) */
963 GEN6_RPNSWREQ((const i915_reg_t){ .reg = (0xA008) }), /* 0xA008 */
964 GEN6_RC_VIDEO_FREQ((const i915_reg_t){ .reg = (0xA00C) }), /* 0xA00C */
965 RING_TAIL(GEN6_BSD_RING_BASE)((const i915_reg_t){ .reg = ((0x12000) + 0x30) }), /* 0x12000 (base) */
966 RING_TAIL(VEBOX_RING_BASE)((const i915_reg_t){ .reg = ((0x1a000) + 0x30) }), /* 0x1a000 (base) */
967 RING_TAIL(BLT_RING_BASE)((const i915_reg_t){ .reg = ((0x22000) + 0x30) }), /* 0x22000 (base) */
968 /* TODO: Other registers are not yet used */
969};
970
971static const i915_reg_t gen11_shadowed_regs[] = {
972 RING_TAIL(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x30) }), /* 0x2000 (base) */
973 GEN6_RPNSWREQ((const i915_reg_t){ .reg = (0xA008) }), /* 0xA008 */
974 GEN6_RC_VIDEO_FREQ((const i915_reg_t){ .reg = (0xA00C) }), /* 0xA00C */
975 RING_TAIL(BLT_RING_BASE)((const i915_reg_t){ .reg = ((0x22000) + 0x30) }), /* 0x22000 (base) */
976 RING_TAIL(GEN11_BSD_RING_BASE)((const i915_reg_t){ .reg = ((0x1c0000) + 0x30) }), /* 0x1C0000 (base) */
977 RING_TAIL(GEN11_BSD2_RING_BASE)((const i915_reg_t){ .reg = ((0x1c4000) + 0x30) }), /* 0x1C4000 (base) */
978 RING_TAIL(GEN11_VEBOX_RING_BASE)((const i915_reg_t){ .reg = ((0x1c8000) + 0x30) }), /* 0x1C8000 (base) */
979 RING_TAIL(GEN11_BSD3_RING_BASE)((const i915_reg_t){ .reg = ((0x1d0000) + 0x30) }), /* 0x1D0000 (base) */
980 RING_TAIL(GEN11_BSD4_RING_BASE)((const i915_reg_t){ .reg = ((0x1d4000) + 0x30) }), /* 0x1D4000 (base) */
981 RING_TAIL(GEN11_VEBOX2_RING_BASE)((const i915_reg_t){ .reg = ((0x1d8000) + 0x30) }), /* 0x1D8000 (base) */
982 /* TODO: Other registers are not yet used */
983};
984
985static const i915_reg_t gen12_shadowed_regs[] = {
986 RING_TAIL(RENDER_RING_BASE)((const i915_reg_t){ .reg = ((0x02000) + 0x30) }), /* 0x2000 (base) */
987 GEN6_RPNSWREQ((const i915_reg_t){ .reg = (0xA008) }), /* 0xA008 */
988 GEN6_RC_VIDEO_FREQ((const i915_reg_t){ .reg = (0xA00C) }), /* 0xA00C */
989 RING_TAIL(BLT_RING_BASE)((const i915_reg_t){ .reg = ((0x22000) + 0x30) }), /* 0x22000 (base) */
990 RING_TAIL(GEN11_BSD_RING_BASE)((const i915_reg_t){ .reg = ((0x1c0000) + 0x30) }), /* 0x1C0000 (base) */
991 RING_TAIL(GEN11_BSD2_RING_BASE)((const i915_reg_t){ .reg = ((0x1c4000) + 0x30) }), /* 0x1C4000 (base) */
992 RING_TAIL(GEN11_VEBOX_RING_BASE)((const i915_reg_t){ .reg = ((0x1c8000) + 0x30) }), /* 0x1C8000 (base) */
993 RING_TAIL(GEN11_BSD3_RING_BASE)((const i915_reg_t){ .reg = ((0x1d0000) + 0x30) }), /* 0x1D0000 (base) */
994 RING_TAIL(GEN11_BSD4_RING_BASE)((const i915_reg_t){ .reg = ((0x1d4000) + 0x30) }), /* 0x1D4000 (base) */
995 RING_TAIL(GEN11_VEBOX2_RING_BASE)((const i915_reg_t){ .reg = ((0x1d8000) + 0x30) }), /* 0x1D8000 (base) */
996 /* TODO: Other registers are not yet used */
997};
998
999static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
1000{
1001 u32 offset = i915_mmio_reg_offset(*reg);
1002
1003 if (key < offset)
1004 return -1;
1005 else if (key > offset)
1006 return 1;
1007 else
1008 return 0;
1009}
1010
1011#define __is_genX_shadowed(x)static _Bool is_genx_shadowed(u32 offset) { const i915_reg_t *
regs = genx_shadowed_regs; return ({ unsigned int start__ = 0
, end__ = ((sizeof((genx_shadowed_regs)) / sizeof((genx_shadowed_regs
)[0]))); typeof(regs) result__ = ((void *)0); while (start__ <
end__) { unsigned int mid__ = start__ + (end__ - start__) / 2
; int ret__ = (mmio_reg_cmp)((offset), (regs) + mid__); if (ret__
< 0) { end__ = mid__; } else if (ret__ > 0) { start__ =
mid__ + 1; } else { result__ = (regs) + mid__; break; } } result__
; }); }
\
1012static bool_Bool is_gen##x##_shadowed(u32 offset) \
1013{ \
1014 const i915_reg_t *regs = gen##x##_shadowed_regs; \
1015 return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
1016 mmio_reg_cmp)({ unsigned int start__ = 0, end__ = ((sizeof((gen##x##_shadowed_regs
)) / sizeof((gen##x##_shadowed_regs)[0]))); typeof(regs) result__
= ((void *)0); while (start__ < end__) { unsigned int mid__
= start__ + (end__ - start__) / 2; int ret__ = (mmio_reg_cmp
)((offset), (regs) + mid__); if (ret__ < 0) { end__ = mid__
; } else if (ret__ > 0) { start__ = mid__ + 1; } else { result__
= (regs) + mid__; break; } } result__; })
; \
1017}
1018
1019__is_genX_shadowed(8)static _Bool is_gen8_shadowed(u32 offset) { const i915_reg_t *
regs = gen8_shadowed_regs; return ({ unsigned int start__ = 0
, end__ = ((sizeof((gen8_shadowed_regs)) / sizeof((gen8_shadowed_regs
)[0]))); typeof(regs) result__ = ((void *)0); while (start__ <
end__) { unsigned int mid__ = start__ + (end__ - start__) / 2
; int ret__ = (mmio_reg_cmp)((offset), (regs) + mid__); if (ret__
< 0) { end__ = mid__; } else if (ret__ > 0) { start__ =
mid__ + 1; } else { result__ = (regs) + mid__; break; } } result__
; }); }
1020__is_genX_shadowed(11)static _Bool is_gen11_shadowed(u32 offset) { const i915_reg_t
*regs = gen11_shadowed_regs; return ({ unsigned int start__ =
0, end__ = ((sizeof((gen11_shadowed_regs)) / sizeof((gen11_shadowed_regs
)[0]))); typeof(regs) result__ = ((void *)0); while (start__ <
end__) { unsigned int mid__ = start__ + (end__ - start__) / 2
; int ret__ = (mmio_reg_cmp)((offset), (regs) + mid__); if (ret__
< 0) { end__ = mid__; } else if (ret__ > 0) { start__ =
mid__ + 1; } else { result__ = (regs) + mid__; break; } } result__
; }); }
1021__is_genX_shadowed(12)static _Bool is_gen12_shadowed(u32 offset) { const i915_reg_t
*regs = gen12_shadowed_regs; return ({ unsigned int start__ =
0, end__ = ((sizeof((gen12_shadowed_regs)) / sizeof((gen12_shadowed_regs
)[0]))); typeof(regs) result__ = ((void *)0); while (start__ <
end__) { unsigned int mid__ = start__ + (end__ - start__) / 2
; int ret__ = (mmio_reg_cmp)((offset), (regs) + mid__); if (ret__
< 0) { end__ = mid__; } else if (ret__ > 0) { start__ =
mid__ + 1; } else { result__ = (regs) + mid__; break; } } result__
; }); }
1022
1023static enum forcewake_domains
1024gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1025{
1026 return FORCEWAKE_RENDER;
1027}
1028
1029#define __gen8_reg_write_fw_domains(uncore, offset)({ enum forcewake_domains __fwd; if (((offset) < 0x40000) &&
!is_gen8_shadowed(offset)) __fwd = FORCEWAKE_RENDER; else __fwd
= 0; __fwd; })
\
1030({ \
1031 enum forcewake_domains __fwd; \
1032 if (NEEDS_FORCE_WAKE(offset)((offset) < 0x40000) && !is_gen8_shadowed(offset)) \
1033 __fwd = FORCEWAKE_RENDER; \
1034 else \
1035 __fwd = 0; \
1036 __fwd; \
1037})
1038
1039/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1040static const struct intel_forcewake_range __chv_fw_ranges[] = {
1041 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER){ .start = (0x2000), .end = (0x3fff), .domains = (FORCEWAKE_RENDER
) }
,
1042 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0x4000), .end = (0x4fff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1043 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER){ .start = (0x5200), .end = (0x7fff), .domains = (FORCEWAKE_RENDER
) }
,
1044 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0x8000), .end = (0x82ff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1045 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER){ .start = (0x8300), .end = (0x84ff), .domains = (FORCEWAKE_RENDER
) }
,
1046 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0x8500), .end = (0x85ff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1047 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA){ .start = (0x8800), .end = (0x88ff), .domains = (FORCEWAKE_MEDIA
) }
,
1048 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0x9000), .end = (0xafff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1049 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER){ .start = (0xb000), .end = (0xb47f), .domains = (FORCEWAKE_RENDER
) }
,
1050 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA){ .start = (0xd000), .end = (0xd7ff), .domains = (FORCEWAKE_MEDIA
) }
,
1051 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER){ .start = (0xe000), .end = (0xe7ff), .domains = (FORCEWAKE_RENDER
) }
,
1052 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0xf000), .end = (0xffff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1053 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA){ .start = (0x12000), .end = (0x13fff), .domains = (FORCEWAKE_MEDIA
) }
,
1054 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA){ .start = (0x1a000), .end = (0x1bfff), .domains = (FORCEWAKE_MEDIA
) }
,
1055 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA){ .start = (0x1e800), .end = (0x1e9ff), .domains = (FORCEWAKE_MEDIA
) }
,
1056 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA){ .start = (0x30000), .end = (0x37fff), .domains = (FORCEWAKE_MEDIA
) }
,
1057};
1058
1059#define __fwtable_reg_write_fw_domains(uncore, offset)({ enum forcewake_domains __fwd = 0; if ((((offset)) < 0x40000
) && !is_gen8_shadowed(offset)) __fwd = find_fw_domain
(uncore, offset); __fwd; })
\
1060({ \
1061 enum forcewake_domains __fwd = 0; \
1062 if (NEEDS_FORCE_WAKE((offset))(((offset)) < 0x40000) && !is_gen8_shadowed(offset)) \
1063 __fwd = find_fw_domain(uncore, offset); \
1064 __fwd; \
1065})
1066
1067#define __gen11_fwtable_reg_write_fw_domains(uncore, offset)({ enum forcewake_domains __fwd = 0; const u32 __offset = (offset
); if (!is_gen11_shadowed(__offset)) __fwd = find_fw_domain(uncore
, __offset); __fwd; })
\
1068({ \
1069 enum forcewake_domains __fwd = 0; \
1070 const u32 __offset = (offset); \
1071 if (!is_gen11_shadowed(__offset)) \
1072 __fwd = find_fw_domain(uncore, __offset); \
1073 __fwd; \
1074})
1075
1076#define __gen12_fwtable_reg_write_fw_domains(uncore, offset)({ enum forcewake_domains __fwd = 0; const u32 __offset = (offset
); if (!is_gen12_shadowed(__offset)) __fwd = find_fw_domain(uncore
, __offset); __fwd; })
\
1077({ \
1078 enum forcewake_domains __fwd = 0; \
1079 const u32 __offset = (offset); \
1080 if (!is_gen12_shadowed(__offset)) \
1081 __fwd = find_fw_domain(uncore, __offset); \
1082 __fwd; \
1083})
1084
1085/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1086static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1087 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER){ .start = (0x0), .end = (0xaff), .domains = (FORCEWAKE_BLITTER
) }
,
1088 GEN_FW_RANGE(0xb00, 0x1fff, 0){ .start = (0xb00), .end = (0x1fff), .domains = (0) }, /* uncore range */
1089 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER){ .start = (0x2000), .end = (0x26ff), .domains = (FORCEWAKE_RENDER
) }
,
1090 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER){ .start = (0x2700), .end = (0x2fff), .domains = (FORCEWAKE_BLITTER
) }
,
1091 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER){ .start = (0x3000), .end = (0x3fff), .domains = (FORCEWAKE_RENDER
) }
,
1092 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER){ .start = (0x4000), .end = (0x51ff), .domains = (FORCEWAKE_BLITTER
) }
,
1093 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER){ .start = (0x5200), .end = (0x7fff), .domains = (FORCEWAKE_RENDER
) }
,
1094 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER){ .start = (0x8000), .end = (0x812f), .domains = (FORCEWAKE_BLITTER
) }
,
1095 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA){ .start = (0x8130), .end = (0x813f), .domains = (FORCEWAKE_MEDIA
) }
,
1096 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER){ .start = (0x8140), .end = (0x815f), .domains = (FORCEWAKE_RENDER
) }
,
1097 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER){ .start = (0x8160), .end = (0x82ff), .domains = (FORCEWAKE_BLITTER
) }
,
1098 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER){ .start = (0x8300), .end = (0x84ff), .domains = (FORCEWAKE_RENDER
) }
,
1099 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER){ .start = (0x8500), .end = (0x87ff), .domains = (FORCEWAKE_BLITTER
) }
,
1100 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA){ .start = (0x8800), .end = (0x89ff), .domains = (FORCEWAKE_MEDIA
) }
,
1101 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER){ .start = (0x8a00), .end = (0x8bff), .domains = (FORCEWAKE_BLITTER
) }
,
1102 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER){ .start = (0x8c00), .end = (0x8cff), .domains = (FORCEWAKE_RENDER
) }
,
1103 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER){ .start = (0x8d00), .end = (0x93ff), .domains = (FORCEWAKE_BLITTER
) }
,
1104 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA){ .start = (0x9400), .end = (0x97ff), .domains = (FORCEWAKE_RENDER
| FORCEWAKE_MEDIA) }
,
1105 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER){ .start = (0x9800), .end = (0xafff), .domains = (FORCEWAKE_BLITTER
) }
,
1106 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER){ .start = (0xb000), .end = (0xb47f), .domains = (FORCEWAKE_RENDER
) }
,
1107 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER){ .start = (0xb480), .end = (0xcfff), .domains = (FORCEWAKE_BLITTER
) }
,
1108 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA){ .start = (0xd000), .end = (0xd7ff), .domains = (FORCEWAKE_MEDIA
) }
,
1109 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER){ .start = (0xd800), .end = (0xdfff), .domains = (FORCEWAKE_BLITTER
) }
,
1110 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER){ .start = (0xe000), .end = (0xe8ff), .domains = (FORCEWAKE_RENDER
) }
,
1111 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER){ .start = (0xe900), .end = (0x11fff), .domains = (FORCEWAKE_BLITTER
) }
,
1112 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA){ .start = (0x12000), .end = (0x13fff), .domains = (FORCEWAKE_MEDIA
) }
,
1113 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER){ .start = (0x14000), .end = (0x19fff), .domains = (FORCEWAKE_BLITTER
) }
,
1114 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA){ .start = (0x1a000), .end = (0x1e9ff), .domains = (FORCEWAKE_MEDIA
) }
,
1115 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER){ .start = (0x1ea00), .end = (0x243ff), .domains = (FORCEWAKE_BLITTER
) }
,
1116 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER){ .start = (0x24400), .end = (0x247ff), .domains = (FORCEWAKE_RENDER
) }
,
1117 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER){ .start = (0x24800), .end = (0x2ffff), .domains = (FORCEWAKE_BLITTER
) }
,
1118 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA){ .start = (0x30000), .end = (0x3ffff), .domains = (FORCEWAKE_MEDIA
) }
,
1119};
1120
1121/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1122static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1123 GEN_FW_RANGE(0x0, 0x1fff, 0){ .start = (0x0), .end = (0x1fff), .domains = (0) }, /* uncore range */
1124 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER){ .start = (0x2000), .end = (0x26ff), .domains = (FORCEWAKE_RENDER
) }
,
1125 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER){ .start = (0x2700), .end = (0x2fff), .domains = (FORCEWAKE_BLITTER
) }
,
1126 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER){ .start = (0x3000), .end = (0x3fff), .domains = (FORCEWAKE_RENDER
) }
,
1127 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER){ .start = (0x4000), .end = (0x51ff), .domains = (FORCEWAKE_BLITTER
) }
,
1128 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER){ .start = (0x5200), .end = (0x7fff), .domains = (FORCEWAKE_RENDER
) }
,
1129 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER){ .start = (0x8000), .end = (0x813f), .domains = (FORCEWAKE_BLITTER
) }
,
1130 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER){ .start = (0x8140), .end = (0x815f), .domains = (FORCEWAKE_RENDER
) }
,
1131 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER){ .start = (0x8160), .end = (0x82ff), .domains = (FORCEWAKE_BLITTER
) }
,
1132 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER){ .start = (0x8300), .end = (0x84ff), .domains = (FORCEWAKE_RENDER
) }
,
1133 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER){ .start = (0x8500), .end = (0x87ff), .domains = (FORCEWAKE_BLITTER
) }
,
1134 GEN_FW_RANGE(0x8800, 0x8bff, 0){ .start = (0x8800), .end = (0x8bff), .domains = (0) },
1135 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER){ .start = (0x8c00), .end = (0x8cff), .domains = (FORCEWAKE_RENDER
) }
,
1136 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER){ .start = (0x8d00), .end = (0x94cf), .domains = (FORCEWAKE_BLITTER
) }
,
1137 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER){ .start = (0x94d0), .end = (0x955f), .domains = (FORCEWAKE_RENDER
) }
,
1138 GEN_FW_RANGE(0x9560, 0x95ff, 0){ .start = (0x9560), .end = (0x95ff), .domains = (0) },
1139 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER){ .start = (0x9600), .end = (0xafff), .domains = (FORCEWAKE_BLITTER
) }
,
1140 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER){ .start = (0xb000), .end = (0xb47f), .domains = (FORCEWAKE_RENDER
) }
,
1141 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER){ .start = (0xb480), .end = (0xdeff), .domains = (FORCEWAKE_BLITTER
) }
,
1142 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER){ .start = (0xdf00), .end = (0xe8ff), .domains = (FORCEWAKE_RENDER
) }
,
1143 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER){ .start = (0xe900), .end = (0x16dff), .domains = (FORCEWAKE_BLITTER
) }
,
1144 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER){ .start = (0x16e00), .end = (0x19fff), .domains = (FORCEWAKE_RENDER
) }
,
1145 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER){ .start = (0x1a000), .end = (0x23fff), .domains = (FORCEWAKE_BLITTER
) }
,
1146 GEN_FW_RANGE(0x24000, 0x2407f, 0){ .start = (0x24000), .end = (0x2407f), .domains = (0) },
1147 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER){ .start = (0x24080), .end = (0x2417f), .domains = (FORCEWAKE_BLITTER
) }
,
1148 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER){ .start = (0x24180), .end = (0x242ff), .domains = (FORCEWAKE_RENDER
) }
,
1149 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER){ .start = (0x24300), .end = (0x243ff), .domains = (FORCEWAKE_BLITTER
) }
,
1150 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER){ .start = (0x24400), .end = (0x24fff), .domains = (FORCEWAKE_RENDER
) }
,
1151 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER){ .start = (0x25000), .end = (0x3ffff), .domains = (FORCEWAKE_BLITTER
) }
,
1152 GEN_FW_RANGE(0x40000, 0x1bffff, 0){ .start = (0x40000), .end = (0x1bffff), .domains = (0) },
1153 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0){ .start = (0x1c0000), .end = (0x1c3fff), .domains = (FORCEWAKE_MEDIA_VDBOX0
) }
,
1154 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0){ .start = (0x1c4000), .end = (0x1c7fff), .domains = (0) },
1155 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0){ .start = (0x1c8000), .end = (0x1cffff), .domains = (FORCEWAKE_MEDIA_VEBOX0
) }
,
1156 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2){ .start = (0x1d0000), .end = (0x1d3fff), .domains = (FORCEWAKE_MEDIA_VDBOX2
) }
,
1157 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0){ .start = (0x1d4000), .end = (0x1dbfff), .domains = (0) }
1158};
1159
1160/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
1161static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1162 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER){ .start = (0x0), .end = (0xaff), .domains = (FORCEWAKE_BLITTER
) }
,
1163 GEN_FW_RANGE(0xb00, 0x1fff, 0){ .start = (0xb00), .end = (0x1fff), .domains = (0) }, /* uncore range */
1164 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER){ .start = (0x2000), .end = (0x26ff), .domains = (FORCEWAKE_RENDER
) }
,
1165 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER){ .start = (0x2700), .end = (0x2fff), .domains = (FORCEWAKE_BLITTER
) }
,
1166 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER){ .start = (0x3000), .end = (0x3fff), .domains = (FORCEWAKE_RENDER
) }
,
1167 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER){ .start = (0x4000), .end = (0x51ff), .domains = (FORCEWAKE_BLITTER
) }
,
1168 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER){ .start = (0x5200), .end = (0x7fff), .domains = (FORCEWAKE_RENDER
) }
,
1169 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER){ .start = (0x8000), .end = (0x813f), .domains = (FORCEWAKE_BLITTER
) }
,
1170 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER){ .start = (0x8140), .end = (0x815f), .domains = (FORCEWAKE_RENDER
) }
,
1171 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER){ .start = (0x8160), .end = (0x82ff), .domains = (FORCEWAKE_BLITTER
) }
,
1172 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER){ .start = (0x8300), .end = (0x84ff), .domains = (FORCEWAKE_RENDER
) }
,
1173 GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER){ .start = (0x8500), .end = (0x8bff), .domains = (FORCEWAKE_BLITTER
) }
,
1174 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER){ .start = (0x8c00), .end = (0x8cff), .domains = (FORCEWAKE_RENDER
) }
,
1175 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER){ .start = (0x8d00), .end = (0x93ff), .domains = (FORCEWAKE_BLITTER
) }
,
1176 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL){ .start = (0x9400), .end = (0x97ff), .domains = (FORCEWAKE_ALL
) }
,
1177 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER){ .start = (0x9800), .end = (0xafff), .domains = (FORCEWAKE_BLITTER
) }
,
1178 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER){ .start = (0xb000), .end = (0xb47f), .domains = (FORCEWAKE_RENDER
) }
,
1179 GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER){ .start = (0xb480), .end = (0xdfff), .domains = (FORCEWAKE_BLITTER
) }
,
1180 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER){ .start = (0xe000), .end = (0xe8ff), .domains = (FORCEWAKE_RENDER
) }
,
1181 GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER){ .start = (0xe900), .end = (0x147ff), .domains = (FORCEWAKE_BLITTER
) }
,
1182 GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER){ .start = (0x14800), .end = (0x148ff), .domains = (FORCEWAKE_RENDER
) }
,
1183 GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER){ .start = (0x14900), .end = (0x19fff), .domains = (FORCEWAKE_BLITTER
) }
,
1184 GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER){ .start = (0x1a000), .end = (0x1a7ff), .domains = (FORCEWAKE_RENDER
) }
,
1185 GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER){ .start = (0x1a800), .end = (0x1afff), .domains = (FORCEWAKE_BLITTER
) }
,
1186 GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER){ .start = (0x1b000), .end = (0x1bfff), .domains = (FORCEWAKE_RENDER
) }
,
1187 GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER){ .start = (0x1c000), .end = (0x243ff), .domains = (FORCEWAKE_BLITTER
) }
,
1188 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER){ .start = (0x24400), .end = (0x247ff), .domains = (FORCEWAKE_RENDER
) }
,
1189 GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER){ .start = (0x24800), .end = (0x3ffff), .domains = (FORCEWAKE_BLITTER
) }
,
1190 GEN_FW_RANGE(0x40000, 0x1bffff, 0){ .start = (0x40000), .end = (0x1bffff), .domains = (0) },
1191 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0){ .start = (0x1c0000), .end = (0x1c3fff), .domains = (FORCEWAKE_MEDIA_VDBOX0
) }
,
1192 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1){ .start = (0x1c4000), .end = (0x1c7fff), .domains = (FORCEWAKE_MEDIA_VDBOX1
) }
,
1193 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0){ .start = (0x1c8000), .end = (0x1cbfff), .domains = (FORCEWAKE_MEDIA_VEBOX0
) }
,
1194 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER){ .start = (0x1cc000), .end = (0x1cffff), .domains = (FORCEWAKE_BLITTER
) }
,
1195 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2){ .start = (0x1d0000), .end = (0x1d3fff), .domains = (FORCEWAKE_MEDIA_VDBOX2
) }
,
1196 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3){ .start = (0x1d4000), .end = (0x1d7fff), .domains = (FORCEWAKE_MEDIA_VDBOX3
) }
,
1197 GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1){ .start = (0x1d8000), .end = (0x1dbfff), .domains = (FORCEWAKE_MEDIA_VEBOX1
) }
1198};
1199
1200static void
1201ilk_dummy_write(struct intel_uncore *uncore)
1202{
1203 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1204 * the chip from rc6 before touching it for real. MI_MODE is masked,
1205 * hence harmless to write 0 into. */
1206 __raw_uncore_write32(uncore, MI_MODE((const i915_reg_t){ .reg = (0x209c) }), 0);
1207}
1208
1209static void
1210__unclaimed_reg_debug(struct intel_uncore *uncore,
1211 const i915_reg_t reg,
1212 const bool_Bool read,
1213 const bool_Bool before)
1214{
1215 if (drm_WARN(&uncore->i915->drm,
1216 check_for_unclaimed_mmio(uncore) && !before,
1217 "Unclaimed %s register 0x%x\n",
1218 read ? "read from" : "write to",
1219 i915_mmio_reg_offset(reg))({ int __ret = !!(check_for_unclaimed_mmio(uncore) &&
!before); if (__ret) printf("%s %s: " "Unclaimed %s register 0x%x\n"
, dev_driver_string((&uncore->i915->drm)->dev), ""
, read ? "read from" : "write to", i915_mmio_reg_offset(reg))
; __builtin_expect(!!(__ret), 0); })
)
1220 /* Only report the first N failures */
1221 uncore->i915->params.mmio_debug--;
1222}
1223
1224static inline void
1225unclaimed_reg_debug(struct intel_uncore *uncore,
1226 const i915_reg_t reg,
1227 const bool_Bool read,
1228 const bool_Bool before)
1229{
1230 if (likely(!uncore->i915->params.mmio_debug)__builtin_expect(!!(!uncore->i915->params.mmio_debug), 1
)
)
1231 return;
1232
1233 /* interrupts are disabled and re-enabled around uncore->lock usage */
1234 lockdep_assert_held(&uncore->lock)do { (void)(&uncore->lock); } while(0);
1235
1236 if (before)
1237 spin_lock(&uncore->debug->lock)mtx_enter(&uncore->debug->lock);
1238
1239 __unclaimed_reg_debug(uncore, reg, read, before);
1240
1241 if (!before)
1242 spin_unlock(&uncore->debug->lock)mtx_leave(&uncore->debug->lock);
1243}
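For context, the GEN6_READ_HEADER/FOOTER and GEN6_WRITE_HEADER/FOOTER macros defined further down call this helper as a before/after pair, so uncore->debug->lock brackets the raw MMIO access and the drm_WARN in __unclaimed_reg_debug() can only fire on the "after" call:

/*
 *   unclaimed_reg_debug(uncore, reg, read, true);    takes debug->lock
 *   ... raw MMIO read or write ...
 *   unclaimed_reg_debug(uncore, reg, read, false);   may warn, drops the lock
 */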
1244
1245#define __vgpu_read(x)static ux vgpu_readx(struct intel_uncore *uncore, i915_reg_t reg
, _Bool trace) { ux val = __raw_uncore_readx(uncore, reg); ; return
val; }
\
1246static u##x \
1247vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool_Bool trace) { \
1248 u##x val = __raw_uncore_read##x(uncore, reg); \
1249 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1250 return val; \
1251}
1252__vgpu_read(8)static u8 vgpu_read8(struct intel_uncore *uncore, i915_reg_t reg
, _Bool trace) { u8 val = __raw_uncore_read8(uncore, reg); ; return
val; }
1253__vgpu_read(16)static u16 vgpu_read16(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { u16 val = __raw_uncore_read16(uncore, reg
); ; return val; }
1254__vgpu_read(32)static u32 vgpu_read32(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { u32 val = __raw_uncore_read32(uncore, reg
); ; return val; }
1255__vgpu_read(64)static u64 vgpu_read64(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { u64 val = __raw_uncore_read64(uncore, reg
); ; return val; }
1256
1257#define GEN2_READ_HEADER(x) \
1258 u##x val = 0; \
1259 assert_rpm_wakelock_held(uncore->rpm);
1260
1261#define GEN2_READ_FOOTER \
1262 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1263 return val
1264
1265#define __gen2_read(x) \
1266static u##x \
1267gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool_Bool trace) { \
1268 GEN2_READ_HEADER(x); \
1269 val = __raw_uncore_read##x(uncore, reg); \
1270 GEN2_READ_FOOTER; \
1271}
1272
1273#define __gen5_read(x) \
1274static u##x \
1275gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool_Bool trace) { \
1276 GEN2_READ_HEADER(x); \
1277 ilk_dummy_write(uncore); \
1278 val = __raw_uncore_read##x(uncore, reg); \
1279 GEN2_READ_FOOTER; \
1280}
1281
1282__gen5_read(8)
1283__gen5_read(16)
1284__gen5_read(32)
1285__gen5_read(64)
1286__gen2_read(8)
1287__gen2_read(16)
1288__gen2_read(32)
1289__gen2_read(64)
1290
1291#undef __gen5_read
1292#undef __gen2_read
1293
1294#undef GEN2_READ_FOOTER
1295#undef GEN2_READ_HEADER
1296
1297#define GEN6_READ_HEADER(x) \
1298 u32 offset = i915_mmio_reg_offset(reg); \
1299 unsigned long irqflags; \
1300 u##x val = 0; \
1301 assert_rpm_wakelock_held(uncore->rpm); \
1302 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
; \
1303 unclaimed_reg_debug(uncore, reg, true1, true1)
1304
1305#define GEN6_READ_FOOTER \
1306 unclaimed_reg_debug(uncore, reg, true1, false0); \
1307 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
; \
1308 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1309 return val
1310
1311static noinline__attribute__((__noinline__)) void ___force_wake_auto(struct intel_uncore *uncore,
1312 enum forcewake_domains fw_domains)
1313{
1314 struct intel_uncore_forcewake_domain *domain;
1315 unsigned int tmp;
1316
1317 GEM_BUG_ON(fw_domains & ~uncore->fw_domains)((void)0);
1318
1319 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)for (tmp = (fw_domains); tmp ;) if (!(domain = (uncore)->fw_domain
[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL << (__idx
)); __idx; })])) {} else
1320 fw_domain_arm_timer(domain);
1321
1322 uncore->funcs.force_wake_get(uncore, fw_domains);
1323}
1324
1325static inline void __force_wake_auto(struct intel_uncore *uncore,
1326 enum forcewake_domains fw_domains)
1327{
1328 GEM_BUG_ON(!fw_domains)((void)0);
1329
1330 /* Turn on all requested but inactive supported forcewake domains. */
1331 fw_domains &= uncore->fw_domains;
1332 fw_domains &= ~uncore->fw_domains_active;
1333
1334 if (fw_domains)
1335 ___force_wake_auto(uncore, fw_domains);
1336}
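A concrete illustration of the two mask operations above (domain values invented for the example):

/*
 *   requested:        fw_domains                = FORCEWAKE_RENDER | FORCEWAKE_MEDIA
 *   supported:        uncore->fw_domains        = FORCEWAKE_RENDER | FORCEWAKE_MEDIA | FORCEWAKE_BLITTER
 *   already active:   uncore->fw_domains_active = FORCEWAKE_RENDER
 *   after the two &=: fw_domains                = FORCEWAKE_MEDIA
 * so ___force_wake_auto() arms only that domain's release timer and calls the
 * force_wake_get() vfunc for it.
 */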
1337
1338#define __gen_read(func, x)static ux func_readx(struct intel_uncore *uncore, i915_reg_t reg
, _Bool trace) { enum forcewake_domains fw_engine; GEN6_READ_HEADER
(x); fw_engine = __func_reg_read_fw_domains(uncore, offset); if
(fw_engine) __force_wake_auto(uncore, fw_engine); val = __raw_uncore_readx
(uncore, reg); GEN6_READ_FOOTER; }
\
1339static u##x \
1340func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool_Bool trace) { \
1341 enum forcewake_domains fw_engine; \
1342 GEN6_READ_HEADER(x); \
1343 fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
1344 if (fw_engine) \
1345 __force_wake_auto(uncore, fw_engine); \
1346 val = __raw_uncore_read##x(uncore, reg); \
1347 GEN6_READ_FOOTER; \
1348}
1349
1350#define __gen_reg_read_funcs(func) \
1351static enum forcewake_domains \
1352func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1353 return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1354} \
1355\
1356__gen_read(func, 8)static u8 func_read8(struct intel_uncore *uncore, i915_reg_t reg
, _Bool trace) { enum forcewake_domains fw_engine; GEN6_READ_HEADER
(8); fw_engine = __func_reg_read_fw_domains(uncore, offset); if
(fw_engine) __force_wake_auto(uncore, fw_engine); val = __raw_uncore_read8
(uncore, reg); GEN6_READ_FOOTER; }
\
1357__gen_read(func, 16)static u16 func_read16(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { enum forcewake_domains fw_engine; GEN6_READ_HEADER
(16); fw_engine = __func_reg_read_fw_domains(uncore, offset);
if (fw_engine) __force_wake_auto(uncore, fw_engine); val = __raw_uncore_read16
(uncore, reg); GEN6_READ_FOOTER; }
\
1358__gen_read(func, 32)static u32 func_read32(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { enum forcewake_domains fw_engine; GEN6_READ_HEADER
(32); fw_engine = __func_reg_read_fw_domains(uncore, offset);
if (fw_engine) __force_wake_auto(uncore, fw_engine); val = __raw_uncore_read32
(uncore, reg); GEN6_READ_FOOTER; }
\
1359__gen_read(func, 64)static u64 func_read64(struct intel_uncore *uncore, i915_reg_t
reg, _Bool trace) { enum forcewake_domains fw_engine; GEN6_READ_HEADER
(64); fw_engine = __func_reg_read_fw_domains(uncore, offset);
if (fw_engine) __force_wake_auto(uncore, fw_engine); val = __raw_uncore_read64
(uncore, reg); GEN6_READ_FOOTER; }
1360
1361__gen_reg_read_funcs(gen12_fwtable);
1362__gen_reg_read_funcs(gen11_fwtable);
1363__gen_reg_read_funcs(fwtable);
1364__gen_reg_read_funcs(gen6);
1365
1366#undef __gen_reg_read_funcs
1367#undef GEN6_READ_FOOTER
1368#undef GEN6_READ_HEADER
1369
1370#define GEN2_WRITE_HEADER \
1371 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1372 assert_rpm_wakelock_held(uncore->rpm); \
1373
1374#define GEN2_WRITE_FOOTER
1375
1376#define __gen2_write(x) \
1377static void \
1378gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool_Bool trace) { \
1379 GEN2_WRITE_HEADER; \
1380 __raw_uncore_write##x(uncore, reg, val); \
1381 GEN2_WRITE_FOOTER; \
1382}
1383
1384#define __gen5_write(x) \
1385static void \
1386gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool_Bool trace) { \
1387 GEN2_WRITE_HEADER; \
1388 ilk_dummy_write(uncore); \
1389 __raw_uncore_write##x(uncore, reg, val); \
1390 GEN2_WRITE_FOOTER; \
1391}
1392
1393__gen5_write(8)
1394__gen5_write(16)
1395__gen5_write(32)
1396__gen2_write(8)
1397__gen2_write(16)
1398__gen2_write(32)
1399
1400#undef __gen5_write
1401#undef __gen2_write
1402
1403#undef GEN2_WRITE_FOOTER
1404#undef GEN2_WRITE_HEADER
1405
1406#define GEN6_WRITE_HEADER \
1407 u32 offset = i915_mmio_reg_offset(reg); \
1408 unsigned long irqflags; \
1409 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1410 assert_rpm_wakelock_held(uncore->rpm); \
1411 spin_lock_irqsave(&uncore->lock, irqflags)do { irqflags = 0; mtx_enter(&uncore->lock); } while (
0)
; \
1412 unclaimed_reg_debug(uncore, reg, false0, true1)
1413
1414#define GEN6_WRITE_FOOTER \
1415 unclaimed_reg_debug(uncore, reg, false0, false0); \
1416 spin_unlock_irqrestore(&uncore->lock, irqflags)do { (void)(irqflags); mtx_leave(&uncore->lock); } while
(0)
1417
1418#define __gen6_write(x)static void gen6_writex(struct intel_uncore *uncore, i915_reg_t
reg, ux val, _Bool trace) { GEN6_WRITE_HEADER; if (((offset)
< 0x40000)) __gen6_gt_wait_for_fifo(uncore); __raw_uncore_writex
(uncore, reg, val); GEN6_WRITE_FOOTER; }
\
1419static void \
1420gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool_Bool trace) { \
1421 GEN6_WRITE_HEADER; \
1422 if (NEEDS_FORCE_WAKE(offset)((offset) < 0x40000)) \
1423 __gen6_gt_wait_for_fifo(uncore); \
1424 __raw_uncore_write##x(uncore, reg, val); \
1425 GEN6_WRITE_FOOTER; \
1426}
1427__gen6_write(8)static void gen6_write8(struct intel_uncore *uncore, i915_reg_t
reg, u8 val, _Bool trace) { GEN6_WRITE_HEADER; if (((offset)
< 0x40000)) __gen6_gt_wait_for_fifo(uncore); __raw_uncore_write8
(uncore, reg, val); GEN6_WRITE_FOOTER; }
1428__gen6_write(16)static void gen6_write16(struct intel_uncore *uncore, i915_reg_t
reg, u16 val, _Bool trace) { GEN6_WRITE_HEADER; if (((offset
) < 0x40000)) __gen6_gt_wait_for_fifo(uncore); __raw_uncore_write16
(uncore, reg, val); GEN6_WRITE_FOOTER; }
1429__gen6_write(32)static void gen6_write32(struct intel_uncore *uncore, i915_reg_t
reg, u32 val, _Bool trace) { GEN6_WRITE_HEADER; if (((offset
) < 0x40000)) __gen6_gt_wait_for_fifo(uncore); __raw_uncore_write32
(uncore, reg, val); GEN6_WRITE_FOOTER; }
1430
1431#define __gen_write(func, x)static void func_writex(struct intel_uncore *uncore, i915_reg_t
reg, ux val, _Bool trace) { enum forcewake_domains fw_engine
; GEN6_WRITE_HEADER; fw_engine = __func_reg_write_fw_domains(
uncore, offset); if (fw_engine) __force_wake_auto(uncore, fw_engine
); __raw_uncore_writex(uncore, reg, val); GEN6_WRITE_FOOTER; }
\
1432static void \
1433func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool_Bool trace) { \
1434 enum forcewake_domains fw_engine; \
1435 GEN6_WRITE_HEADER; \
1436 fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
1437 if (fw_engine) \
1438 __force_wake_auto(uncore, fw_engine); \
1439 __raw_uncore_write##x(uncore, reg, val); \
1440 GEN6_WRITE_FOOTER; \
1441}
1442
1443#define __gen_reg_write_funcs(func) \
1444static enum forcewake_domains \
1445func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
1446 return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
1447} \
1448\
1449__gen_write(func, 8)static void func_write8(struct intel_uncore *uncore, i915_reg_t
reg, u8 val, _Bool trace) { enum forcewake_domains fw_engine
; GEN6_WRITE_HEADER; fw_engine = __func_reg_write_fw_domains(
uncore, offset); if (fw_engine) __force_wake_auto(uncore, fw_engine
); __raw_uncore_write8(uncore, reg, val); GEN6_WRITE_FOOTER; }
\
1450__gen_write(func, 16)static void func_write16(struct intel_uncore *uncore, i915_reg_t
reg, u16 val, _Bool trace) { enum forcewake_domains fw_engine
; GEN6_WRITE_HEADER; fw_engine = __func_reg_write_fw_domains(
uncore, offset); if (fw_engine) __force_wake_auto(uncore, fw_engine
); __raw_uncore_write16(uncore, reg, val); GEN6_WRITE_FOOTER;
}
\
1451__gen_write(func, 32)static void func_write32(struct intel_uncore *uncore, i915_reg_t
reg, u32 val, _Bool trace) { enum forcewake_domains fw_engine
; GEN6_WRITE_HEADER; fw_engine = __func_reg_write_fw_domains(
uncore, offset); if (fw_engine) __force_wake_auto(uncore, fw_engine
); __raw_uncore_write32(uncore, reg, val); GEN6_WRITE_FOOTER;
}
1452
1453__gen_reg_write_funcs(gen12_fwtable);
1454__gen_reg_write_funcs(gen11_fwtable);
1455__gen_reg_write_funcs(fwtable);
1456__gen_reg_write_funcs(gen8);
1457
1458#undef __gen_reg_write_funcs
1459#undef GEN6_WRITE_FOOTER
1460#undef GEN6_WRITE_HEADER
1461
1462#define __vgpu_write(x)static void vgpu_writex(struct intel_uncore *uncore, i915_reg_t
reg, ux val, _Bool trace) { ; __raw_uncore_writex(uncore, reg
, val); }
\
1463static void \
1464vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool_Bool trace) { \
1465 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1466 __raw_uncore_write##x(uncore, reg, val); \
1467}
1468__vgpu_write(8)static void vgpu_write8(struct intel_uncore *uncore, i915_reg_t
reg, u8 val, _Bool trace) { ; __raw_uncore_write8(uncore, reg
, val); }
1469__vgpu_write(16)static void vgpu_write16(struct intel_uncore *uncore, i915_reg_t
reg, u16 val, _Bool trace) { ; __raw_uncore_write16(uncore, reg
, val); }
1470__vgpu_write(32)static void vgpu_write32(struct intel_uncore *uncore, i915_reg_t
reg, u32 val, _Bool trace) { ; __raw_uncore_write32(uncore, reg
, val); }
1471
1472#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x)do { (uncore)->funcs.mmio_writeb = x_write8; (uncore)->
funcs.mmio_writew = x_write16; (uncore)->funcs.mmio_writel
= x_write32; } while (0)
\
1473do { \
1474 (uncore)->funcs.mmio_writeb = x##_write8; \
1475 (uncore)->funcs.mmio_writew = x##_write16; \
1476 (uncore)->funcs.mmio_writel = x##_write32; \
1477} while (0)
1478
1479#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x)do { (uncore)->funcs.mmio_readb = x_read8; (uncore)->funcs
.mmio_readw = x_read16; (uncore)->funcs.mmio_readl = x_read32
; (uncore)->funcs.mmio_readq = x_read64; } while (0)
\
1480do { \
1481 (uncore)->funcs.mmio_readb = x##_read8; \
1482 (uncore)->funcs.mmio_readw = x##_read16; \
1483 (uncore)->funcs.mmio_readl = x##_read32; \
1484 (uncore)->funcs.mmio_readq = x##_read64; \
1485} while (0)
1486
1487#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x)do { do { ((uncore))->funcs.mmio_writeb = x_write8; ((uncore
))->funcs.mmio_writew = x_write16; ((uncore))->funcs.mmio_writel
= x_write32; } while (0); (uncore)->funcs.write_fw_domains
= x_reg_write_fw_domains; } while (0)
\
1488do { \
1489 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x)do { ((uncore))->funcs.mmio_writeb = x_write8; ((uncore))->
funcs.mmio_writew = x_write16; ((uncore))->funcs.mmio_writel
= x_write32; } while (0)
; \
1490 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
1491} while (0)
1492
1493#define ASSIGN_READ_MMIO_VFUNCS(uncore, x)do { do { (uncore)->funcs.mmio_readb = x_read8; (uncore)->
funcs.mmio_readw = x_read16; (uncore)->funcs.mmio_readl = x_read32
; (uncore)->funcs.mmio_readq = x_read64; } while (0); (uncore
)->funcs.read_fw_domains = x_reg_read_fw_domains; } while (
0)
\
1494do { \
1495 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x)do { (uncore)->funcs.mmio_readb = x_read8; (uncore)->funcs
.mmio_readw = x_read16; (uncore)->funcs.mmio_readl = x_read32
; (uncore)->funcs.mmio_readq = x_read64; } while (0)
; \
1496 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
1497} while (0)
1498
1499static int __fw_domain_init(struct intel_uncore *uncore,
1500 enum forcewake_domain_id domain_id,
1501 i915_reg_t reg_set,
1502 i915_reg_t reg_ack)
1503{
1504 struct intel_uncore_forcewake_domain *d;
1505
1506 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT)((void)0);
1507 GEM_BUG_ON(uncore->fw_domain[domain_id])((void)0);
1508
1509 if (i915_inject_probe_failure(uncore->i915)({ ((void)0); 0; }))
1510 return -ENOMEM12;
1511
1512 d = kzalloc(sizeof(*d), GFP_KERNEL(0x0001 | 0x0004));
1513 if (!d)
1514 return -ENOMEM12;
1515
1516 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set))({ int __ret = !!((!i915_mmio_reg_valid(reg_set))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&uncore->
i915->drm))->dev), "", "drm_WARN_ON(" "!i915_mmio_reg_valid(reg_set)"
")"); __builtin_expect(!!(__ret), 0); })
;
1517 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack))({ int __ret = !!((!i915_mmio_reg_valid(reg_ack))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&uncore->
i915->drm))->dev), "", "drm_WARN_ON(" "!i915_mmio_reg_valid(reg_ack)"
")"); __builtin_expect(!!(__ret), 0); })
;
1518
1519 d->uncore = uncore;
1520 d->wake_count = 0;
1521 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
1522 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
1523
1524 d->id = domain_id;
1525
1526 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER))extern char _ctassert[(!(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER
))) ? 1 : -1 ] __attribute__((__unused__))
;
1527 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER))extern char _ctassert[(!(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER
))) ? 1 : -1 ] __attribute__((__unused__))
;
1528 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA))extern char _ctassert[(!(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA
))) ? 1 : -1 ] __attribute__((__unused__))
;
1529 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0))extern char _ctassert[(!(FORCEWAKE_MEDIA_VDBOX0 != (1 <<
FW_DOMAIN_ID_MEDIA_VDBOX0))) ? 1 : -1 ] __attribute__((__unused__
))
;
1530 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1))extern char _ctassert[(!(FORCEWAKE_MEDIA_VDBOX1 != (1 <<
FW_DOMAIN_ID_MEDIA_VDBOX1))) ? 1 : -1 ] __attribute__((__unused__
))
;
1531 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2))extern char _ctassert[(!(FORCEWAKE_MEDIA_VDBOX2 != (1 <<
FW_DOMAIN_ID_MEDIA_VDBOX2))) ? 1 : -1 ] __attribute__((__unused__
))
;
1532 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3))extern char _ctassert[(!(FORCEWAKE_MEDIA_VDBOX3 != (1 <<
FW_DOMAIN_ID_MEDIA_VDBOX3))) ? 1 : -1 ] __attribute__((__unused__
))
;
1533 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0))extern char _ctassert[(!(FORCEWAKE_MEDIA_VEBOX0 != (1 <<
FW_DOMAIN_ID_MEDIA_VEBOX0))) ? 1 : -1 ] __attribute__((__unused__
))
;
1534 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1))extern char _ctassert[(!(FORCEWAKE_MEDIA_VEBOX1 != (1 <<
FW_DOMAIN_ID_MEDIA_VEBOX1))) ? 1 : -1 ] __attribute__((__unused__
))
;
1535
1536 d->mask = BIT(domain_id)(1UL << (domain_id));
1537
1538#ifdef __linux__
1539 hrtimer_init(&d->timer, CLOCK_MONOTONIC3, HRTIMER_MODE_REL1);
1540 d->timer.function = intel_uncore_fw_release_timer;
1541#else
1542 timeout_set(&d->timer, intel_uncore_fw_release_timer, d);
1543#endif
1544
1545 uncore->fw_domains |= BIT(domain_id)(1UL << (domain_id));
1546
1547 fw_domain_reset(d);
1548
1549 uncore->fw_domain[domain_id] = d;
1550
1551 return 0;
1552}
1553
1554static void fw_domain_fini(struct intel_uncore *uncore,
1555 enum forcewake_domain_id domain_id)
1556{
1557 struct intel_uncore_forcewake_domain *d;
1558
1559 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT)((void)0);
1560
1561 d = fetch_and_zero(&uncore->fw_domain[domain_id])({ typeof(*&uncore->fw_domain[domain_id]) __T = *(&
uncore->fw_domain[domain_id]); *(&uncore->fw_domain
[domain_id]) = (typeof(*&uncore->fw_domain[domain_id])
)0; __T; })
;
1562 if (!d)
1563 return;
1564
1565 uncore->fw_domains &= ~BIT(domain_id)(1UL << (domain_id));
1566 drm_WARN_ON(&uncore->i915->drm, d->wake_count)({ int __ret = !!((d->wake_count)); if (__ret) printf("%s %s: "
"%s", dev_driver_string(((&uncore->i915->drm))->
dev), "", "drm_WARN_ON(" "d->wake_count" ")"); __builtin_expect
(!!(__ret), 0); })
;
1567 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer))({ int __ret = !!((timeout_del(&d->timer))); if (__ret
) printf("%s %s: " "%s", dev_driver_string(((&uncore->
i915->drm))->dev), "", "drm_WARN_ON(" "timeout_del(&d->timer)"
")"); __builtin_expect(!!(__ret), 0); })
;
1568 kfree(d);
1569}
1570
1571static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
1572{
1573 struct intel_uncore_forcewake_domain *d;
1574 int tmp;
1575
1576 for_each_fw_domain(d, uncore, tmp)for (tmp = ((uncore)->fw_domains); tmp ;) if (!(d = (uncore
)->fw_domain[({ int __idx = ffs(tmp) - 1; tmp &= ~(1UL
<< (__idx)); __idx; })])) {} else
1577 fw_domain_fini(uncore, d->id);
1578}
1579
1580static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
1581{
1582 struct drm_i915_privateinteldrm_softc *i915 = uncore->i915;
1583 int ret = 0;
1584
1585 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore))((void)0);
1586
1587#define fw_domain_init(uncore__, id__, set__, ack__) \
1588 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
1589
1590 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 11) {
1591 /* we'll prune the domains of missing engines later */
1592 intel_engine_mask_t emask = INTEL_INFO(i915)(&(i915)->__info)->platform_engine_mask;
1593 int i;
1594
1595 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1596 uncore->funcs.force_wake_put = fw_domains_put;
1597 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1598 FORCEWAKE_RENDER_GEN9((const i915_reg_t){ .reg = (0xa278) }),
1599 FORCEWAKE_ACK_RENDER_GEN9((const i915_reg_t){ .reg = (0x0D84) }));
1600 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
1601 FORCEWAKE_BLITTER_GEN9((const i915_reg_t){ .reg = (0xa188) }),
1602 FORCEWAKE_ACK_BLITTER_GEN9((const i915_reg_t){ .reg = (0x130044) }));
1603
1604 for (i = 0; i < I915_MAX_VCS4; i++) {
1605 if (!__HAS_ENGINE(emask, _VCS(i))((emask) & (1UL << ((VCS0 + (i))))))
1606 continue;
1607
1608 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
1609 FORCEWAKE_MEDIA_VDBOX_GEN11(i)((const i915_reg_t){ .reg = (0xa540 + (i) * 4) }),
1610 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)((const i915_reg_t){ .reg = (0x0D50 + (i) * 4) }));
1611 }
1612 for (i = 0; i < I915_MAX_VECS2; i++) {
1613 if (!__HAS_ENGINE(emask, _VECS(i))((emask) & (1UL << ((VECS0 + (i))))))
1614 continue;
1615
1616 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
1617 FORCEWAKE_MEDIA_VEBOX_GEN11(i)((const i915_reg_t){ .reg = (0xa560 + (i) * 4) }),
1618 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)((const i915_reg_t){ .reg = (0x0D70 + (i) * 4) }));
1619 }
1620 } else if (IS_GEN_RANGE(i915, 9, 10)(!!((&(i915)->__info)->gen_mask & ( 0 + 0 + (((
~0UL) >> (64 - (((10)) - 1) - 1)) & ((~0UL) <<
(((9)) - 1))))))
) {
1621 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
1622 uncore->funcs.force_wake_put = fw_domains_put;
1623 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1624 FORCEWAKE_RENDER_GEN9((const i915_reg_t){ .reg = (0xa278) }),
1625 FORCEWAKE_ACK_RENDER_GEN9((const i915_reg_t){ .reg = (0x0D84) }));
1626 fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
1627 FORCEWAKE_BLITTER_GEN9((const i915_reg_t){ .reg = (0xa188) }),
1628 FORCEWAKE_ACK_BLITTER_GEN9((const i915_reg_t){ .reg = (0x130044) }));
1629 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
1630 FORCEWAKE_MEDIA_GEN9((const i915_reg_t){ .reg = (0xa270) }), FORCEWAKE_ACK_MEDIA_GEN9((const i915_reg_t){ .reg = (0x0D88) }));
1631 } else if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW) || IS_CHERRYVIEW(i915)IS_PLATFORM(i915, INTEL_CHERRYVIEW)) {
1632 uncore->funcs.force_wake_get = fw_domains_get;
1633 uncore->funcs.force_wake_put = fw_domains_put;
1634 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1635 FORCEWAKE_VLV((const i915_reg_t){ .reg = (0x1300b0) }), FORCEWAKE_ACK_VLV((const i915_reg_t){ .reg = (0x1300b4) }));
1636 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
1637 FORCEWAKE_MEDIA_VLV((const i915_reg_t){ .reg = (0x1300b8) }), FORCEWAKE_ACK_MEDIA_VLV((const i915_reg_t){ .reg = (0x1300bc) }));
1638 } else if (IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) || IS_BROADWELL(i915)IS_PLATFORM(i915, INTEL_BROADWELL)) {
1639 uncore->funcs.force_wake_get =
1640 fw_domains_get_with_thread_status;
1641 uncore->funcs.force_wake_put = fw_domains_put;
1642 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1643 FORCEWAKE_MT((const i915_reg_t){ .reg = (0xa188) }), FORCEWAKE_ACK_HSW((const i915_reg_t){ .reg = (0x130044) }));
1644 } else if (IS_IVYBRIDGE(i915)IS_PLATFORM(i915, INTEL_IVYBRIDGE)) {
1645 u32 ecobus;
1646
1647 /* IVB configs may use multi-threaded forcewake */
1648
1649 /* A small trick here - if the bios hasn't configured
1650 * MT forcewake, and if the device is in RC6, then
1651 * force_wake_mt_get will not wake the device and the
1652 * ECOBUS read will return zero. Which will be
1653 * (correctly) interpreted by the test below as MT
1654 * forcewake being disabled.
1655 */
1656 uncore->funcs.force_wake_get =
1657 fw_domains_get_with_thread_status;
1658 uncore->funcs.force_wake_put = fw_domains_put;
1659
1660 /* We need to init first for ECOBUS access and then
1661 * determine later if we want to reinit, in case MT access is
1662 * not working. At this stage we don't know which flavour this
1663 * ivb is, so it is better to also reset the gen6 fw registers
1664 * before the ecobus check.
1665 */
1666
1667 __raw_uncore_write32(uncore, FORCEWAKE((const i915_reg_t){ .reg = (0xA18C) }), 0);
1668 __raw_posting_read(uncore, ECOBUS)((void)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg =
(0xa180) })))
;
1669
1670 ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1671 FORCEWAKE_MT((const i915_reg_t){ .reg = (0xa188) }), FORCEWAKE_MT_ACK((const i915_reg_t){ .reg = (0x130040) }));
1672 if (ret)
1673 goto out;
1674
1675 spin_lock_irq(&uncore->lock)mtx_enter(&uncore->lock);
1676 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
1677 ecobus = __raw_uncore_read32(uncore, ECOBUS((const i915_reg_t){ .reg = (0xa180) }));
1678 fw_domains_put(uncore, FORCEWAKE_RENDER);
1679 spin_unlock_irq(&uncore->lock)mtx_leave(&uncore->lock);
1680
1681 if (!(ecobus & FORCEWAKE_MT_ENABLE(1 << 5))) {
1682 drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n")do { } while(0);
1683 drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n")do { } while(0);
1684 fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
1685 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1686 FORCEWAKE((const i915_reg_t){ .reg = (0xA18C) }), FORCEWAKE_ACK((const i915_reg_t){ .reg = (0x130090) }));
1687 }
1688 } else if (IS_GEN(i915, 6)(0 + (&(i915)->__info)->gen == (6))) {
1689 uncore->funcs.force_wake_get =
1690 fw_domains_get_with_thread_status;
1691 uncore->funcs.force_wake_put = fw_domains_put;
1692 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
1693 FORCEWAKE((const i915_reg_t){ .reg = (0xA18C) }), FORCEWAKE_ACK((const i915_reg_t){ .reg = (0x130090) }));
1694 }
1695
1696#undef fw_domain_init
1697
1698 /* All future platforms are expected to require complex power gating */
1699 drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0)({ int __ret = !!((!ret && uncore->fw_domains == 0
)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&
i915->drm))->dev), "", "drm_WARN_ON(" "!ret && uncore->fw_domains == 0"
")"); __builtin_expect(!!(__ret), 0); })
;
1700
1701out:
1702 if (ret)
1703 intel_uncore_fw_domains_fini(uncore);
1704
1705 return ret;
1706}
1707
1708#define ASSIGN_FW_DOMAINS_TABLE(uncore, d){ (uncore)->fw_domains_table = (struct intel_forcewake_range
*)(d); (uncore)->fw_domains_table_entries = (sizeof(((d))
) / sizeof(((d))[0])); }
\
1709{ \
1710 (uncore)->fw_domains_table = \
1711 (struct intel_forcewake_range *)(d); \
1712 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d))(sizeof(((d))) / sizeof(((d))[0])); \
1713}
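Typical use of this macro (the callers sit outside this listing, so the exact table here is only an example): the per-platform init code points the uncore at one of the sorted tables defined earlier, after which find_fw_domain() bisects it, e.g.

	ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);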
1714
1715static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
1716 unsigned long action, void *data)
1717{
1718 struct intel_uncore *uncore = container_of(nb,
1719 struct intel_uncore, pmic_bus_access_nb);
1720
1721 switch (action) {
1722 case MBI_PMIC_BUS_ACCESS_BEGIN:
1723 /*
1724 * forcewake all now to make sure that we don't need to do a
1725 * forcewake later which on systems where this notifier gets
1726 * called requires the punit to access the shared pmic i2c
1727 * bus, which will be busy after this notification, leading to:
1728 * "render: timed out waiting for forcewake ack request."
1729 * errors.
1730 *
1731 * The notifier is unregistered during intel_runtime_suspend(),
1732 * so it's ok to access the HW here without holding a RPM
1733 * wake reference -> disable wakeref asserts for the time of
1734 * the access.
1735 */
1736 disable_rpm_wakeref_asserts(uncore->rpm);
1737 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1738 enable_rpm_wakeref_asserts(uncore->rpm);
1739 break;
1740 case MBI_PMIC_BUS_ACCESS_END:
1741 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1742 break;
1743 }
1744
1745 return NOTIFY_OK;
1746}
1747
1748static int uncore_mmio_setup(struct intel_uncore *uncore)
1749{
1750 struct drm_i915_private *i915 = uncore->i915;
1751 struct pci_dev *pdev = i915->drm.pdev;
1752 int mmio_bar;
1753 int mmio_size;
1754
1755 mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
1756 /*
1757 * Before gen4, the registers and the GTT are behind different BARs.
1758 * However, from gen4 onwards, the registers and the GTT are shared
1759 * in the same BAR, so we want to restrict this ioremap to avoid
1760 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
1761 * the register BAR remains the same size for all the earlier
1762 * generations up to Ironlake.
1763 */
1764 if (INTEL_GEN(i915) < 5)
1765 mmio_size = 512 * 1024;
Value stored to 'mmio_size' is never read
1766 else
1767 mmio_size = 2 * 1024 * 1024;
1768#ifdef __linux__
1769 uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
1770 if (uncore->regs == NULL) {
1771 drm_err(&i915->drm, "failed to map registers\n");
1772 return -EIO;
1773 }
1774#endif
1775
1776 return 0;
1777}
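The analyzer warning above fires because, in this build, the only consumer of mmio_size (and mmio_bar) is the pci_iomap() call inside the #ifdef __linux__ block, so the value stored on the gen < 5 path is never read. A possible way to make that explicit, sketched here purely for illustration and not a change made by the driver authors, is to keep the computation next to its only user:

/* Illustrative sketch only, assuming the same helpers used in uncore_mmio_setup(): */
static int uncore_mmio_setup_sketch(struct intel_uncore *uncore)
{
#ifdef __linux__
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	int mmio_size;

	/* Same BAR-size logic as above, but only compiled where it is consumed. */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}
#else
	(void)uncore;	/* register mapping is presumably set up elsewhere on this platform */
#endif
	return 0;
}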
1778
1779static void uncore_mmio_cleanup(struct intel_uncore *uncore)
1780{
1781#ifdef __linux__
1782 struct pci_dev *pdev = uncore->i915->drm.pdev;
1783
1784 pci_iounmap(pdev, uncore->regs);
1785#endif
1786}
1787
1788void intel_uncore_init_early(struct intel_uncore *uncore,
1789 struct drm_i915_private *i915)
1790{
1791 mtx_init(&uncore->lock, IPL_TTY);
1792 uncore->i915 = i915;
1793 uncore->rpm = &i915->runtime_pm;
1794 uncore->debug = &i915->mmio_debug;
1795}
1796
1797static void uncore_raw_init(struct intel_uncore *uncore)
1798{
1799 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
1800
1801 if (intel_vgpu_active(uncore->i915)) {
1802 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
1803 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
1804 } else if (IS_GEN(uncore->i915, 5)) {
1805 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
1806 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
1807 } else {
1808 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
1809 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
1810 }
1811}
1812
1813static int uncore_forcewake_init(struct intel_uncore *uncore)
1814{
1815 struct drm_i915_private *i915 = uncore->i915;
1816 int ret;
1817
1818 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
1819
1820 ret = intel_uncore_fw_domains_init(uncore);
1821 if (ret)
1822 return ret;
1823 forcewake_early_sanitize(uncore, 0);
1824
1825 if (IS_GEN_RANGE(i915, 6, 7)) {
1826 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
1827
1828 if (IS_VALLEYVIEW(i915)) {
1829 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
1830 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1831 } else {
1832 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1833 }
1834 } else if (IS_GEN(i915, 8)) {
1835 if (IS_CHERRYVIEW(i915)) {
1836 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
1837 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1838 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1839 } else {
1840 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
1841 ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
1842 }
1843 } else if (IS_GEN_RANGE(i915, 9, 10)) {
1844 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
1845 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
1846 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
1847 } else if (IS_GEN(i915, 11)) {
1848 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
1849 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
1850 ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
1851 } else {
1852 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
1853 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
1854 ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
1855 }
1856
1857 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
1858 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
1859
1860 return 0;
1861}
1862
1863int intel_uncore_init_mmio(struct intel_uncore *uncore)
1864{
1865 struct drm_i915_private *i915 = uncore->i915;
1866 int ret;
1867
1868 ret = uncore_mmio_setup(uncore);
1869 if (ret)
1870 return ret;
1871
1872 if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
1873 uncore->flags |= UNCORE_HAS_FORCEWAKE;
1874
1875 if (!intel_uncore_has_forcewake(uncore)) {
1876 uncore_raw_init(uncore);
1877 } else {
1878 ret = uncore_forcewake_init(uncore);
1879 if (ret)
1880 goto out_mmio_cleanup;
1881 }
1882
1883 /* make sure fw funcs are set if and only if we have fw */
1884 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
1885 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
1886 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
1887 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
1888
1889 if (HAS_FPGA_DBG_UNCLAIMED(i915))
1890 uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
1891
1892 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1893 uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
1894
1895 if (IS_GEN_RANGE(i915, 6, 7))
1896 uncore->flags |= UNCORE_HAS_FIFO;
1897
1898 /* clear out unclaimed reg detection bit */
1899 if (intel_uncore_unclaimed_mmio(uncore))
1900 drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
1901
1902 return 0;
1903
1904out_mmio_cleanup:
1905 uncore_mmio_cleanup(uncore);
1906
1907 return ret;
1908}
1909
1910/*
1911 * We might have detected that some engines are fused off after we initialized
1912 * the forcewake domains. Prune them, to make sure they only reference existing
1913 * engines.
1914 */
1915void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
1916 struct intel_gt *gt)
1917{
1918 enum forcewake_domains fw_domains = uncore->fw_domains;
1919 enum forcewake_domain_id domain_id;
1920 int i;
1921
1922 if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11)
1923 return;
1924
1925 for (i = 0; i < I915_MAX_VCS; i++) {
1926 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
1927
1928 if (HAS_ENGINE(gt, _VCS(i)))
1929 continue;
1930
1931 if (fw_domains & BIT(domain_id))
1932 fw_domain_fini(uncore, domain_id);
1933 }
1934
1935 for (i = 0; i < I915_MAX_VECS; i++) {
1936 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
1937
1938 if (HAS_ENGINE(gt, _VECS(i)))
1939 continue;
1940
1941 if (fw_domains & BIT(domain_id))
1942 fw_domain_fini(uncore, domain_id);
1943 }
1944}
1945
1946void intel_uncore_fini_mmio(struct intel_uncore *uncore)
1947{
1948 if (intel_uncore_has_forcewake(uncore)) {
1949 iosf_mbi_punit_acquire();
1950 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
1951 &uncore->pmic_bus_access_nb);
1952 intel_uncore_forcewake_reset(uncore);
1953 intel_uncore_fw_domains_fini(uncore);
1954 iosf_mbi_punit_release();
1955 }
1956
1957 uncore_mmio_cleanup(uncore);
1958}
1959
1960static const struct reg_whitelist {
1961 i915_reg_t offset_ldw;
1962 i915_reg_t offset_udw;
1963 u16 gen_mask;
1964 u8 size;
1965} reg_read_whitelist[] = { {
1966 .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1967 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1968 .gen_mask = INTEL_GEN_MASK(4, 12),
1969 .size = 8
1970} };
1971
1972int i915_reg_read_ioctl(struct drm_device *dev,
1973 void *data, struct drm_file *file)
1974{
1975 struct drm_i915_private *i915 = to_i915(dev);
1976 struct intel_uncore *uncore = &i915->uncore;
1977 struct drm_i915_reg_read *reg = data;
1978 struct reg_whitelist const *entry;
1979 intel_wakeref_t wakeref;
1980 unsigned int flags;
1981 int remain;
1982 int ret = 0;
1983
1984 entry = reg_read_whitelist;
1985 remain = ARRAY_SIZE(reg_read_whitelist);
1986 while (remain) {
1987 u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
1988
1989 GEM_BUG_ON(!is_power_of_2(entry->size));
1990 GEM_BUG_ON(entry->size > 8);
1991 GEM_BUG_ON(entry_offset & (entry->size - 1));
1992
1993 if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
1994 entry_offset == (reg->offset & -entry->size))
1995 break;
1996 entry++;
1997 remain--;
1998 }
1999
2000 if (!remain)
2001 return -EINVAL;
2002
2003 flags = reg->offset & (entry->size - 1);
2004
2005 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2006 if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
2007 reg->val = intel_uncore_read64_2x32(uncore,
2008 entry->offset_ldw,
2009 entry->offset_udw);
2010 else if (entry->size == 8 && flags == 0)
2011 reg->val = intel_uncore_read64(uncore,
2012 entry->offset_ldw);
2013 else if (entry->size == 4 && flags == 0)
2014 reg->val = intel_uncore_read(uncore, entry->offset_ldw);
2015 else if (entry->size == 2 && flags == 0)
2016 reg->val = intel_uncore_read16(uncore,
2017 entry->offset_ldw);
2018 else if (entry->size == 1 && flags == 0)
2019 reg->val = intel_uncore_read8(uncore,
2020 entry->offset_ldw);
2021 else
2022 ret = -EINVAL;
2023 }
2024
2025 return ret;
2026}
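i915_reg_read_ioctl() backs the I915_REG_READ ioctl: userspace passes a whitelisted register offset (optional flag bits live in the low, size-aligned bits) and gets the value back. A rough userspace sketch follows, assuming libdrm's drmIoctl() and the drm_i915_reg_read layout from the i915 uapi header; the 0x2358 offset is RING_TIMESTAMP(RENDER_RING_BASE) from the whitelist above, and the device node path and include paths may differ by platform.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	/* RING_TIMESTAMP(RENDER_RING_BASE) = 0x2000 + 0x358, size 8, flags 0 */
	struct drm_i915_reg_read rr = { .offset = 0x2358 };
	int fd = open("/dev/dri/card0", O_RDWR);
	int ret = 1;

	if (fd < 0)
		return 1;
	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0) {
		printf("render ring timestamp: 0x%llx\n",
		       (unsigned long long)rr.val);
		ret = 0;
	}
	close(fd);
	return ret;
}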
2027
2028/**
2029 * __intel_wait_for_register_fw - wait until register matches expected state
2030 * @uncore: the struct intel_uncore
2031 * @reg: the register to read
2032 * @mask: mask to apply to register value
2033 * @value: expected value
2034 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2035 * @slow_timeout_ms: slow timeout in millisecond
2036 * @out_value: optional placeholder to hold the register value
2037 *
2038 * This routine waits until the target register @reg contains the expected
2039 * @value after applying the @mask, i.e. it waits until ::
2040 *
2041 * (I915_READ_FW(reg) & mask) == value
2042 *
2043 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2044 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2045 * must not be larger than 20,000 microseconds.
2046 *
2047 * Note that this routine assumes the caller holds forcewake asserted, it is
2048 * not suitable for very long waits. See intel_wait_for_register() if you
2049 * wish to wait without holding forcewake for the duration (i.e. you expect
2050 * the wait to be slow).
2051 *
2052 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2053 */
2054int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2055 i915_reg_t reg,
2056 u32 mask,
2057 u32 value,
2058 unsigned int fast_timeout_us,
2059 unsigned int slow_timeout_ms,
2060 u32 *out_value)
2061{
2062 u32 reg_value = 0;
2063#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2064 int ret;
2065
2066 /* Catch any overuse of this function */
2067 might_sleep_if(slow_timeout_ms);
2068 GEM_BUG_ON(fast_timeout_us > 20000);
2069 GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2070
2071 ret = -ETIMEDOUT;
2072 if (fast_timeout_us && fast_timeout_us <= 20000)
2073 ret = _wait_for_atomic(done, fast_timeout_us, 0);
2074 if (ret && slow_timeout_ms)
2075 ret = wait_for(done, slow_timeout_ms);
2076
2077 if (out_value)
2078 *out_value = reg_value;
2079
2080 return ret;
2081#undef done
2082}
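A hedged usage sketch for the routine above: the caller is expected to already hold the relevant forcewake (and typically uncore->lock), and in atomic context must pass slow_timeout_ms == 0. The register and ack bit below are hypothetical, not taken from this driver.

/* Illustrative only: spin (no sleeping) until an ack bit latches. */
static int example_wait_ack_fw(struct intel_uncore *uncore,
			       i915_reg_t ack_reg, u32 ack_bit)
{
	/* Poll for up to 500us; slow_timeout_ms must be 0 in atomic context. */
	return __intel_wait_for_register_fw(uncore, ack_reg,
					    ack_bit, ack_bit,
					    500, 0, NULL);
}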
2083
2084/**
2085 * __intel_wait_for_register - wait until register matches expected state
2086 * @uncore: the struct intel_uncore
2087 * @reg: the register to read
2088 * @mask: mask to apply to register value
2089 * @value: expected value
2090 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2091 * @slow_timeout_ms: slow timeout in millisecond
2092 * @out_value: optional placeholder to hold the register value
2093 *
2094 * This routine waits until the target register @reg contains the expected
2095 * @value after applying the @mask, i.e. it waits until ::
2096 *
2097 * (I915_READ(reg) & mask) == value
2098 *
2099 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2100 *
2101 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2102 */
2103int __intel_wait_for_register(struct intel_uncore *uncore,
2104 i915_reg_t reg,
2105 u32 mask,
2106 u32 value,
2107 unsigned int fast_timeout_us,
2108 unsigned int slow_timeout_ms,
2109 u32 *out_value)
2110{
2111 unsigned fw =
2112 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2113 u32 reg_value;
2114 int ret;
2115
2116 might_sleep_if(slow_timeout_ms);
2117
2118 spin_lock_irq(&uncore->lock);
2119 intel_uncore_forcewake_get__locked(uncore, fw);
2120
2121 ret = __intel_wait_for_register_fw(uncore,
2122 reg, mask, value,
2123 fast_timeout_us, 0, &reg_value);
2124
2125 intel_uncore_forcewake_put__locked(uncore, fw);
2126 spin_unlock_irq(&uncore->lock);
2127
2128 if (ret && slow_timeout_ms)
2129 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2130 reg),
2131 (reg_value & mask) == value,
2132 slow_timeout_ms * 1000, 10, 1000);
2133
2134 /* just trace the final value */
2135 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2136
2137 if (out_value)
2138 *out_value = reg_value;
2139
2140 return ret;
2141}
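For the sleeping variant above, a similar hedged sketch: forcewake is taken internally for the spinning phase, so the caller only needs to keep the device runtime-resumed. The register, busy bit and timeouts here are hypothetical.

/* Illustrative only: wait for a busy bit to clear, allowing sleep. */
static int example_wait_idle(struct intel_uncore *uncore,
			     i915_reg_t status_reg, u32 busy_bit)
{
	u32 status;
	int err;

	/* Spin up to 2us, then sleep-wait up to 10ms. */
	err = __intel_wait_for_register(uncore, status_reg,
					busy_bit, 0,
					2, 10, &status);
	if (err)
		drm_dbg(&uncore->i915->drm,
			"register 0x%x still busy (0x%08x)\n",
			i915_mmio_reg_offset(status_reg), status);
	return err;
}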
2142
2143bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2144{
2145 bool ret;
2146
2147 spin_lock_irq(&uncore->debug->lock);
2148 ret = check_for_unclaimed_mmio(uncore);
2149 spin_unlock_irq(&uncore->debug->lock);
2150
2151 return ret;
2152}
2153
2154bool
2155intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2156{
2157 bool ret = false;
2158
2159 spin_lock_irq(&uncore->debug->lock);
2160
2161 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2162 goto out;
2163
2164 if (unlikely(check_for_unclaimed_mmio(uncore))) {
2165 if (!uncore->i915->params.mmio_debug) {
2166 drm_dbg(&uncore->i915->drm,
2167 "Unclaimed register detected, "
2168 "enabling oneshot unclaimed register reporting. "
2169 "Please use i915.mmio_debug=N for more information.\n");
2170 uncore->i915->params.mmio_debug++;
2171 }
2172 uncore->debug->unclaimed_mmio_check--;
2173 ret = true;
2174 }
2175
2176out:
2177 spin_unlock_irq(&uncore->debug->lock);
2178
2179 return ret;
2180}
2181
2182/**
2183 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2184 * a register
2185 * @uncore: pointer to struct intel_uncore
2186 * @reg: register in question
2187 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2188 *
2189 * Returns the set of forcewake domains that need to be taken (for example
2190 * with intel_uncore_forcewake_get()) for the specified register to be accessible in the
2191 * specified mode (read, write or read/write) with raw mmio accessors.
2192 *
2193 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2194 * callers to do FIFO management on their own or risk losing writes.
2195 */
2196enum forcewake_domains
2197intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2198 i915_reg_t reg, unsigned int op)
2199{
2200 enum forcewake_domains fw_domains = 0;
2201
2202 drm_WARN_ON(&uncore->i915->drm, !op);
2203
2204 if (!intel_uncore_has_forcewake(uncore))
2205 return 0;
2206
2207 if (op & FW_REG_READ)
2208 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2209
2210 if (op & FW_REG_WRITE)
2211 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2212
2213 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2214
2215 return fw_domains;
2216}
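A short sketch of the intended calling pattern for the helper above, combining it with an explicit forcewake grab and the raw (fw-unchecked) accessor; illustrative only, the wrapper function name is hypothetical.

/* Illustrative only: read a register holding exactly the domains it needs. */
static u32 example_read_with_fw(struct intel_uncore *uncore, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 val;

	intel_uncore_forcewake_get(uncore, fw);
	val = intel_uncore_read_fw(uncore, reg); /* raw read, no fw checks */
	intel_uncore_forcewake_put(uncore, fw);

	return val;
}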
2217
2218#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2219#include "selftests/mock_uncore.c"
2220#include "selftests/intel_uncore.c"
2221#endif