Bug Summary

File: dev/pci/drm/i915/gt/intel_gt.c
Warning: line 919, column 18
Value stored to 'pdev' during its initialization is never read

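The diagnostic comes from the analyzer's deadcode.DeadStores checker (enabled through -analyzer-checker=deadcode in the invocation below): a local variable's initializer is computed and stored, but the value is never read on any path before the variable goes out of scope. A minimal sketch of the pattern and the conventional fix — the helper names here are illustrative only, not taken from the driver:

    /* Before: flagged. 'pdev' is initialized but never read. */
    static int probe_before(struct inteldrm_softc *sc)
    {
            struct pci_dev *pdev = sc->drm.pdev;    /* dead store */

            return map_mmio(sc);                    /* uses 'sc', never 'pdev' */
    }

    /* After: drop the unused local. If the variable were assigned and
     * read later, removing only the initializer would also be enough. */
    static int probe_after(struct inteldrm_softc *sc)
    {
            return map_mmio(sc);
    }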
Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name intel_gt.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/gt/intel_gt.c
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include <drm/drm_managed.h>
7#include <drm/intel-gtt.h>
8
9#include "gem/i915_gem_internal.h"
10#include "gem/i915_gem_lmem.h"
11#include "pxp/intel_pxp.h"
12
13#include "i915_drv.h"
14#include "i915_perf_oa_regs.h"
15#include "intel_context.h"
16#include "intel_engine_pm.h"
17#include "intel_engine_regs.h"
18#include "intel_ggtt_gmch.h"
19#include "intel_gt.h"
20#include "intel_gt_buffer_pool.h"
21#include "intel_gt_clock_utils.h"
22#include "intel_gt_debugfs.h"
23#include "intel_gt_mcr.h"
24#include "intel_gt_pm.h"
25#include "intel_gt_regs.h"
26#include "intel_gt_requests.h"
27#include "intel_migrate.h"
28#include "intel_mocs.h"
29#include "intel_pci_config.h"
30#include "intel_pm.h"
31#include "intel_rc6.h"
32#include "intel_renderstate.h"
33#include "intel_rps.h"
34#include "intel_sa_media.h"
35#include "intel_gt_sysfs.h"
36#include "intel_uncore.h"
37#include "shmem_utils.h"
38
39void intel_gt_common_init_early(struct intel_gt *gt)
40{
41 mtx_init(gt->irq_lock, IPL_TTY);
42
43 INIT_LIST_HEAD(&gt->closed_vma);
44 mtx_init(&gt->closed_lock, IPL_TTY);
45
46 init_llist_head(&gt->watchdog.list);
47 INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
48
49 intel_gt_init_buffer_pool(gt);
50 intel_gt_init_reset(gt);
51 intel_gt_init_requests(gt);
52 intel_gt_init_timelines(gt);
53 rw_init(&gt->tlb.invalidate_lock, "itlbinv");
54 seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
55 intel_gt_pm_init_early(gt);
56
57 intel_uc_init_early(&gt->uc);
58 intel_rps_init_early(&gt->rps);
59}
60
61/* Preliminary initialization of Tile 0 */
62int intel_root_gt_init_early(struct drm_i915_private *i915)
63{
64 struct intel_gt *gt = to_gt(i915);
65
66 gt->i915 = i915;
67 gt->uncore = &i915->uncore;
68 gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
69 if (!gt->irq_lock)
70 return -ENOMEM;
71
72 intel_gt_common_init_early(gt);
73
74 return 0;
75}
76
77static int intel_gt_probe_lmem(struct intel_gt *gt)
78{
79 struct drm_i915_private *i915 = gt->i915;
80 unsigned int instance = gt->info.id;
81 int id = INTEL_REGION_LMEM_0 + instance;
82 struct intel_memory_region *mem;
83 int err;
84
85 mem = intel_gt_setup_lmem(gt);
86 if (IS_ERR(mem)) {
87 err = PTR_ERR(mem);
88 if (err == -ENODEV)
89 return 0;
90
91 drm_err(&i915->drm,
92 "Failed to setup region(%d) type=%d\n",
93 err, INTEL_MEMORY_LOCAL);
94 return err;
95 }
96
97 mem->id = id;
98 mem->instance = instance;
99
100 intel_memory_region_set_name(mem, "local%u", mem->instance);
101
102 GEM_BUG_ON(!HAS_REGION(i915, id));
103 GEM_BUG_ON(i915->mm.regions[id]);
104 i915->mm.regions[id] = mem;
105
106 return 0;
107}
108
109int intel_gt_assign_ggtt(struct intel_gt *gt)
110{
111 gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
112
113 return gt->ggtt ? 0 : -ENOMEM;
114}
115
116int intel_gt_init_mmio(struct intel_gt *gt)
117{
118 intel_gt_init_clock_frequency(gt);
119
120 intel_uc_init_mmio(&gt->uc);
121 intel_sseu_info_init(gt);
122 intel_gt_mcr_init(gt);
123
124 return intel_engines_init_mmio(gt);
125}
126
127static void init_unused_ring(struct intel_gt *gt, u32 base)
128{
129 struct intel_uncore *uncore = gt->uncore;
130
131 intel_uncore_write(uncore, RING_CTL(base), 0);
132 intel_uncore_write(uncore, RING_HEAD(base), 0);
133 intel_uncore_write(uncore, RING_TAIL(base), 0);
134 intel_uncore_write(uncore, RING_START(base), 0);
135}
136
137static void init_unused_rings(struct intel_gt *gt)
138{
139 struct drm_i915_private *i915 = gt->i915;
140
141 if (IS_I830(i915)) {
142 init_unused_ring(gt, PRB1_BASE);
143 init_unused_ring(gt, SRB0_BASE);
144 init_unused_ring(gt, SRB1_BASE);
145 init_unused_ring(gt, SRB2_BASE);
146 init_unused_ring(gt, SRB3_BASE);
147 } else if (GRAPHICS_VER(i915) == 2) {
148 init_unused_ring(gt, SRB0_BASE);
149 init_unused_ring(gt, SRB1_BASE);
150 } else if (GRAPHICS_VER(i915) == 3) {
151 init_unused_ring(gt, PRB1_BASE);
152 init_unused_ring(gt, PRB2_BASE);
153 }
154}
155
156int intel_gt_init_hw(struct intel_gt *gt)
157{
158 struct drm_i915_private *i915 = gt->i915;
159 struct intel_uncore *uncore = gt->uncore;
160 int ret;
161
162 gt->last_init_time = ktime_get();
163
164 /* Double layer security blanket, see i915_gem_init() */
165 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
166
167 if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
168 intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
169
170 if (IS_HASWELL(i915))
171 intel_uncore_write(uncore,
172 HSW_MI_PREDICATE_RESULT_2,
173 IS_HSW_GT3(i915) ?
174 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
175
176 /* Apply the GT workarounds... */
177 intel_gt_apply_workarounds(gt);
178 /* ...and determine whether they are sticking. */
179 intel_gt_verify_workarounds(gt, "init");
180
181 intel_gt_init_swizzling(gt);
182
183 /*
184 * At least 830 can leave some of the unused rings
185 * "active" (ie. head != tail) after resume which
186 * will prevent c3 entry. Makes sure all unused rings
187 * are totally idle.
188 */
189 init_unused_rings(gt);
190
191 ret = i915_ppgtt_init_hw(gt);
192 if (ret) {
193 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret)__drm_err("Enabling PPGTT failed (%d)\n", ret);
194 goto out;
195 }
196
197 /* We can't enable contexts until all firmware is loaded */
198 ret = intel_uc_init_hw(&gt->uc);
199 if (ret) {
200 i915_probe_error(i915, "Enabling uc failed (%d)\n", ret)__i915_printk(i915, 0 ? "\0017" : "\0013", "Enabling uc failed (%d)\n"
, ret)
;
201 goto out;
202 }
203
204 intel_mocs_init(gt);
205
206out:
207 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
208 return ret;
209}
210
211static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
212{
213 intel_uncore_rmw(uncore, reg, 0, set);
214}
215
216static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
217{
218 intel_uncore_rmw(uncore, reg, clr, 0);
219}
220
221static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
222{
223 intel_uncore_rmw(uncore, reg, 0, 0);
224}
225
226static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
227{
228 GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
229 GEN6_RING_FAULT_REG_POSTING_READ(engine);
230}
231
232void
233intel_gt_clear_error_registers(struct intel_gt *gt,
234 intel_engine_mask_t engine_mask)
235{
236 struct drm_i915_private *i915 = gt->i915;
237 struct intel_uncore *uncore = gt->uncore;
238 u32 eir;
239
240 if (GRAPHICS_VER(i915) != 2)
241 clear_register(uncore, PGTBL_ER);
242
243 if (GRAPHICS_VER(i915) < 4)
244 clear_register(uncore, IPEIR(RENDER_RING_BASE));
245 else
246 clear_register(uncore, IPEIR_I965);
247
248 clear_register(uncore, EIR);
249 eir = intel_uncore_read(uncore, EIR);
250 if (eir) {
251 /*
252 * some errors might have become stuck,
253 * mask them.
254 */
255 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir)___drm_dbg(((void *)0), DRM_UT_DRIVER, "EIR stuck: 0x%08x, masking\n"
, eir)
;
256 rmw_set(uncore, EMR((const i915_reg_t){ .reg = (0x20b4) }), eir);
257 intel_uncore_write(uncore, GEN2_IIR((const i915_reg_t){ .reg = (0x20a4) }),
258 I915_MASTER_ERROR_INTERRUPT(1 << 15));
259 }
260
261 if (GRAPHICS_VER(i915) >= 12) {
262 rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
263 intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
264 } else if (GRAPHICS_VER(i915) >= 8) {
265 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
266 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
267 } else if (GRAPHICS_VER(i915) >= 6) {
268 struct intel_engine_cs *engine;
269 enum intel_engine_id id;
270
271 for_each_engine_masked(engine, gt, engine_mask, id)
272 gen6_clear_engine_error_register(engine);
273 }
274}
275
276static void gen6_check_faults(struct intel_gt *gt)
277{
278 struct intel_engine_cs *engine;
279 enum intel_engine_id id;
280 u32 fault;
281
282 for_each_engine(engine, gt, id) {
283 fault = GEN6_RING_FAULT_REG_READ(engine);
284 if (fault & RING_FAULT_VALID) {
285 drm_dbg(&engine->i915->drm, "Unexpected fault\n"
286 "\tAddr: 0x%08lx\n"
287 "\tAddress space: %s\n"
288 "\tSource ID: %d\n"
289 "\tType: %d\n",
290 (unsigned long)(fault & LINUX_PAGE_MASK),
291 fault & RING_FAULT_GTTSEL_MASK ?
292 "GGTT" : "PPGTT",
293 RING_FAULT_SRCID(fault),
294 RING_FAULT_FAULT_TYPE(fault));
295 }
296 }
297}
298
299static void gen8_check_faults(struct intel_gt *gt)
300{
301 struct intel_uncore *uncore = gt->uncore;
302 i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
303 u32 fault;
304
305 if (GRAPHICS_VER(gt->i915) >= 12) {
306 fault_reg = GEN12_RING_FAULT_REG;
307 fault_data0_reg = GEN12_FAULT_TLB_DATA0;
308 fault_data1_reg = GEN12_FAULT_TLB_DATA1;
309 } else {
310 fault_reg = GEN8_RING_FAULT_REG;
311 fault_data0_reg = GEN8_FAULT_TLB_DATA0;
312 fault_data1_reg = GEN8_FAULT_TLB_DATA1;
313 }
314
315 fault = intel_uncore_read(uncore, fault_reg);
316 if (fault & RING_FAULT_VALID) {
317 u32 fault_data0, fault_data1;
318 u64 fault_addr;
319
320 fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
321 fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
322
323 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
324 ((u64)fault_data0 << 12);
325
326 drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
327 "\tAddr: 0x%08x_%08x\n"
328 "\tAddress space: %s\n"
329 "\tEngine ID: %d\n"
330 "\tSource ID: %d\n"
331 "\tType: %d\n",
332 upper_32_bits(fault_addr), lower_32_bits(fault_addr),
333 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
334 GEN8_RING_FAULT_ENGINE_ID(fault),
335 RING_FAULT_SRCID(fault),
336 RING_FAULT_FAULT_TYPE(fault));
337 }
338}
339
340void intel_gt_check_and_clear_faults(struct intel_gt *gt)
341{
342 struct drm_i915_private *i915 = gt->i915;
343
344 /* From GEN8 onwards we only have one 'All Engine Fault Register' */
345 if (GRAPHICS_VER(i915) >= 8)
346 gen8_check_faults(gt);
347 else if (GRAPHICS_VER(i915) >= 6)
348 gen6_check_faults(gt);
349 else
350 return;
351
352 intel_gt_clear_error_registers(gt, ALL_ENGINES);
353}
354
355void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
356{
357 struct intel_uncore *uncore = gt->uncore;
358 intel_wakeref_t wakeref;
359
360 /*
361 * No actual flushing is required for the GTT write domain for reads
362 * from the GTT domain. Writes to it "immediately" go to main memory
363 * as far as we know, so there's no chipset flush. It also doesn't
364 * land in the GPU render cache.
365 *
366 * However, we do have to enforce the order so that all writes through
367 * the GTT land before any writes to the device, such as updates to
368 * the GATT itself.
369 *
370 * We also have to wait a bit for the writes to land from the GTT.
371 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
372 * timing. This issue has only been observed when switching quickly
373 * between GTT writes and CPU reads from inside the kernel on recent hw,
374 * and it appears to only affect discrete GTT blocks (i.e. on LLC
375 * system agents we cannot reproduce this behaviour, until Cannonlake
376 * that was!).
377 */
378
379 wmb();
380
381 if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
382 return;
383
384 intel_gt_chipset_flush(gt);
385
386 with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
387 unsigned long flags;
388
389 spin_lock_irqsave(&uncore->lock, flags);
390 intel_uncore_posting_read_fw(uncore,
391 RING_HEAD(RENDER_RING_BASE));
392 spin_unlock_irqrestore(&uncore->lock, flags);
393 }
394}
395
396void intel_gt_chipset_flush(struct intel_gt *gt)
397{
398 wmb();
399 if (GRAPHICS_VER(gt->i915) < 6)
400 intel_ggtt_gmch_flush();
401}
402
403void intel_gt_driver_register(struct intel_gt *gt)
404{
405 intel_gsc_init(&gt->gsc, gt->i915);
406
407 intel_rps_driver_register(&gt->rps);
408
409 intel_gt_debugfs_register(gt);
410 intel_gt_sysfs_register(gt);
411}
412
413static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
414{
415 struct drm_i915_private *i915 = gt->i915;
416 struct drm_i915_gem_object *obj;
417 struct i915_vma *vma;
418 int ret;
419
420 obj = i915_gem_object_create_lmem(i915, size,
421 I915_BO_ALLOC_VOLATILE |
422 I915_BO_ALLOC_GPU_ONLY);
423 if (IS_ERR(obj))
424 obj = i915_gem_object_create_stolen(i915, size);
425 if (IS_ERR(obj))
426 obj = i915_gem_object_create_internal(i915, size);
427 if (IS_ERR(obj)) {
428 drm_err(&i915->drm, "Failed to allocate scratch page\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to allocate scratch page\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
429 return PTR_ERR(obj);
430 }
431
432 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
433 if (IS_ERR(vma)) {
434 ret = PTR_ERR(vma);
435 goto err_unref;
436 }
437
438 ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
439 if (ret)
440 goto err_unref;
441
442 gt->scratch = i915_vma_make_unshrinkable(vma);
443
444 return 0;
445
446err_unref:
447 i915_gem_object_put(obj);
448 return ret;
449}
450
451static void intel_gt_fini_scratch(struct intel_gt *gt)
452{
453 i915_vma_unpin_and_release(&gt->scratch, 0);
454}
455
456static struct i915_address_space *kernel_vm(struct intel_gt *gt)
457{
458 if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
459 return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
460 else
461 return i915_vm_get(&gt->ggtt->vm);
462}
463
464static int __engines_record_defaults(struct intel_gt *gt)
465{
466 struct i915_request *requests[I915_NUM_ENGINES] = {};
467 struct intel_engine_cs *engine;
468 enum intel_engine_id id;
469 int err = 0;
470
471 /*
472 * As we reset the gpu during very early sanitisation, the current
473 * register state on the GPU should reflect its default values.
474 * We load a context onto the hw (with restore-inhibit), then switch
475 * over to a second context to save that default register state. We
476 * can then prime every new context with that state so they all start
477 * from the same default HW values.
478 */
479
480 for_each_engine(engine, gt, id) {
481 struct intel_renderstate so;
482 struct intel_context *ce;
483 struct i915_request *rq;
484
485 /* We must be able to switch to something! */
486 GEM_BUG_ON(!engine->kernel_context);
487
488 ce = intel_context_create(engine);
489 if (IS_ERR(ce)) {
490 err = PTR_ERR(ce);
491 goto out;
492 }
493
494 err = intel_renderstate_init(&so, ce);
495 if (err)
496 goto err;
497
498 rq = i915_request_create(ce);
499 if (IS_ERR(rq)) {
500 err = PTR_ERR(rq);
501 goto err_fini;
502 }
503
504 err = intel_engine_emit_ctx_wa(rq);
505 if (err)
506 goto err_rq;
507
508 err = intel_renderstate_emit(&so, rq);
509 if (err)
510 goto err_rq;
511
512err_rq:
513 requests[id] = i915_request_get(rq);
514 i915_request_add(rq);
515err_fini:
516 intel_renderstate_fini(&so, ce);
517err:
518 if (err) {
519 intel_context_put(ce);
520 goto out;
521 }
522 }
523
524 /* Flush the default context image to memory, and enable powersaving. */
525 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
526 err = -EIO;
527 goto out;
528 }
529
530 for (id = 0; id < ARRAY_SIZE(requests); id++) {
531 struct i915_request *rq;
532 struct uvm_object *state;
533
534 rq = requests[id];
535 if (!rq)
536 continue;
537
538 if (rq->fence.error) {
539 err = -EIO;
540 goto out;
541 }
542
543 GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
544 if (!rq->context->state)
545 continue;
546
547 /* Keep a copy of the state's backing pages; free the obj */
548#ifdef __linux__
549 state = shmem_create_from_object(rq->context->state->obj);
550#else
551 state = uao_create_from_object(rq->context->state->obj);
552#endif
553 if (IS_ERR(state)) {
554 err = PTR_ERR(state);
555 goto out;
556 }
557 rq->engine->default_state = state;
558 }
559
560out:
561 /*
562 * If we have to abandon now, we expect the engines to be idle
563 * and ready to be torn-down. The quickest way we can accomplish
564 * this is by declaring ourselves wedged.
565 */
566 if (err)
567 intel_gt_set_wedged(gt);
568
569 for (id = 0; id < ARRAY_SIZE(requests); id++) {
570 struct intel_context *ce;
571 struct i915_request *rq;
572
573 rq = requests[id];
574 if (!rq)
575 continue;
576
577 ce = rq->context;
578 i915_request_put(rq);
579 intel_context_put(ce);
580 }
581 return err;
582}
583
584static int __engines_verify_workarounds(struct intel_gt *gt)
585{
586 struct intel_engine_cs *engine;
587 enum intel_engine_id id;
588 int err = 0;
589
590 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
591 return 0;
592
593 for_each_engine(engine, gt, id) {
594 if (intel_engine_verify_workarounds(engine, "load"))
595 err = -EIO;
596 }
597
598 /* Flush and restore the kernel context for safety */
599 if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
600 err = -EIO;
601
602 return err;
603}
604
605static void __intel_gt_disable(struct intel_gt *gt)
606{
607 intel_gt_set_wedged_on_fini(gt);
608
609 intel_gt_suspend_prepare(gt);
610 intel_gt_suspend_late(gt);
611
612 GEM_BUG_ON(intel_gt_pm_is_awake(gt));
613}
614
615int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
616{
617 long remaining_timeout;
618
619 /* If the device is asleep, we have no requests outstanding */
620 if (!intel_gt_pm_is_awake(gt))
621 return 0;
622
623 while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
624 &remaining_timeout)) > 0) {
625 cond_resched();
626 if (signal_pending(current))
627 return -EINTR;
628 }
629
630 if (timeout)
631 return timeout;
632
633 if (remaining_timeout < 0)
634 remaining_timeout = 0;
635
636 return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
637}
638
639int intel_gt_init(struct intel_gt *gt)
640{
641 int err;
642
643 err = i915_inject_probe_error(gt->i915, -ENODEV);
644 if (err)
645 return err;
646
647 intel_gt_init_workarounds(gt);
648
649 /*
650 * This is just a security blanket to placate dragons.
651 * On some systems, we very sporadically observe that the first TLBs
652 * used by the CS may be stale, despite us poking the TLB reset. If
653 * we hold the forcewake during initialisation these problems
654 * just magically go away.
655 */
656 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
657
658 err = intel_gt_init_scratch(gt,
659 GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
660 if (err)
661 goto out_fw;
662
663 intel_gt_pm_init(gt);
664
665 gt->vm = kernel_vm(gt);
666 if (!gt->vm) {
667 err = -ENOMEM;
668 goto err_pm;
669 }
670
671 intel_set_mocs_index(gt);
672
673 err = intel_engines_init(gt);
674 if (err)
675 goto err_engines;
676
677 err = intel_uc_init(&gt->uc);
678 if (err)
679 goto err_engines;
680
681 err = intel_gt_resume(gt);
682 if (err)
683 goto err_uc_init;
684
685 err = intel_gt_init_hwconfig(gt);
686 if (err)
687 drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to retrieve hwconfig table: %pe\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ERR_PTR
(err))
688 ERR_PTR(err))printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to retrieve hwconfig table: %pe\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ERR_PTR
(err))
;
689
690 err = __engines_record_defaults(gt);
691 if (err)
692 goto err_gt;
693
694 err = __engines_verify_workarounds(gt);
695 if (err)
696 goto err_gt;
697
698 err = i915_inject_probe_error(gt->i915, -EIO);
699 if (err)
700 goto err_gt;
701
702 intel_uc_init_late(&gt->uc);
703
704 intel_migrate_init(&gt->migrate, gt);
705
706 intel_pxp_init(&gt->pxp);
707
708 goto out_fw;
709err_gt:
710 __intel_gt_disable(gt);
711 intel_uc_fini_hw(&gt->uc);
712err_uc_init:
713 intel_uc_fini(&gt->uc);
714err_engines:
715 intel_engines_release(gt);
716 i915_vm_put(fetch_and_zero(&gt->vm));
717err_pm:
718 intel_gt_pm_fini(gt);
719 intel_gt_fini_scratch(gt);
720out_fw:
721 if (err)
722 intel_gt_set_wedged_on_init(gt);
723 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
724 return err;
725}
726
727void intel_gt_driver_remove(struct intel_gt *gt)
728{
729 __intel_gt_disable(gt);
730
731 intel_migrate_fini(&gt->migrate);
732 intel_uc_driver_remove(&gt->uc);
733
734 intel_engines_release(gt);
735
736 intel_gt_flush_buffer_pool(gt);
737}
738
739void intel_gt_driver_unregister(struct intel_gt *gt)
740{
741 intel_wakeref_t wakeref;
742
743 intel_gt_sysfs_unregister(gt);
744 intel_rps_driver_unregister(&gt->rps);
745 intel_gsc_fini(&gt->gsc);
746
747 intel_pxp_fini(&gt->pxp);
748
749 /*
750 * Upon unregistering the device to prevent any new users, cancel
751 * all in-flight requests so that we can quickly unbind the active
752 * resources.
753 */
754 intel_gt_set_wedged_on_fini(gt);
755
756 /* Scrub all HW state upon release */
757 with_intel_runtime_pm(gt->uncore->rpm, wakeref)
758 __intel_gt_reset(gt, ALL_ENGINES);
759}
760
761void intel_gt_driver_release(struct intel_gt *gt)
762{
763 struct i915_address_space *vm;
764
765 vm = fetch_and_zero(&gt->vm);
766 if (vm) /* FIXME being called twice on error paths :( */
767 i915_vm_put(vm);
768
769 intel_wa_list_free(&gt->wa_list);
770 intel_gt_pm_fini(gt);
771 intel_gt_fini_scratch(gt);
772 intel_gt_fini_buffer_pool(gt);
773 intel_gt_fini_hwconfig(gt);
774}
775
776void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
777{
778 struct intel_gt *gt;
779 unsigned int id;
780
781 /* We need to wait for inflight RCU frees to release their grip */
782 rcu_barrier();
783
784 for_each_gt(gt, i915, id) {
785 intel_uc_driver_late_release(&gt->uc);
786 intel_gt_fini_requests(gt);
787 intel_gt_fini_reset(gt);
788 intel_gt_fini_timelines(gt);
789 mutex_destroy(&gt->tlb.invalidate_lock);
790 intel_engines_free(gt);
791 }
792}
793
794static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
795{
796 int ret;
797
798 if (!gt_is_root(gt)) {
799 struct intel_uncore *uncore;
800 spinlock_t *irq_lock;
801
802 uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
803 if (!uncore)
804 return -ENOMEM;
805
806 irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
807 if (!irq_lock)
808 return -ENOMEM;
809
810 gt->uncore = uncore;
811 gt->irq_lock = irq_lock;
812
813 intel_gt_common_init_early(gt);
814 }
815
816 intel_uncore_init_early(gt->uncore, gt);
817
818 ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
819 if (ret)
820 return ret;
821
822 gt->phys_addr = phys_addr;
823
824 return 0;
825}
826
827#ifdef __linux__
828
829int intel_gt_probe_all(struct drm_i915_private *i915)
830{
831 struct pci_dev *pdev = i915->drm.pdev;
832 struct intel_gt *gt = &i915->gt0;
833 const struct intel_gt_definition *gtdef;
834 phys_addr_t phys_addr;
835 unsigned int mmio_bar;
836 unsigned int i;
837 int ret;
838
839 mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
840 phys_addr = pci_resource_start(pdev, mmio_bar);
841
842 /*
843 * We always have at least one primary GT on any device
844 * and it has been already initialized early during probe
845 * in i915_driver_probe()
846 */
847 gt->i915 = i915;
848 gt->name = "Primary GT";
849 gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
850
851 drm_dbg(&i915->drm, "Setting up %s\n", gt->name)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Setting up %s\n",
gt->name)
;
852 ret = intel_gt_tile_setup(gt, phys_addr);
853 if (ret)
854 return ret;
855
856 i915->gt[0] = gt;
857
858 if (!HAS_EXTRA_GT_LIST(i915))
859 return 0;
860
861 for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
862 gtdef->name != NULL;
863 i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
864 gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
865 if (!gt) {
866 ret = -ENOMEM;
867 goto err;
868 }
869
870 gt->i915 = i915;
871 gt->name = gtdef->name;
872 gt->type = gtdef->type;
873 gt->info.engine_mask = gtdef->engine_mask;
874 gt->info.id = i;
875
876 drm_dbg(&i915->drm, "Setting up %s\n", gt->name)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Setting up %s\n",
gt->name)
;
877 if (GEM_WARN_ON(range_overflows_t(resource_size_t,
878 gtdef->mapping_base,
879 SZ_16M,
880 pci_resource_len(pdev, mmio_bar)))) {
881 ret = -ENODEV;
882 goto err;
883 }
884
885 switch (gtdef->type) {
886 case GT_TILE:
887 ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
888 break;
889
890 case GT_MEDIA:
891 ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
892 gtdef->gsi_offset);
893 break;
894
895 case GT_PRIMARY:
896 /* Primary GT should not appear in extra GT list */
897 default:
898 MISSING_CASE(gtdef->type);
899 ret = -ENODEV;
900 }
901
902 if (ret)
903 goto err;
904
905 i915->gt[i] = gt;
906 }
907
908 return 0;
909
910err:
911 i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret)__i915_printk(i915, 0 ? "\0017" : "\0013", "Failed to initialize %s! (%d)\n"
, gtdef->name, ret)
;
912 return ret;
913}
914
915#else
916
917int intel_gt_probe_all(struct drm_i915_private *i915)
918{
919 struct pci_dev *pdev = i915->drm.pdev;
Value stored to 'pdev' during its initialization is never read
920 struct intel_gt *gt = &i915->gt0;
921 const struct intel_gt_definition *gtdef;
922 phys_addr_t phys_addr;
923 bus_size_t len;
924 pcireg_t type;
925 int flags;
926 unsigned int mmio_bar;
927 unsigned int i;
928 int ret;
929
930 mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
931 type = pci_mapreg_type(i915->pc, i915->tag, 0x10 + (mmio_bar * 4));
932 ret = -pci_mapreg_info(i915->pc, i915->tag, 0x10 + (mmio_bar * 4), type,
933 &phys_addr, &len, NULL);
934 if (ret)
935 return ret;
936
937 /*
938 * We always have at least one primary GT on any device
939 * and it has been already initialized early during probe
940 * in i915_driver_probe()
941 */
942 gt->i915 = i915;
943 gt->name = "Primary GT";
944 gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
945
946 drm_dbg(&i915->drm, "Setting up %s\n", gt->name)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Setting up %s\n",
gt->name)
;
947 ret = intel_gt_tile_setup(gt, phys_addr);
948 if (ret)
949 return ret;
950
951 i915->gt[0] = gt;
952
953 if (!HAS_EXTRA_GT_LIST(i915))
954 return 0;
955
956 for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
957 gtdef->name != NULL;
958 i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
959 gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
960 if (!gt) {
961 ret = -ENOMEM;
962 goto err;
963 }
964
965 gt->i915 = i915;
966 gt->name = gtdef->name;
967 gt->type = gtdef->type;
968 gt->info.engine_mask = gtdef->engine_mask;
969 gt->info.id = i;
970
971 drm_dbg(&i915->drm, "Setting up %s\n", gt->name)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Setting up %s\n",
gt->name)
;
972 if (GEM_WARN_ON(range_overflows_t(resource_size_t,
973 gtdef->mapping_base,
974 SZ_16M,
975 len))) {
976 ret = -ENODEV;
977 goto err;
978 }
979
980 switch (gtdef->type) {
981 case GT_TILE:
982 ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
983 break;
984
985 case GT_MEDIA:
986 ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
987 gtdef->gsi_offset);
988 break;
989
990 case GT_PRIMARY:
991 /* Primary GT should not appear in extra GT list */
992 default:
993 MISSING_CASE(gtdef->type);
994 ret = -ENODEV;
995 }
996
997 if (ret)
998 goto err;
999
1000 i915->gt[i] = gt;
1001 }
1002
1003 return 0;
1004
1005err:
1006 i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret)__i915_printk(i915, 0 ? "\0017" : "\0013", "Failed to initialize %s! (%d)\n"
, gtdef->name, ret)
;
1007 return ret;
1008}
1009
1010#endif
1011
1012int intel_gt_tiles_init(struct drm_i915_private *i915)
1013{
1014 struct intel_gt *gt;
1015 unsigned int id;
1016 int ret;
1017
1018 for_each_gt(gt, i915, id) {
1019 ret = intel_gt_probe_lmem(gt);
1020 if (ret)
1021 return ret;
1022 }
1023
1024 return 0;
1025}
1026
1027void intel_gt_info_print(const struct intel_gt_info *info,
1028 struct drm_printer *p)
1029{
1030 drm_printf(p, "available engines: %x\n", info->engine_mask);
1031
1032 intel_sseu_dump(&info->sseu, p);
1033}
1034
1035struct reg_and_bit {
1036 i915_reg_t reg;
1037 u32 bit;
1038};
1039
1040static struct reg_and_bit
1041get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
1042 const i915_reg_t *regs, const unsigned int num)
1043{
1044 const unsigned int class = engine->class;
1045 struct reg_and_bit rb = { };
1046
1047 if (drm_WARN_ON_ONCE(&engine->i915->drm,
1048 class >= num || !regs[class].reg))
1049 return rb;
1050
1051 rb.reg = regs[class];
1052 if (gen8 && class == VIDEO_DECODE_CLASS)
1053 rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
1054 else
1055 rb.bit = engine->instance;
1056
1057 rb.bit = BIT(rb.bit);
1058
1059 return rb;
1060}
1061
1062static void mmio_invalidate_full(struct intel_gt *gt)
1063{
1064 static const i915_reg_t gen8_regs[] = {
1065 [RENDER_CLASS] = GEN8_RTCR,
1066 [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
1067 [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
1068 [COPY_ENGINE_CLASS] = GEN8_BTCR,
1069 };
1070 static const i915_reg_t gen12_regs[] = {
1071 [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
1072 [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
1073 [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
1074 [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
1075 [COMPUTE_CLASS] = GEN12_COMPCTX_TLB_INV_CR,
1076 };
1077 struct drm_i915_private *i915 = gt->i915;
1078 struct intel_uncore *uncore = gt->uncore;
1079 struct intel_engine_cs *engine;
1080 intel_engine_mask_t awake, tmp;
1081 enum intel_engine_id id;
1082 const i915_reg_t *regs;
1083 unsigned int num = 0;
1084
1085 if (GRAPHICS_VER(i915) == 12) {
1086 regs = gen12_regs;
1087 num = ARRAY_SIZE(gen12_regs);
1088 } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
1089 regs = gen8_regs;
1090 num = ARRAY_SIZE(gen8_regs);
1091 } else if (GRAPHICS_VER(i915) < 8) {
1092 return;
1093 }
1094
1095 if (drm_WARN_ONCE(&i915->drm, !num,
1096 "Platform does not implement TLB invalidation!"))
1097 return;
1098
1099 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1100
1101 spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
1102
1103 awake = 0;
1104 for_each_engine(engine, gt, id) {
1105 struct reg_and_bit rb;
1106
1107 if (!intel_engine_pm_is_awake(engine))
1108 continue;
1109
1110 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
1111 if (!i915_mmio_reg_offset(rb.reg))
1112 continue;
1113
1114 if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
1115 engine->class == VIDEO_ENHANCEMENT_CLASS ||
1116 engine->class == COMPUTE_CLASS))
1117 rb.bit = _MASKED_BIT_ENABLE(rb.bit);
1118
1119 intel_uncore_write_fw(uncore, rb.reg, rb.bit);
1120 awake |= engine->mask;
1121 }
1122
1123 GT_TRACE(gt, "invalidated engines %08x\n", awake)do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
1124
1125 /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
1126 if (awake &&
1127 (IS_TIGERLAKE(i915) ||
1128 IS_DG1(i915) ||
1129 IS_ROCKETLAKE(i915) ||
1130 IS_ALDERLAKE_S(i915) ||
1131 IS_ALDERLAKE_P(i915)))
1132 intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
1133
1134 spin_unlock_irq(&uncore->lock);
1135
1136 for_each_engine_masked(engine, gt, awake, tmp) {
1137 struct reg_and_bit rb;
1138
1139 /*
1140 * HW architecture suggest typical invalidation time at 40us,
1141 * with pessimistic cases up to 100us and a recommendation to
1142 * cap at 1ms. We go a bit higher just in case.
1143 */
1144 const unsigned int timeout_us = 100;
1145 const unsigned int timeout_ms = 4;
1146
1147 rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
1148 if (__intel_wait_for_register_fw(uncore,
1149 rb.reg, rb.bit, 0,
1150 timeout_us, timeout_ms,
1151 NULL))
1152 drm_err_ratelimited(&gt->i915->drm,
1153 "%s TLB invalidation did not complete in %ums!\n",
1154 engine->name, timeout_ms);
1155 }
1156
1157 /*
1158 * Use delayed put since a) we mostly expect a flurry of TLB
1159 * invalidations so it is good to avoid paying the forcewake cost and
1160 * b) it works around a bug in Icelake which cannot cope with too rapid
1161 * transitions.
1162 */
1163 intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
1164}
1165
1166static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
1167{
1168 u32 cur = intel_gt_tlb_seqno(gt);
1169
1170 /* Only skip if a *full* TLB invalidate barrier has passed */
1171 return (s32)(cur - roundup2(seqno, 2)) > 0;
1172}
1173
1174void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
1175{
1176 intel_wakeref_t wakeref;
1177
1178 if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
1179 return;
1180
1181 if (intel_gt_is_wedged(gt))
1182 return;
1183
1184 if (tlb_seqno_passed(gt, seqno))
1185 return;
1186
1187 with_intel_gt_pm_if_awake(gt, wakeref) {
1188 mutex_lock(&gt->tlb.invalidate_lock);
1189 if (tlb_seqno_passed(gt, seqno))
1190 goto unlock;
1191
1192 mmio_invalidate_full(gt);
1193
1194#ifdef notyet
1195 write_seqcount_invalidate(&gt->tlb.seqno);
1196#else
1197 barrier();
1198 gt->tlb.seqno.seq.sequence += 2;
1199#endif
1200unlock:
1201 mutex_unlock(&gt->tlb.invalidate_lock);
1202 }
1203}
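
Note on the flagged line: in this OpenBSD (#else) branch of intel_gt_probe_all(), the MMIO BAR address and length come from pci_mapreg_type()/pci_mapreg_info() on i915->pc and i915->tag, so 'pdev' is never read after its initialization at line 919; only the Linux (#ifdef __linux__) variant above uses 'pdev', for pci_resource_start() and pci_resource_len(). Assuming that is the whole story, the likely fix is simply to delete the dead declaration, sketched here in diff form (not an applied patch):

    -	struct pci_dev *pdev = i915->drm.pdev;
     	struct intel_gt *gt = &i915->gt0;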