Bug Summary

File: dev/pci/drm/i915/gt/intel_ring_submission.c
Warning: line 1234, column 27
Value stored to 'i915' during its initialization is never read
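
This diagnostic comes from the deadcode.DeadStores checker: a local variable is initialized, but that initial value is overwritten or discarded before it is ever read. The flagged declaration at line 1234 lies beyond the excerpt below; as a minimal, self-contained sketch of the pattern (hypothetical names, not the driver code itself):

    struct engine { void *gt_i915; void *dev_i915; };

    static void *resolve_i915(struct engine *e, int prefer_gt)
    {
        void *i915 = e->dev_i915;   /* flagged: value stored here is never read */

        if (prefer_gt)
            i915 = e->gt_i915;      /* every path reassigns i915... */
        else
            i915 = e->dev_i915;     /* ...before the first read */

        return i915;
    }

The usual remedy is to declare the variable uninitialized ("void *i915;") or to fold the initializer into the first real use; either silences the warning without changing behavior.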

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name intel_ring_submission.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/gt/intel_ring_submission.c
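
The line above is the full cc1 invocation scan-build recorded for this translation unit; the dead-store diagnostic is enabled by its -analyzer-checker=deadcode flag. As a rough sketch (include paths and defines abbreviated; the real set is the -I/-D list shown above), the check might be reproduced standalone with:

    clang --analyze -Xclang -analyzer-checker=deadcode \
        -I /usr/src/sys -I /usr/src/sys/dev/pci/drm/include -D _KERNEL \
        /usr/src/sys/dev/pci/drm/i915/gt/intel_ring_submission.c
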
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2008-2021 Intel Corporation
4  */
5
6 #include <drm/drm_cache.h>
7
8 #include "gem/i915_gem_internal.h"
9
10 #include "gen2_engine_cs.h"
11 #include "gen6_engine_cs.h"
12 #include "gen6_ppgtt.h"
13 #include "gen7_renderclear.h"
14 #include "i915_drv.h"
15 #include "i915_mitigations.h"
16 #include "intel_breadcrumbs.h"
17 #include "intel_context.h"
18 #include "intel_engine_regs.h"
19 #include "intel_gt.h"
20 #include "intel_gt_irq.h"
21 #include "intel_gt_regs.h"
22 #include "intel_reset.h"
23 #include "intel_ring.h"
24 #include "shmem_utils.h"
25 #include "intel_engine_heartbeat.h"
26 #include "intel_engine_pm.h"
27
28 /* Rough estimate of the typical request size, performing a flush,
29  * set-context and then emitting the batch.
30  */
31 #define LEGACY_REQUEST_SIZE 200
32
33 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
34 {
35     /*
36      * Keep the render interrupt unmasked as this papers over
37      * lost interrupts following a reset.
38      */
39     if (engine->class == RENDER_CLASS) {
40         if (GRAPHICS_VER(engine->i915) >= 6)
41             mask &= ~BIT(0);
42         else
43             mask &= ~I915_USER_INTERRUPT;
44     }
45
46     intel_engine_set_hwsp_writemask(engine, mask);
47 }
48
49 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
50 {
51     u32 addr;
52
53     addr = lower_32_bits(phys);
54     if (GRAPHICS_VER(engine->i915) >= 4)
55         addr |= (phys >> 28) & 0xf0;
56
57     intel_uncore_write(engine->uncore, HWS_PGA, addr);
58 }
59
60 static struct vm_page *status_page(struct intel_engine_cs *engine)
61 {
62     struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
63
64     GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
65     return sg_page(obj->mm.pages->sgl);
66 }
67
68 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
69 {
70     set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
71     set_hwstam(engine, ~0u);
72 }
73
74 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
75 {
76     i915_reg_t hwsp;
77
78     /*
79      * The ring status page addresses are no longer next to the rest of
80      * the ring registers as of gen7.
81      */
82     if (GRAPHICS_VER(engine->i915) == 7) {
83         switch (engine->id) {
84         /*
85          * No more rings exist on Gen7. Default case is only to shut up
86          * gcc switch check warning.
87          */
88         default:
89             GEM_BUG_ON(engine->id);
90             fallthrough;
91         case RCS0:
92             hwsp = RENDER_HWS_PGA_GEN7;
93             break;
94         case BCS0:
95             hwsp = BLT_HWS_PGA_GEN7;
96             break;
97         case VCS0:
98             hwsp = BSD_HWS_PGA_GEN7;
99             break;
100         case VECS0:
101             hwsp = VEBOX_HWS_PGA_GEN7;
102             break;
103         }
104     } else if (GRAPHICS_VER(engine->i915) == 6) {
105         hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
106     } else {
107         hwsp = RING_HWS_PGA(engine->mmio_base);
108     }
109
110     intel_uncore_write_fw(engine->uncore, hwsp, offset);
111     intel_uncore_posting_read_fw(engine->uncore, hwsp);
112 }
113
114 static void flush_cs_tlb(struct intel_engine_cs *engine)
115 {
116     if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
117         return;
118
119     /* ring should be idle before issuing a sync flush */
120     if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
121         drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
122                  engine->name);
123
124     ENGINE_WRITE_FW(engine, RING_INSTPM,
125                     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
126                                        INSTPM_SYNC_FLUSH));
127     if (__intel_wait_for_register_fw(engine->uncore,
128                                      RING_INSTPM(engine->mmio_base),
129                                      INSTPM_SYNC_FLUSH, 0,
130                                      2000, 0, NULL))
131         ENGINE_TRACE(engine,
132                      "wait for SyncFlush to complete for TLB invalidation timed out\n");
133 }
134
135 static void ring_setup_status_page(struct intel_engine_cs *engine)
136 {
137     set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
138     set_hwstam(engine, ~0u);
139
140     flush_cs_tlb(engine);
141 }
142
143 static struct i915_address_space *vm_alias(struct i915_address_space *vm)
144 {
145     if (i915_is_ggtt(vm))
146         vm = &i915_vm_to_ggtt(vm)->alias->vm;
147
148     return vm;
149 }
150
151 static u32 pp_dir(struct i915_address_space *vm)
152 {
153     return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
154 }
155
156 static void set_pp_dir(struct intel_engine_cs *engine)
157 {
158     struct i915_address_space *vm = vm_alias(engine->gt->vm);
159
160     if (!vm)
161         return;
162
163     ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
164     ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
165
166     if (GRAPHICS_VER(engine->i915) >= 7) {
167         ENGINE_WRITE_FW(engine,
168                         RING_MODE_GEN7,
169                         _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
170     }
171 }
172
173 static bool stop_ring(struct intel_engine_cs *engine)
174 {
175     /* Empty the ring by skipping to the end */
176     ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
177     ENGINE_POSTING_READ(engine, RING_HEAD);
178
179     /* The ring must be empty before it is disabled */
180     ENGINE_WRITE_FW(engine, RING_CTL, 0);
181     ENGINE_POSTING_READ(engine, RING_CTL);
182
183     /* Then reset the disabled ring */
184     ENGINE_WRITE_FW(engine, RING_HEAD, 0);
185     ENGINE_WRITE_FW(engine, RING_TAIL, 0);
186
187     return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
188 }
189
190 static int xcs_resume(struct intel_engine_cs *engine)
191 {
192     struct intel_ring *ring = engine->legacy.ring;
193
194     ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
195                  ring->head, ring->tail);
196
197     /*
198      * Double check the ring is empty & disabled before we resume. Called
199      * from atomic context during PCI probe, so _hardirq().
200      */
201     intel_synchronize_hardirq(engine->i915);
202     if (!stop_ring(engine))
203         goto err;
204
205     if (HWS_NEEDS_PHYSICAL(engine->i915))
206         ring_setup_phys_status_page(engine);
207     else
208         ring_setup_status_page(engine);
209
210     intel_breadcrumbs_reset(engine->breadcrumbs);
211
212     /* Enforce ordering by reading HEAD register back */
213     ENGINE_POSTING_READ(engine, RING_HEAD);
214
215     /*
216      * Initialize the ring. This must happen _after_ we've cleared the ring
217      * registers with the above sequence (the readback of the HEAD registers
218      * also enforces ordering), otherwise the hw might lose the new ring
219      * register values.
220      */
221     ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
222
223     /* Check that the ring offsets point within the ring! */
224     GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
225     GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
226     intel_ring_update_space(ring);
227
228     set_pp_dir(engine);
229
230     /* First wake the ring up to an empty/idle ring */
231     ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
232     ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
233     ENGINE_POSTING_READ(engine, RING_TAIL);
234
235     ENGINE_WRITE_FW(engine, RING_CTL,
236                     RING_CTL_SIZE(ring->size) | RING_VALID);
237
238     /* If the head is still not zero, the ring is dead */
239     if (__intel_wait_for_register_fw(engine->uncore,
240                                      RING_CTL(engine->mmio_base),
241                                      RING_VALID, RING_VALID,
242                                      5000, 0, NULL))
243         goto err;
244
245     if (GRAPHICS_VER(engine->i915) > 2)
246         ENGINE_WRITE_FW(engine,
247                         RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
248
249     /* Now awake, let it get started */
250     if (ring->tail != ring->head) {
251         ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
252         ENGINE_POSTING_READ(engine, RING_TAIL);
253     }
254
255     /* Papering over lost _interrupts_ immediately following the restart */
256     intel_engine_signal_breadcrumbs(engine);
257     return 0;
258
259 err:
260     drm_err(&engine->i915->drm,
261             "%s initialization failed; "
262             "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
263             engine->name,
264             ENGINE_READ(engine, RING_CTL),
265             ENGINE_READ(engine, RING_CTL) & RING_VALID,
266             ENGINE_READ(engine, RING_HEAD), ring->head,
267             ENGINE_READ(engine, RING_TAIL), ring->tail,
268             ENGINE_READ(engine, RING_START),
269             i915_ggtt_offset(ring->vma));
270     return -EIO;
271 }
272
273 static void sanitize_hwsp(struct intel_engine_cs *engine)
274 {
275     struct intel_timeline *tl;
276
277     list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
278         intel_timeline_reset_seqno(tl);
279 }
280
281 static void xcs_sanitize(struct intel_engine_cs *engine)
282 {
283     /*
284      * Poison residual state on resume, in case the suspend didn't!
285      *
286      * We have to assume that across suspend/resume (or other loss
287      * of control) that the contents of our pinned buffers has been
288      * lost, replaced by garbage. Since this doesn't always happen,
289      * let's poison such state so that we more quickly spot when
290      * we falsely assume it has been preserved.
291      */
292     if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
293         memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
294
295     /*
296      * The kernel_context HWSP is stored in the status_page. As above,
297      * that may be lost on resume/initialisation, and so we need to
298      * reset the value in the HWSP.
299      */
300     sanitize_hwsp(engine);
301
302     /* And scrub the dirty cachelines for the HWSP */
303     drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
304
305     intel_engine_reset_pinned_contexts(engine);
306 }
307
308 static void reset_prepare(struct intel_engine_cs *engine)
309 {
310     /*
311      * We stop engines, otherwise we might get failed reset and a
312      * dead gpu (on elk). Also as modern gpu as kbl can suffer
313      * from system hang if batchbuffer is progressing when
314      * the reset is issued, regardless of READY_TO_RESET ack.
315      * Thus assume it is best to stop engines on all gens
316      * where we have a gpu reset.
317      *
318      * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
319      *
320      * WaMediaResetMainRingCleanup:ctg,elk (presumably)
321      * WaClearRingBufHeadRegAtInit:ctg,elk
322      *
323      * FIXME: Wa for more modern gens needs to be validated
324      */
325     ENGINE_TRACE(engine, "\n");
326     intel_engine_stop_cs(engine);
327
328     if (!stop_ring(engine)) {
329         /* G45 ring initialization often fails to reset head to zero */
330         ENGINE_TRACE(engine,
331                      "HEAD not reset to zero, "
332                      "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
333                      ENGINE_READ_FW(engine, RING_CTL),
334                      ENGINE_READ_FW(engine, RING_HEAD),
335                      ENGINE_READ_FW(engine, RING_TAIL),
336                      ENGINE_READ_FW(engine, RING_START));
337         if (!stop_ring(engine)) {
338             drm_err(&engine->i915->drm,
339                     "failed to set %s head to zero "
340                     "ctl %08x head %08x tail %08x start %08x\n",
341                     engine->name,
342                     ENGINE_READ_FW(engine, RING_CTL),
343                     ENGINE_READ_FW(engine, RING_HEAD),
344                     ENGINE_READ_FW(engine, RING_TAIL),
345                     ENGINE_READ_FW(engine, RING_START));
346         }
347     }
348 }
349
350 static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
351 {
352     struct i915_request *pos, *rq;
353     unsigned long flags;
354     u32 head;
355
356     rq = NULL;
357     spin_lock_irqsave(&engine->sched_engine->lock, flags);
358     rcu_read_lock();
359     list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
360         if (!__i915_request_is_complete(pos)) {
361             rq = pos;
362             break;
363         }
364     }
365     rcu_read_unlock();
366
367     /*
368      * The guilty request will get skipped on a hung engine.
369      *
370      * Users of client default contexts do not rely on logical
371      * state preserved between batches so it is safe to execute
372      * queued requests following the hang. Non default contexts
373      * rely on preserved state, so skipping a batch loses the
374      * evolution of the state and it needs to be considered corrupted.
375      * Executing more queued batches on top of corrupted state is
376      * risky. But we take the risk by trying to advance through
377      * the queued requests in order to make the client behaviour
378      * more predictable around resets, by not throwing away random
379      * amount of batches it has prepared for execution. Sophisticated
380      * clients can use gem_reset_stats_ioctl and dma fence status
381      * (exported via sync_file info ioctl on explicit fences) to observe
382      * when it loses the context state and should rebuild accordingly.
383      *
384      * The context ban, and ultimately the client ban, mechanism are safety
385      * valves if client submission ends up resulting in nothing more than
386      * subsequent hangs.
387      */
388
389     if (rq) {
390         /*
391          * Try to restore the logical GPU state to match the
392          * continuation of the request queue. If we skip the
393          * context/PD restore, then the next request may try to execute
394          * assuming that its context is valid and loaded on the GPU and
395          * so may try to access invalid memory, prompting repeated GPU
396          * hangs.
397          *
398          * If the request was guilty, we still restore the logical
399          * state in case the next request requires it (e.g. the
400          * aliasing ppgtt), but skip over the hung batch.
401          *
402          * If the request was innocent, we try to replay the request
403          * with the restored context.
404          */
405         __i915_request_reset(rq, stalled);
406
407         GEM_BUG_ON(rq->ring != engine->legacy.ring);
408         head = rq->head;
409     } else {
410         head = engine->legacy.ring->tail;
411     }
412     engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
413
414     spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
415 }
416
417 static void reset_finish(struct intel_engine_cs *engine)
418 {
419 }
420
421 static void reset_cancel(struct intel_engine_cs *engine)
422 {
423     struct i915_request *request;
424     unsigned long flags;
425
426     spin_lock_irqsave(&engine->sched_engine->lock, flags);
427
428     /* Mark all submitted requests as skipped. */
429     list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
430         i915_request_put(i915_request_mark_eio(request));
431     intel_engine_signal_breadcrumbs(engine);
432
433     /* Remaining _unready_ requests will be nop'ed when submitted */
434
435     spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
436 }
437
438 static void i9xx_submit_request(struct i915_request *request)
439 {
440     i915_request_submit(request);
441     wmb(); /* paranoid flush writes out of the WCB before mmio */
442
443     ENGINE_WRITE(request->engine, RING_TAIL,
444                  intel_ring_set_tail(request->ring, request->tail));
445 }
446
447 static void __ring_context_fini(struct intel_context *ce)
448 {
449     i915_vma_put(ce->state);
450 }
451
452 static void ring_context_destroy(struct kref *ref)
453 {
454     struct intel_context *ce = container_of(ref, typeof(*ce), ref);
455
456     GEM_BUG_ON(intel_context_is_pinned(ce));
457
458     if (ce->state)
459         __ring_context_fini(ce);
460
461     intel_context_fini(ce);
462     intel_context_free(ce);
463 }
464
465 static int ring_context_init_default_state(struct intel_context *ce,
466                                            struct i915_gem_ww_ctx *ww)
467 {
468     struct drm_i915_gem_object *obj = ce->state->obj;
469     void *vaddr;
470
471     vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
472     if (IS_ERR(vaddr))
473         return PTR_ERR(vaddr);
474
475 #ifdef __linux__
476     shmem_read(ce->engine->default_state, 0,
477                vaddr, ce->engine->context_size);
478 #else
479     uao_read(ce->engine->default_state, 0,
480              vaddr, ce->engine->context_size);
481 #endif
482
483     i915_gem_object_flush_map(obj);
484     __i915_gem_object_release_map(obj);
485
486     __set_bit(CONTEXT_VALID_BIT, &ce->flags);
487     return 0;
488 }
489
490 static int ring_context_pre_pin(struct intel_context *ce,
491                                 struct i915_gem_ww_ctx *ww,
492                                 void **unused)
493 {
494     struct i915_address_space *vm;
495     int err = 0;
496
497     if (ce->engine->default_state &&
498         !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
499         err = ring_context_init_default_state(ce, ww);
500         if (err)
501             return err;
502     }
503
504     vm = vm_alias(ce->vm);
505     if (vm)
506         err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
507
508     return err;
509 }
510
511 static void __context_unpin_ppgtt(struct intel_context *ce)
512 {
513     struct i915_address_space *vm;
514
515     vm = vm_alias(ce->vm);
516     if (vm)
517         gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
518 }
519
520 static void ring_context_unpin(struct intel_context *ce)
521 {
522 }
523
524 static void ring_context_post_unpin(struct intel_context *ce)
525 {
526     __context_unpin_ppgtt(ce);
527 }
528
529 static struct i915_vma *
530 alloc_context_vma(struct intel_engine_cs *engine)
531 {
532     struct drm_i915_private *i915 = engine->i915;
533     struct drm_i915_gem_object *obj;
534     struct i915_vma *vma;
535     int err;
536
537     obj = i915_gem_object_create_shmem(i915, engine->context_size);
538     if (IS_ERR(obj))
539         return ERR_CAST(obj);
540
541     /*
542      * Try to make the context utilize L3 as well as LLC.
543      *
544      * On VLV we don't have L3 controls in the PTEs so we
545      * shouldn't touch the cache level, especially as that
546      * would make the object snooped which might have a
547      * negative performance impact.
548      *
549      * Snooping is required on non-llc platforms in execlist
550      * mode, but since all GGTT accesses use PAT entry 0 we
551      * get snooping anyway regardless of cache_level.
552      *
553      * This is only applicable for Ivy Bridge devices since
554      * later platforms don't have L3 control bits in the PTE.
555      */
556     if (IS_IVYBRIDGE(i915))
557         i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
558
559     vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
560     if (IS_ERR(vma)) {
561         err = PTR_ERR(vma);
562         goto err_obj;
563     }
564
565     return vma;
566
567 err_obj:
568     i915_gem_object_put(obj);
569     return ERR_PTR(err);
570 }
571
572 static int ring_context_alloc(struct intel_context *ce)
573 {
574     struct intel_engine_cs *engine = ce->engine;
575
576     /* One ringbuffer to rule them all */
577     GEM_BUG_ON(!engine->legacy.ring);
578     ce->ring = engine->legacy.ring;
579     ce->timeline = intel_timeline_get(engine->legacy.timeline);
580
581     GEM_BUG_ON(ce->state);
582     if (engine->context_size) {
583         struct i915_vma *vma;
584
585         vma = alloc_context_vma(engine);
586         if (IS_ERR(vma))
587             return PTR_ERR(vma);
588
589         ce->state = vma;
590     }
591
592     return 0;
593 }
594
595 static int ring_context_pin(struct intel_context *ce, void *unused)
596 {
597     return 0;
598 }
599
600 static void ring_context_reset(struct intel_context *ce)
601 {
602     intel_ring_reset(ce->ring, ce->ring->emit);
603     clear_bit(CONTEXT_VALID_BIT, &ce->flags);
604 }
605
606 static void ring_context_revoke(struct intel_context *ce,
607                                 struct i915_request *rq,
608                                 unsigned int preempt_timeout_ms)
609 {
610     struct intel_engine_cs *engine;
611
612     if (!rq || !i915_request_is_active(rq))
613         return;
614
615     engine = rq->engine;
616     lockdep_assert_held(&engine->sched_engine->lock);
617     list_for_each_entry_continue(rq, &engine->sched_engine->requests,
618                                  sched.link)
619         if (rq->context == ce) {
620             i915_request_set_error_once(rq, -EIO);
621             __i915_request_skip(rq);
622         }
623 }
624
625 static void ring_context_cancel_request(struct intel_context *ce,
626                                         struct i915_request *rq)
627 {
628     struct intel_engine_cs *engine = NULL;
629
630     i915_request_active_engine(rq, &engine);
631
632     if (engine && intel_engine_pulse(engine))
633         intel_gt_handle_error(engine->gt, engine->mask, 0,
634                               "request cancellation by %s",
635                               curproc->p_p->ps_comm);
636 }
637
638 static const struct intel_context_ops ring_context_ops = {
639     .alloc = ring_context_alloc,
640
641     .cancel_request = ring_context_cancel_request,
642
643     .revoke = ring_context_revoke,
644
645     .pre_pin = ring_context_pre_pin,
646     .pin = ring_context_pin,
647     .unpin = ring_context_unpin,
648     .post_unpin = ring_context_post_unpin,
649
650     .enter = intel_context_enter_engine,
651     .exit = intel_context_exit_engine,
652
653     .reset = ring_context_reset,
654     .destroy = ring_context_destroy,
655 };
656
657 static int load_pd_dir(struct i915_request *rq,
658                        struct i915_address_space *vm,
659                        u32 valid)
660 {
661     const struct intel_engine_cs * const engine = rq->engine;
662     u32 *cs;
663
664     cs = intel_ring_begin(rq, 12);
665     if (IS_ERR(cs))
666         return PTR_ERR(cs);
667
668     *cs++ = MI_LOAD_REGISTER_IMM(1);
669     *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
670     *cs++ = valid;
671
672     *cs++ = MI_LOAD_REGISTER_IMM(1);
673     *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
674     *cs++ = pp_dir(vm);
675
676     /* Stall until the page table load is complete? */
677     *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
678     *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
679     *cs++ = intel_gt_scratch_offset(engine->gt,
680                                     INTEL_GT_SCRATCH_FIELD_DEFAULT);
681
682     *cs++ = MI_LOAD_REGISTER_IMM(1);
683     *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
684     *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
685
686     intel_ring_advance(rq, cs);
687
688     return rq->engine->emit_flush(rq, EMIT_FLUSH);
689 }
690
691static int mi_set_context(struct i915_request *rq,
692 struct intel_context *ce,
693 u32 flags)
694{
695 struct intel_engine_cs *engine = rq->engine;
696 struct drm_i915_privateinteldrm_softc *i915 = engine->i915;
697 enum intel_engine_id id;
698 const int num_engines =
699 IS_HASWELL(i915)IS_PLATFORM(i915, INTEL_HASWELL) ? engine->gt->info.num_engines - 1 : 0;
700 bool_Bool force_restore = false0;
701 int len;
702 u32 *cs;
703
704 len = 4;
705 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7)
706 len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
707 else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 5)
708 len += 2;
709 if (flags & MI_FORCE_RESTORE(1<<1)) {
710 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT)((void)0);
711 flags &= ~MI_FORCE_RESTORE(1<<1);
712 force_restore = true1;
713 len += 2;
714 }
715
716 cs = intel_ring_begin(rq, len);
717 if (IS_ERR(cs))
718 return PTR_ERR(cs);
719
720 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
721 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7) {
722 *cs++ = MI_ARB_ON_OFF(((0x0) << 29) | (0x08) << 23 | (0)) | MI_ARB_DISABLE(0<<0);
723 if (num_engines) {
724 struct intel_engine_cs *signaller;
725
726 *cs++ = MI_LOAD_REGISTER_IMM(num_engines)(((0x0) << 29) | (0x22) << 23 | (2*(num_engines)-
1))
;
727 for_each_engine(signaller, engine->gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((signaller
) = (engine->gt)->engine[(id)])) {} else
{
728 if (signaller == engine)
729 continue;
730
731 *cs++ = i915_mmio_reg_offset(
732 RING_PSMI_CTL(signaller->mmio_base)((const i915_reg_t){ .reg = ((signaller->mmio_base) + 0x50
) })
);
733 *cs++ = _MASKED_BIT_ENABLE(({ typeof(((u32)((1UL << (0)) + 0))) _a = (((u32)((1UL <<
(0)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while (
0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p
(_a) && __builtin_constant_p(_a)) do { } while (0); (
(_a) << 16 | (_a)); }); })
734 GEN6_PSMI_SLEEP_MSG_DISABLE)({ typeof(((u32)((1UL << (0)) + 0))) _a = (((u32)((1UL <<
(0)) + 0))); ({ if (__builtin_constant_p(_a)) do { } while (
0); if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p
(_a) && __builtin_constant_p(_a)) do { } while (0); (
(_a) << 16 | (_a)); }); })
;
735 }
736 }
737 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 5) {
738 /*
739 * This w/a is only listed for pre-production ilk a/b steppings,
740 * but is also mentioned for programming the powerctx. To be
741 * safe, just apply the workaround; we do not use SyncFlush so
742 * this should never take effect and so be a no-op!
743 */
744 *cs++ = MI_SUSPEND_FLUSH(((0x0) << 29) | (0x0b) << 23 | (0)) | MI_SUSPEND_FLUSH_EN(1<<0);
745 }
746
747 if (force_restore) {
748 /*
749 * The HW doesn't handle being told to restore the current
750 * context very well. Quite often it likes goes to go off and
751 * sulk, especially when it is meant to be reloading PP_DIR.
752 * A very simple fix to force the reload is to simply switch
753 * away from the current context and back again.
754 *
755 * Note that the kernel_context will contain random state
756 * following the INHIBIT_RESTORE. We accept this since we
757 * never use the kernel_context state; it is merely a
758 * placeholder we use to flush other contexts.
759 */
760 *cs++ = MI_SET_CONTEXT(((0x0) << 29) | (0x18) << 23 | (0));
761 *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
762 MI_MM_SPACE_GTT(1<<8) |
763 MI_RESTORE_INHIBIT(1<<0);
764 }
765
766 *cs++ = MI_NOOP(((0x0) << 29) | (0) << 23 | (0));
767 *cs++ = MI_SET_CONTEXT(((0x0) << 29) | (0x18) << 23 | (0));
768 *cs++ = i915_ggtt_offset(ce->state) | flags;
769 /*
770 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
771 * WaMiSetContext_Hang:snb,ivb,vlv
772 */
773 *cs++ = MI_NOOP(((0x0) << 29) | (0) << 23 | (0));
774
775 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7) {
776 if (num_engines) {
777 struct intel_engine_cs *signaller;
778 i915_reg_t last_reg = INVALID_MMIO_REG((const i915_reg_t){ .reg = (0) }); /* keep gcc quiet */
779
780 *cs++ = MI_LOAD_REGISTER_IMM(num_engines)(((0x0) << 29) | (0x22) << 23 | (2*(num_engines)-
1))
;
781 for_each_engine(signaller, engine->gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((signaller
) = (engine->gt)->engine[(id)])) {} else
{
782 if (signaller == engine)
783 continue;
784
785 last_reg = RING_PSMI_CTL(signaller->mmio_base)((const i915_reg_t){ .reg = ((signaller->mmio_base) + 0x50
) })
;
786 *cs++ = i915_mmio_reg_offset(last_reg);
787 *cs++ = _MASKED_BIT_DISABLE(
788 GEN6_PSMI_SLEEP_MSG_DISABLE);
789 }
790
791 /* Insert a delay before the next switch! */
792 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
793 *cs++ = i915_mmio_reg_offset(last_reg);
794 *cs++ = intel_gt_scratch_offset(engine->gt,
795 INTEL_GT_SCRATCH_FIELD_DEFAULT);
796 *cs++ = MI_NOOP;
797 }
798 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
799 } else if (GRAPHICS_VER(i915) == 5) {
800 *cs++ = MI_SUSPEND_FLUSH;
801 }
802
803 intel_ring_advance(rq, cs);
804
805 return 0;
806}
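
The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() values emitted above use the masked-register convention that the expanded macros make visible: the upper 16 bits of the write select which bits the hardware may change, and the lower 16 bits carry the new values, so a single 32-bit write can flip one bit without a read-modify-write. A minimal standalone sketch of just that encoding, assuming nothing beyond it (the constant name mirrors the driver's; the values are illustrative):

#include <assert.h>
#include <stdint.h>

#define MASKED_FIELD(mask, value)	((uint32_t)(mask) << 16 | (value))
#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

#define GEN6_PSMI_SLEEP_MSG_DISABLE	(1u << 0)

int main(void)
{
	/* Enable bit 0: mask bit 16 selects it, value bit 0 sets it. */
	assert(MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE) == 0x00010001);

	/* Disable bit 0: same select bit, value left clear. */
	assert(MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE) == 0x00010000);

	return 0;
}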
807
808static int remap_l3_slice(struct i915_request *rq, int slice)
809{
810#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
811 u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
812 int i;
813
814 if (!remap_info)
815 return 0;
816
817 cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
818 if (IS_ERR(cs))
819 return PTR_ERR(cs);
820
821 /*
822 * Note: We do not worry about the concurrent register cacheline hang
823 * here because no other code should access these registers other than
824 * at initialization time.
825 */
826 *cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
827 for (i = 0; i < L3LOG_DW; i++) {
828 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
829 *cs++ = remap_info[i];
830 }
831 *cs++ = MI_NOOP;
832 intel_ring_advance(rq, cs);
833
834 return 0;
835#undef L3LOG_DW
836}
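
The intel_ring_begin(rq, L3LOG_DW * 2 + 2) budget above accounts for one MI_LOAD_REGISTER_IMM header (whose length field, per the expansion, encodes 2*n - 1), L3LOG_DW (offset, value) pairs, and a trailing MI_NOOP that keeps the emission an even number of dwords. A small sketch of that arithmetic, restating the driver's sizes locally:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define GEN7_L3LOG_SIZE 0x80
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(uint32_t))

int main(void)
{
	size_t header = 1;		/* MI_LOAD_REGISTER_IMM(L3LOG_DW) */
	size_t payload = 2 * L3LOG_DW;	/* one (offset, value) pair per reg */
	size_t pad = 1;			/* trailing MI_NOOP */

	assert(header + payload + pad == L3LOG_DW * 2 + 2);
	assert((header + payload + pad) % 2 == 0);	/* qword aligned */
	return 0;
}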
837
838static int remap_l3(struct i915_request *rq)
839{
840 struct i915_gem_context *ctx = i915_request_gem_context(rq);
841 int i, err;
842
843 if (!ctx || !ctx->remap_slice)
844 return 0;
845
846 for (i = 0; i < MAX_L3_SLICES; i++) {
847 if (!(ctx->remap_slice & BIT(i)))
848 continue;
849
850 err = remap_l3_slice(rq, i);
851 if (err)
852 return err;
853 }
854
855 ctx->remap_slice = 0;
856 return 0;
857}
858
859static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
860{
861 int ret;
862
863 if (!vm)
864 return 0;
865
866 ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
867 if (ret)
868 return ret;
869
870 /*
871 * Not only do we need a full barrier (post-sync write) after
872 * invalidating the TLBs, but we need to wait a little bit
873 * longer. Whether this is merely delaying us, or the
874 * subsequent flush is a key part of serialising with the
875 * post-sync op, this extra pass appears vital before a
876 * mm switch!
877 */
878 ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
879 if (ret)
880 return ret;
881
882 return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
883}
884
885static int clear_residuals(struct i915_request *rq)
886{
887 struct intel_engine_cs *engine = rq->engine;
888 int ret;
889
890 ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
891 if (ret)
892 return ret;
893
894 if (engine->kernel_context->state) {
895 ret = mi_set_context(rq,
896 engine->kernel_context,
897 MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
898 if (ret)
899 return ret;
900 }
901
902 ret = engine->emit_bb_start(rq,
903 engine->wa_ctx.vma->node.start, 0,
904 0);
905 if (ret)
906 return ret;
907
908 ret = engine->emit_flush(rq, EMIT_FLUSH);
909 if (ret)
910 return ret;
911
912 /* Always invalidate before the next switch_mm() */
913 return engine->emit_flush(rq, EMIT_INVALIDATE);
914}
915
916static int switch_context(struct i915_request *rq)
917{
918 struct intel_engine_cs *engine = rq->engine;
919 struct intel_context *ce = rq->context;
920 void **residuals = NULL;
921 int ret;
922
923 GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
924
925 if (engine->wa_ctx.vma && ce != engine->kernel_context) {
926 if (engine->wa_ctx.vma->private != ce &&
927 i915_mitigate_clear_residuals()) {
928 ret = clear_residuals(rq);
929 if (ret)
930 return ret;
931
932 residuals = &engine->wa_ctx.vma->private;
933 }
934 }
935
936 ret = switch_mm(rq, vm_alias(ce->vm));
937 if (ret)
938 return ret;
939
940 if (ce->state) {
941 u32 flags;
942
943 GEM_BUG_ON(engine->id != RCS0);
944
945 /* For resource streamer on HSW+ and power context elsewhere */
946 BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
947 BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
948
949 flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
950 if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
951 flags |= MI_RESTORE_EXT_STATE_EN;
952 else
953 flags |= MI_RESTORE_INHIBIT;
954
955 ret = mi_set_context(rq, ce, flags);
956 if (ret)
957 return ret;
958 }
959
960 ret = remap_l3(rq);
961 if (ret)
962 return ret;
963
964 /*
965 * Now past the point of no return, this request _will_ be emitted.
966 *
967 * Or at least this preamble will be emitted, the request may be
968 * interrupted prior to submitting the user payload. If so, we
969 * still submit the "empty" request in order to preserve global
970 * state tracking such as this, our tracking of the current
971 * dirty context.
972 */
973 if (residuals) {
974 intel_context_put(*residuals);
975 *residuals = intel_context_get(ce);
976 }
977
978 return 0;
979}
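
The residuals update at the end of switch_context() is a plain reference swap: release the reference held on the previously tracked context, then take one on the new owner before publishing it. A toy refcount model of just that swap (the struct and helpers are stand-ins, not the driver's types):

#include <assert.h>

struct ctx { int refcount; };

static struct ctx *ctx_get(struct ctx *c) { if (c) c->refcount++; return c; }
static void ctx_put(struct ctx *c) { if (c) c->refcount--; }

int main(void)
{
	struct ctx prev = { .refcount = 1 }, next = { .refcount = 1 };
	struct ctx *tracked = &prev;	/* what *residuals pointed at */

	ctx_put(tracked);		/* intel_context_put(*residuals) */
	tracked = ctx_get(&next);	/* *residuals = intel_context_get(ce) */

	assert(prev.refcount == 0 && next.refcount == 2 && tracked == &next);
	return 0;
}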
980
981static int ring_request_alloc(struct i915_request *request)
982{
983 int ret;
984
985 GEM_BUG_ON(!intel_context_is_pinned(request->context));
986 GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
987
988 /*
989 * Flush enough space to reduce the likelihood of waiting after
990 * we start building the request - in which case we will just
991 * have to repeat work.
992 */
993 request->reserved_space += LEGACY_REQUEST_SIZE;
994
995 /* Unconditionally invalidate GPU caches and TLBs. */
996 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
997 if (ret)
998 return ret;
999
1000 ret = switch_context(request);
1001 if (ret)
1002 return ret;
1003
1004 request->reserved_space -= LEGACY_REQUEST_SIZE;
1005 return 0;
1006}
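
ring_request_alloc() temporarily inflates request->reserved_space by LEGACY_REQUEST_SIZE so the preamble (flush plus context switch) can be emitted without eating the headroom kept for the final breadcrumb, then hands it back before the user payload. A toy model of that reserve/restore idiom (the request struct and base value are stand-ins):

#include <assert.h>

#define LEGACY_REQUEST_SIZE 200

struct toy_request { int reserved_space; };

static int emit_preamble(struct toy_request *rq, int base)
{
	/* the preamble runs while the extra headroom is still reserved */
	assert(rq->reserved_space == base + LEGACY_REQUEST_SIZE);
	return 0;
}

int main(void)
{
	struct toy_request rq = { .reserved_space = 64 }; /* invented base */
	int base = rq.reserved_space;

	rq.reserved_space += LEGACY_REQUEST_SIZE;	/* guard the preamble */
	assert(emit_preamble(&rq, base) == 0);
	rq.reserved_space -= LEGACY_REQUEST_SIZE;	/* hand it back */

	assert(rq.reserved_space == base);
	return 0;
}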
1007
1008static void gen6_bsd_submit_request(struct i915_request *request)
1009{
1010 struct intel_uncore *uncore = request->engine->uncore;
1011
1012 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1013
1014 /* Every tail move must follow the sequence below */
1015
1016 /* Disable notification that the ring is IDLE. The GT
1017 * will then assume that it is busy and bring it out of rc6.
1018 */
1019 intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
1020 _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
1021
1022 /* Clear the context id. Here be magic! */
1023 intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
1024
1025 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1026 if (__intel_wait_for_register_fw(uncore,
1027 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
1028 GEN6_BSD_SLEEP_INDICATOR,
1029 0,
1030 1000, 0, NULL))
1031 drm_err(&uncore->i915->drm,
1032 "timed out waiting for the BSD ring to wake up\n");
1033
1034 /* Now that the ring is fully powered up, update the tail */
1035 i9xx_submit_request(request);
1036
1037 /* Let the ring send IDLE messages to the GT again,
1038 * and so let it sleep to conserve power when idle.
1039 */
1040 intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
1041 _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
1042
1043 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1044}
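
The wake-up wait in the middle of the sequence is a bounded poll: keep reading RING_PSMI_CTL until GEN6_BSD_SLEEP_INDICATOR reads back as zero or the microsecond budget runs out, then report the timeout. A generic userspace sketch of that shape, with the register and the wake-up event simulated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN6_BSD_SLEEP_INDICATOR (1u << 3)

static uint32_t fake_psmi_ctl = GEN6_BSD_SLEEP_INDICATOR; /* ring asleep */

static uint32_t read_reg(void)
{
	uint32_t val = fake_psmi_ctl;

	fake_psmi_ctl &= ~GEN6_BSD_SLEEP_INDICATOR; /* simulate wake-up */
	return val;
}

static bool wait_for_register(uint32_t mask, uint32_t want, int budget_us)
{
	while (budget_us-- > 0) {
		if ((read_reg() & mask) == want)
			return true;
		/* a real implementation delays ~1us per iteration here */
	}
	return false;
}

int main(void)
{
	if (!wait_for_register(GEN6_BSD_SLEEP_INDICATOR, 0, 1000))
		fprintf(stderr,
			"timed out waiting for the BSD ring to wake up\n");
	return 0;
}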
1045
1046static void i9xx_set_default_submission(struct intel_engine_cs *engine)
1047{
1048 engine->submit_request = i9xx_submit_request;
1049}
1050
1051static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
1052{
1053 engine->submit_request = gen6_bsd_submit_request;
1054}
1055
1056static void ring_release(struct intel_engine_cs *engine)
1057{
1058 struct drm_i915_private *dev_priv = engine->i915;
1059
1060 drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
1061 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
1062
1063 intel_engine_cleanup_common(engine);
1064
1065 if (engine->wa_ctx.vma) {
1066 intel_context_put(engine->wa_ctx.vma->private);
1067 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
1068 }
1069
1070 intel_ring_unpin(engine->legacy.ring);
1071 intel_ring_put(engine->legacy.ring);
1072
1073 intel_timeline_unpin(engine->legacy.timeline);
1074 intel_timeline_put(engine->legacy.timeline);
1075}
1076
1077static void irq_handler(struct intel_engine_cs *engine, u16 iir)
1078{
1079 intel_engine_signal_breadcrumbs(engine);
1080}
1081
1082static void setup_irq(struct intel_engine_cs *engine)
1083{
1084 struct drm_i915_private *i915 = engine->i915;
1085
1086 intel_engine_set_irq_handler(engine, irq_handler);
1087
1088 if (GRAPHICS_VER(i915) >= 6) {
1089 engine->irq_enable = gen6_irq_enable;
1090 engine->irq_disable = gen6_irq_disable;
1091 } else if (GRAPHICS_VER(i915) >= 5) {
1092 engine->irq_enable = gen5_irq_enable;
1093 engine->irq_disable = gen5_irq_disable;
1094 } else if (GRAPHICS_VER(i915) >= 3) {
1095 engine->irq_enable = gen3_irq_enable;
1096 engine->irq_disable = gen3_irq_disable;
1097 } else {
1098 engine->irq_enable = gen2_irq_enable;
1099 engine->irq_disable = gen2_irq_disable;
1100 }
1101}
1102
1103static void add_to_engine(struct i915_request *rq)
1104{
1105 lockdep_assert_held(&rq->engine->sched_engine->lock);
1106 list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
1107}
1108
1109static void remove_from_engine(struct i915_request *rq)
1110{
1111 spin_lock_irq(&rq->engine->sched_engine->lock);
1112 list_del_init(&rq->sched.link);
1113
1114 /* Prevent further __await_execution() registering a cb, then flush */
1115 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
1116
1117 spin_unlock_irq(&rq->engine->sched_engine->lock);
1118
1119 i915_request_notify_execute_cb_imm(rq);
1120}
1121
1122static void setup_common(struct intel_engine_cs *engine)
1123{
1124 struct drm_i915_private *i915 = engine->i915;
1125
1126 /* gen8+ are only supported with execlists */
1127 GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
1128
1129 setup_irq(engine);
1130
1131 engine->resume = xcs_resume;
1132 engine->sanitize = xcs_sanitize;
1133
1134 engine->reset.prepare = reset_prepare;
1135 engine->reset.rewind = reset_rewind;
1136 engine->reset.cancel = reset_cancel;
1137 engine->reset.finish = reset_finish;
1138
1139 engine->add_active_request = add_to_engine;
1140 engine->remove_active_request = remove_from_engine;
1141
1142 engine->cops = &ring_context_ops;
1143 engine->request_alloc = ring_request_alloc;
1144
1145 /*
1146 * Using a global execution timeline; the previous final breadcrumb is
1147 * equivalent to our next initial breadcrumb, so we can elide
1148 * engine->emit_init_breadcrumb().
1149 */
1150 engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
1151 if (GRAPHICS_VER(i915) == 5)
1152 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
1153
1154 engine->set_default_submission = i9xx_set_default_submission;
1155
1156 if (GRAPHICS_VER(i915) >= 6)
1157 engine->emit_bb_start = gen6_emit_bb_start;
1158 else if (GRAPHICS_VER(i915) >= 4)
1159 engine->emit_bb_start = gen4_emit_bb_start;
1160 else if (IS_I830(i915) || IS_I845G(i915))
1161 engine->emit_bb_start = i830_emit_bb_start;
1162 else
1163 engine->emit_bb_start = gen3_emit_bb_start;
1164}
1165
1166static void setup_rcs(struct intel_engine_cs *engine)
1167{
1168 struct drm_i915_private *i915 = engine->i915;
1169
1170 if (HAS_L3_DPF(i915))
1171 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1172
1173 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1174
1175 if (GRAPHICS_VER(i915) >= 7) {
1176 engine->emit_flush = gen7_emit_flush_rcs;
1177 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
1178 } else if (GRAPHICS_VER(i915) == 6) {
1179 engine->emit_flush = gen6_emit_flush_rcs;
1180 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
1181 } else if (GRAPHICS_VER(i915) == 5) {
1182 engine->emit_flush = gen4_emit_flush_rcs;
1183 } else {
1184 if (GRAPHICS_VER(i915) < 4)
1185 engine->emit_flush = gen2_emit_flush;
1186 else
1187 engine->emit_flush = gen4_emit_flush_rcs;
1188 engine->irq_enable_mask = I915_USER_INTERRUPT;
1189 }
1190
1191 if (IS_HASWELL(i915))
1192 engine->emit_bb_start = hsw_emit_bb_start;
1193}
1194
1195static void setup_vcs(struct intel_engine_cs *engine)
1196{
1197 struct drm_i915_private *i915 = engine->i915;
1198
1199 if (GRAPHICS_VER(i915) >= 6) {
1200 /* gen6 bsd needs a special wa for tail updates */
1201 if (GRAPHICS_VER(i915) == 6)
1202 engine->set_default_submission = gen6_bsd_set_default_submission;
1203 engine->emit_flush = gen6_emit_flush_vcs;
1204 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1205
1206 if (GRAPHICS_VER(i915) == 6)
1207 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
1208 else
1209 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
1210 } else {
1211 engine->emit_flush = gen4_emit_flush_vcs;
1212 if (GRAPHICS_VER(i915) == 5)
1213 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1214 else
1215 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1216 }
1217}
1218
1219static void setup_bcs(struct intel_engine_cs *engine)
1220{
1221 struct drm_i915_private *i915 = engine->i915;
1222
1223 engine->emit_flush = gen6_emit_flush_xcs;
1224 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1225
1226 if (GRAPHICS_VER(i915) == 6)
1227 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
1228 else
1229 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
1230}
1231
1232static void setup_vecs(struct intel_engine_cs *engine)
1233{
1234 struct drm_i915_private *i915 = engine->i915;
Value stored to 'i915' during its initialization is never read
1235
1236 GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
1237
1238 engine->emit_flush = gen6_emit_flush_xcs;
1239 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
1240 engine->irq_enable = hsw_irq_enable_vecs;
1241 engine->irq_disable = hsw_irq_disable_vecs;
1242
1243 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
1244}
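
On the dead store reported above: GEM_BUG_ON() expands to ((void)0) in this configuration, so the i915 local in setup_vecs() is written once and never read. One possible way to quiet the analyzer without losing the assertion in debug builds, offered as a sketch rather than the upstream resolution, is to drop the local and reference engine->i915 directly:

static void setup_vecs(struct intel_engine_cs *engine)
{
	/* i915 local removed; the assertion reads engine->i915 itself */
	GEM_BUG_ON(GRAPHICS_VER(engine->i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}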
1245
1246static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
1247 struct i915_vma * const vma)
1248{
1249 return gen7_setup_clear_gpr_bb(engine, vma);
1250}
1251
1252static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
1253 struct i915_gem_ww_ctx *ww,
1254 struct i915_vma *vma)
1255{
1256 int err;
1257
1258 err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
1259 if (err)
1260 return err;
1261
1262 err = i915_vma_sync(vma);
1263 if (err)
1264 goto err_unpin;
1265
1266 err = gen7_ctx_switch_bb_setup(engine, vma);
1267 if (err)
1268 goto err_unpin;
1269
1270 engine->wa_ctx.vma = vma;
1271 return 0;
1272
1273err_unpin:
1274 i915_vma_unpin(vma);
1275 return err;
1276}
1277
1278static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
1279{
1280 struct drm_i915_gem_object *obj;
1281 struct i915_vma *vma;
1282 int size, err;
1283
1284 if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
1285 return NULL;
1286
1287 err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
1288 if (err < 0)
1289 return ERR_PTR(err);
1290 if (!err)
1291 return NULL;
1292
1293 size = roundup2(err, PAGE_SIZE);
1294
1295 obj = i915_gem_object_create_internal(engine->i915, size);
1296 if (IS_ERR(obj))
1297 return ERR_CAST(obj);
1298
1299 vma = i915_vma_instance(obj, engine->gt->vm, NULL);
1300 if (IS_ERR(vma)) {
1301 i915_gem_object_put(obj);
1302 return ERR_CAST(vma);
1303 }
1304
1305 vma->private = intel_context_create(engine); /* dummy residuals */
1306 if (IS_ERR(vma->private)) {
1307 err = PTR_ERR(vma->private);
1308 vma->private = NULL;
1309 i915_gem_object_put(obj);
1310 return ERR_PTR(err);
1311 }
1312
1313 return vma;
1314}
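
gen7_ctx_vma() probes before it allocates: the first gen7_ctx_switch_bb_setup() call passes a NULL vma and returns only the batch size, which is rounded up to a whole page before the object is created and the batch emitted for real. A standalone sketch of that pattern (emit_batch() and its length are invented for the demo):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define roundup2(x, m) (((x) + ((size_t)(m) - 1)) & ~((size_t)(m) - 1))

/* Stand-in for gen7_setup_clear_gpr_bb(): a NULL buffer means "probe". */
static size_t emit_batch(uint32_t *buf)
{
	const size_t ndwords = 148;	/* invented batch length */
	size_t i;

	if (buf)
		for (i = 0; i < ndwords; i++)
			buf[i] = 0;	/* a real batch emits commands */

	return ndwords * sizeof(uint32_t);
}

int main(void)
{
	size_t size = emit_batch(NULL);		/* first pass: size only */
	uint32_t *bb = malloc(roundup2(size, PAGE_SIZE));

	assert(bb && emit_batch(bb) == size);	/* second pass: fill it */
	free(bb);
	return 0;
}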
1315
1316int intel_ring_submission_setup(struct intel_engine_cs *engine)
1317{
1318 struct i915_gem_ww_ctx ww;
1319 struct intel_timeline *timeline;
1320 struct intel_ring *ring;
1321 struct i915_vma *gen7_wa_vma;
1322 int err;
1323
1324 setup_common(engine);
1325
1326 switch (engine->class) {
1327 case RENDER_CLASS:
1328 setup_rcs(engine);
1329 break;
1330 case VIDEO_DECODE_CLASS:
1331 setup_vcs(engine);
1332 break;
1333 case COPY_ENGINE_CLASS:
1334 setup_bcs(engine);
1335 break;
1336 case VIDEO_ENHANCEMENT_CLASS:
1337 setup_vecs(engine);
1338 break;
1339 default:
1340 MISSING_CASE(engine->class);
1341 return -ENODEV;
1342 }
1343
1344 timeline = intel_timeline_create_from_engine(engine,
1345 I915_GEM_HWS_SEQNO_ADDR);
1346 if (IS_ERR(timeline)) {
1347 err = PTR_ERR(timeline);
1348 goto err;
1349 }
1350 GEM_BUG_ON(timeline->has_initial_breadcrumb);
1351
1352 ring = intel_engine_create_ring(engine, SZ_16K);
1353 if (IS_ERR(ring)) {
1354 err = PTR_ERR(ring);
1355 goto err_timeline;
1356 }
1357
1358 GEM_BUG_ON(engine->legacy.ring);
1359 engine->legacy.ring = ring;
1360 engine->legacy.timeline = timeline;
1361
1362 gen7_wa_vma = gen7_ctx_vma(engine);
1363 if (IS_ERR(gen7_wa_vma)) {
1364 err = PTR_ERR(gen7_wa_vma);
1365 goto err_ring;
1366 }
1367
1368 i915_gem_ww_ctx_init(&ww, false);
1369
1370retry:
1371 err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
1372 if (!err && gen7_wa_vma)
1373 err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
1374 if (!err)
1375 err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
1376 if (!err)
1377 err = intel_timeline_pin(timeline, &ww);
1378 if (!err) {
1379 err = intel_ring_pin(ring, &ww);
1380 if (err)
1381 intel_timeline_unpin(timeline);
1382 }
1383 if (err)
1384 goto out;
1385
1386 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
1387
1388 if (gen7_wa_vma) {
1389 err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
1390 if (err) {
1391 intel_ring_unpin(ring);
1392 intel_timeline_unpin(timeline);
1393 }
1394 }
1395
1396out:
1397 if (err == -EDEADLK) {
1398 err = i915_gem_ww_ctx_backoff(&ww);
1399 if (!err)
1400 goto retry;
1401 }
1402 i915_gem_ww_ctx_fini(&ww);
1403 if (err)
1404 goto err_gen7_put;
1405
1406 /* Finally, take ownership and responsibility for cleanup! */
1407 engine->release = ring_release;
1408
1409 return 0;
1410
1411err_gen7_put:
1412 if (gen7_wa_vma) {
1413 intel_context_put(gen7_wa_vma->private);
1414 i915_gem_object_put(gen7_wa_vma->obj);
1415 }
1416err_ring:
1417 intel_ring_put(ring);
1418err_timeline:
1419 intel_timeline_put(timeline);
1420err:
1421 intel_engine_cleanup_common(engine);
1422 return err;
1423}
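
The retry label above implements the ww-mutex idiom: try to acquire every lock the operation needs; if any acquisition fails with -EDEADLK, release everything, let i915_gem_ww_ctx_backoff() wait on the contended lock, and replay the whole sequence from the top. A control-flow sketch with the locking simulated:

#include <assert.h>
#include <stdio.h>

#define EDEADLK 11

static int attempts;

/* Simulated lock phase: lose the lock race once, then succeed. */
static int lock_all(void)
{
	return ++attempts == 1 ? -EDEADLK : 0;
}

int main(void)
{
	int err;

retry:
	err = lock_all();
	if (err == -EDEADLK) {
		/* a real backoff sleeps on the contended lock here */
		goto retry;
	}

	assert(err == 0);
	printf("acquired after %d attempt(s)\n", attempts);
	return 0;
}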
1424
1425#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1426#include "selftest_ring_submission.c"
1427#endif