Bug Summary

File: dev/pci/drm/i915/gt/intel_reset.c
Warning: line 710, column 3
Value stored to 'node' is never read
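
The flagged store sits in revoke_mmaps(): 'node' is assigned unconditionally, but it is only consumed by the unmap_mapping_range() call inside the #ifdef __linux__ branch. On this OpenBSD build the #else path protects the pages via pmap_page_protect() and never reads 'node', so the assignment is a dead store (harmless, but exactly what the deadcode.DeadStores checker reports). Below is a minimal sketch of the pattern and one way a cleanup might look; the struct names and printf bodies are illustrative stand-ins, not the driver's real i915_vma / drm_vma_offset_node code.

/*
 * Minimal reproduction of the deadcode.DeadStores pattern: a value is
 * computed unconditionally but consumed only in a branch that is not
 * compiled on this platform.
 */
#include <stdio.h>

struct vma_node { unsigned long offset; };
struct vma      { struct vma_node node; unsigned long size; };

static void revoke_one(struct vma *vma)
{
	struct vma_node *node;

	node = &vma->node;	/* dead store when __linux__ is not defined */

#ifdef __linux__
	printf("unmap at %lu, size %lu\n", node->offset, vma->size);
#else
	/* non-Linux path works from 'vma' directly; 'node' is never read */
	printf("protect pages, size %lu\n", vma->size);
#endif
}

/* One possible cleanup: compute 'node' only where it is consumed. */
static void revoke_one_fixed(struct vma *vma)
{
#ifdef __linux__
	struct vma_node *node = &vma->node;

	printf("unmap at %lu, size %lu\n", node->offset, vma->size);
#else
	printf("protect pages, size %lu\n", vma->size);
#endif
}

int main(void)
{
	struct vma v = { .node = { .offset = 4096 }, .size = 8192 };

	revoke_one(&v);
	revoke_one_fixed(&v);
	return 0;
}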

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name intel_reset.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/gt/intel_reset.c
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2008-2018 Intel Corporation
5 */
6
7#include <linux/sched/mm.h>
8#include <linux/stop_machine.h>
9
10#include "display/intel_display_types.h"
11#include "display/intel_overlay.h"
12
13#include "gem/i915_gem_context.h"
14
15#include "i915_drv.h"
16#include "i915_gpu_error.h"
17#include "i915_irq.h"
18#include "intel_breadcrumbs.h"
19#include "intel_engine_pm.h"
20#include "intel_gt.h"
21#include "intel_gt_pm.h"
22#include "intel_reset.h"
23
24#include "uc/intel_guc.h"
25#include "uc/intel_guc_submission.h"
26
27#define RESET_MAX_RETRIES3 3
28
29/* XXX How to handle concurrent GGTT updates using tiling registers? */
30#define RESET_UNDER_STOP_MACHINE0 0
31
32static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
33{
34 intel_uncore_rmw_fw(uncore, reg, 0, set);
35}
36
37static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
38{
39 intel_uncore_rmw_fw(uncore, reg, clr, 0);
40}
41
42static void engine_skip_context(struct i915_request *rq)
43{
44 struct intel_engine_cs *engine = rq->engine;
45 struct intel_context *hung_ctx = rq->context;
46
47 if (!i915_request_is_active(rq))
48 return;
49
50 lockdep_assert_held(&engine->active.lock)do { (void)(&engine->active.lock); } while(0);
51 list_for_each_entry_continue(rq, &engine->active.requests, sched.link)for (rq = ({ const __typeof( ((__typeof(*rq) *)0)->sched.link
) *__mptr = ((rq)->sched.link.next); (__typeof(*rq) *)( (
char *)__mptr - __builtin_offsetof(__typeof(*rq), sched.link)
);}); &rq->sched.link != (&engine->active.requests
); rq = ({ const __typeof( ((__typeof(*rq) *)0)->sched.link
) *__mptr = (rq->sched.link.next); (__typeof(*rq) *)( (char
*)__mptr - __builtin_offsetof(__typeof(*rq), sched.link) );}
))
52 if (rq->context == hung_ctx) {
53 i915_request_set_error_once(rq, -EIO5);
54 __i915_request_skip(rq);
55 }
56}
57
58static void client_mark_guilty(struct i915_gem_context *ctx, bool_Bool banned)
59{
60 struct drm_i915_file_private *file_priv = ctx->file_priv;
61 unsigned long prev_hang;
62 unsigned int score;
63
64 if (IS_ERR_OR_NULL(file_priv))
65 return;
66
67 score = 0;
68 if (banned)
69 score = I915_CLIENT_SCORE_CONTEXT_BAN3;
70
71 prev_hang = xchg(&file_priv->hang_timestamp, jiffies)__sync_lock_test_and_set(&file_priv->hang_timestamp, jiffies
)
;
72 if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)((long)(jiffies) - (long)(prev_hang + (60 * hz)) < 0))
73 score += I915_CLIENT_SCORE_HANG_FAST1;
74
75 if (score) {
76 atomic_add(score, &file_priv->ban_score)__sync_fetch_and_add(&file_priv->ban_score, score);
77
78 drm_dbg(&ctx->i915->drm,drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "client %s: gained %u ban score, now %u\n", ctx->name, score
, ({ typeof(*(&file_priv->ban_score)) __tmp = *(volatile
typeof(*(&file_priv->ban_score)) *)&(*(&file_priv
->ban_score)); membar_datadep_consumer(); __tmp; }))
79 "client %s: gained %u ban score, now %u\n",drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "client %s: gained %u ban score, now %u\n", ctx->name, score
, ({ typeof(*(&file_priv->ban_score)) __tmp = *(volatile
typeof(*(&file_priv->ban_score)) *)&(*(&file_priv
->ban_score)); membar_datadep_consumer(); __tmp; }))
80 ctx->name, score,drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "client %s: gained %u ban score, now %u\n", ctx->name, score
, ({ typeof(*(&file_priv->ban_score)) __tmp = *(volatile
typeof(*(&file_priv->ban_score)) *)&(*(&file_priv
->ban_score)); membar_datadep_consumer(); __tmp; }))
81 atomic_read(&file_priv->ban_score))drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "client %s: gained %u ban score, now %u\n", ctx->name, score
, ({ typeof(*(&file_priv->ban_score)) __tmp = *(volatile
typeof(*(&file_priv->ban_score)) *)&(*(&file_priv
->ban_score)); membar_datadep_consumer(); __tmp; }))
;
82 }
83}
84
85static bool_Bool mark_guilty(struct i915_request *rq)
86{
87 struct i915_gem_context *ctx;
88 unsigned long prev_hang;
89 bool_Bool banned;
90 int i;
91
92 if (intel_context_is_closed(rq->context)) {
93 intel_context_set_banned(rq->context);
94 return true1;
95 }
96
97 rcu_read_lock();
98 ctx = rcu_dereference(rq->context->gem_context)(rq->context->gem_context);
99 if (ctx && !kref_get_unless_zero(&ctx->ref))
100 ctx = NULL((void *)0);
101 rcu_read_unlock();
102 if (!ctx)
103 return intel_context_is_banned(rq->context);
104
105 atomic_inc(&ctx->guilty_count)__sync_fetch_and_add(&ctx->guilty_count, 1);
106
107 /* Cool contexts are too cool to be banned! (Used for reset testing.) */
108 if (!i915_gem_context_is_bannable(ctx)) {
109 banned = false0;
110 goto out;
111 }
112
113 drm_notice(&ctx->i915->drm,printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s context reset due to GPU hang\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ctx->
name)
114 "%s context reset due to GPU hang\n",printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s context reset due to GPU hang\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ctx->
name)
115 ctx->name)printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s context reset due to GPU hang\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ctx->
name)
;
116
117 /* Record the timestamp for the last N hangs */
118 prev_hang = ctx->hang_timestamp[0];
119 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp)(sizeof((ctx->hang_timestamp)) / sizeof((ctx->hang_timestamp
)[0]))
- 1; i++)
120 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
121 ctx->hang_timestamp[i] = jiffies;
122
123 /* If we have hung N+1 times in rapid succession, we ban the context! */
124 banned = !i915_gem_context_is_recoverable(ctx);
125 if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)((long)(jiffies) - (long)(prev_hang + (120 * hz)) < 0))
126 banned = true1;
127 if (banned) {
128 drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "context %s: guilty %d, banned\n", ctx->name, ({ typeof(
*(&ctx->guilty_count)) __tmp = *(volatile typeof(*(&
ctx->guilty_count)) *)&(*(&ctx->guilty_count));
membar_datadep_consumer(); __tmp; }))
129 ctx->name, atomic_read(&ctx->guilty_count))drm_dev_dbg((&ctx->i915->drm)->dev, DRM_UT_DRIVER
, "context %s: guilty %d, banned\n", ctx->name, ({ typeof(
*(&ctx->guilty_count)) __tmp = *(volatile typeof(*(&
ctx->guilty_count)) *)&(*(&ctx->guilty_count));
membar_datadep_consumer(); __tmp; }))
;
130 intel_context_set_banned(rq->context);
131 }
132
133 client_mark_guilty(ctx, banned);
134
135out:
136 i915_gem_context_put(ctx);
137 return banned;
138}
139
140static void mark_innocent(struct i915_request *rq)
141{
142 struct i915_gem_context *ctx;
143
144 rcu_read_lock();
145 ctx = rcu_dereference(rq->context->gem_context)(rq->context->gem_context);
146 if (ctx)
147 atomic_inc(&ctx->active_count)__sync_fetch_and_add(&ctx->active_count, 1);
148 rcu_read_unlock();
149}
150
151void __i915_request_reset(struct i915_request *rq, bool_Bool guilty)
152{
153 RQ_TRACE(rq, "guilty? %s\n", yesno(guilty))do { const struct i915_request *rq__ = (rq); do { const struct
intel_engine_cs *e__ __attribute__((__unused__)) = (rq__->
engine); do { } while (0); } while (0); } while (0)
;
154
155 GEM_BUG_ON(i915_request_completed(rq))((void)0);
156
157 rcu_read_lock(); /* protect the GEM context */
158 if (guilty) {
159 i915_request_set_error_once(rq, -EIO5);
160 __i915_request_skip(rq);
161 if (mark_guilty(rq))
162 engine_skip_context(rq);
163 } else {
164 i915_request_set_error_once(rq, -EAGAIN35);
165 mark_innocent(rq);
166 }
167 rcu_read_unlock();
168}
169
170static bool_Bool i915_in_reset(struct pci_dev *pdev)
171{
172 u8 gdrst;
173
174 pci_read_config_byte(pdev, I915_GDRST0xc0, &gdrst);
175 return gdrst & GRDOM_RESET_STATUS(1 << 1);
176}
177
178static int i915_do_reset(struct intel_gt *gt,
179 intel_engine_mask_t engine_mask,
180 unsigned int retry)
181{
182 struct pci_dev *pdev = gt->i915->drm.pdev;
183 int err;
184
185 /* Assert reset for at least 20 usec, and wait for acknowledgement. */
186 pci_write_config_byte(pdev, I915_GDRST0xc0, GRDOM_RESET_ENABLE(1 << 0));
187 udelay(50);
188 err = wait_for_atomic(i915_in_reset(pdev), 50)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if (((i915_in_reset(pdev
)))) { ret = 0; break; } if (now - base >= timeout) { ret =
-60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
;
189
190 /* Clear the reset request. */
191 pci_write_config_byte(pdev, I915_GDRST0xc0, 0);
192 udelay(50);
193 if (!err)
194 err = wait_for_atomic(!i915_in_reset(pdev), 50)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if (((!i915_in_reset(pdev
)))) { ret = 0; break; } if (now - base >= timeout) { ret =
-60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
;
195
196 return err;
197}
198
199static bool_Bool g4x_reset_complete(struct pci_dev *pdev)
200{
201 u8 gdrst;
202
203 pci_read_config_byte(pdev, I915_GDRST0xc0, &gdrst);
204 return (gdrst & GRDOM_RESET_ENABLE(1 << 0)) == 0;
205}
206
207static int g33_do_reset(struct intel_gt *gt,
208 intel_engine_mask_t engine_mask,
209 unsigned int retry)
210{
211 struct pci_dev *pdev = gt->i915->drm.pdev;
212
213 pci_write_config_byte(pdev, I915_GDRST0xc0, GRDOM_RESET_ENABLE(1 << 0));
214 return wait_for_atomic(g4x_reset_complete(pdev), 50)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if (((g4x_reset_complete
(pdev)))) { ret = 0; break; } if (now - base >= timeout) {
ret = -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
;
215}
216
217static int g4x_do_reset(struct intel_gt *gt,
218 intel_engine_mask_t engine_mask,
219 unsigned int retry)
220{
221 struct pci_dev *pdev = gt->i915->drm.pdev;
222 struct intel_uncore *uncore = gt->uncore;
223 int ret;
224
225 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
226 rmw_set_fw(uncore, VDECCLK_GATE_D((const i915_reg_t){ .reg = (0x620C) }), VCP_UNIT_CLOCK_GATE_DISABLE(1 << 4));
227 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D)((void)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg =
(0x620C) })))
;
228
229 pci_write_config_byte(pdev, I915_GDRST0xc0,
230 GRDOM_MEDIA(3 << 2) | GRDOM_RESET_ENABLE(1 << 0));
231 ret = wait_for_atomic(g4x_reset_complete(pdev), 50)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if (((g4x_reset_complete
(pdev)))) { ret = 0; break; } if (now - base >= timeout) {
ret = -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
;
232 if (ret) {
233 drm_dbg(&gt->i915->drm, "Wait for media reset failed\n")drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for media reset failed\n")
;
234 goto out;
235 }
236
237 pci_write_config_byte(pdev, I915_GDRST0xc0,
238 GRDOM_RENDER(1 << 2) | GRDOM_RESET_ENABLE(1 << 0));
239 ret = wait_for_atomic(g4x_reset_complete(pdev), 50)({ extern char _ctassert[(!(!__builtin_constant_p((50) * 1000
))) ? 1 : -1 ] __attribute__((__unused__)); extern char _ctassert
[(!(((50) * 1000) > 50000)) ? 1 : -1 ] __attribute__((__unused__
)); ({ int cpu, ret, timeout = (((50) * 1000)) * 1000; u64 base
; do { } while (0); if (!(1)) { ; cpu = (({struct cpu_info *__ci
; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_cpuid); } base =
local_clock(); for (;;) { u64 now = local_clock(); if (!(1))
; __asm volatile("" : : : "memory"); if (((g4x_reset_complete
(pdev)))) { ret = 0; break; } if (now - base >= timeout) {
ret = -60; break; } cpu_relax(); if (!(1)) { ; if (__builtin_expect
(!!(cpu != (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid)), 0)) { timeout -= now - base; cpu
= (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" :
"=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_cpuid); base = local_clock(); } } } ret; }
); })
;
240 if (ret) {
241 drm_dbg(&gt->i915->drm, "Wait for render reset failed\n")drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for render reset failed\n")
;
242 goto out;
243 }
244
245out:
246 pci_write_config_byte(pdev, I915_GDRST0xc0, 0);
247
248 rmw_clear_fw(uncore, VDECCLK_GATE_D((const i915_reg_t){ .reg = (0x620C) }), VCP_UNIT_CLOCK_GATE_DISABLE(1 << 4));
249 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D)((void)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg =
(0x620C) })))
;
250
251 return ret;
252}
253
254static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
255 unsigned int retry)
256{
257 struct intel_uncore *uncore = gt->uncore;
258 int ret;
259
260 intel_uncore_write_fw(uncore, ILK_GDSR,__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x10000
+ 0x2ca4) }), (1 << 1) | (1 << 0))
261 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE)__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x10000
+ 0x2ca4) }), (1 << 1) | (1 << 0))
;
262 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR((const i915_reg_t){ .reg = (0x10000 + 0x2ca4) }),
263 ILK_GRDOM_RESET_ENABLE(1 << 0), 0,
264 5000, 0,
265 NULL((void *)0));
266 if (ret) {
267 drm_dbg(&gt->i915->drm, "Wait for render reset failed\n")drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for render reset failed\n")
;
268 goto out;
269 }
270
271 intel_uncore_write_fw(uncore, ILK_GDSR,__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x10000
+ 0x2ca4) }), (3 << 1) | (1 << 0))
272 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE)__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x10000
+ 0x2ca4) }), (3 << 1) | (1 << 0))
;
273 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR((const i915_reg_t){ .reg = (0x10000 + 0x2ca4) }),
274 ILK_GRDOM_RESET_ENABLE(1 << 0), 0,
275 5000, 0,
276 NULL((void *)0));
277 if (ret) {
278 drm_dbg(&gt->i915->drm, "Wait for media reset failed\n")drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for media reset failed\n")
;
279 goto out;
280 }
281
282out:
283 intel_uncore_write_fw(uncore, ILK_GDSR, 0)__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x10000
+ 0x2ca4) }), 0)
;
284 intel_uncore_posting_read_fw(uncore, ILK_GDSR)((void)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg =
(0x10000 + 0x2ca4) })))
;
285 return ret;
286}
287
288/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
289static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
290{
291 struct intel_uncore *uncore = gt->uncore;
292 int err;
293
294 /*
295 * GEN6_GDRST is not in the gt power well, no need to check
296 * for fifo space for the write or forcewake the chip for
297 * the read
298 */
299 intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask)__raw_uncore_write32(uncore, ((const i915_reg_t){ .reg = (0x941c
) }), hw_domain_mask)
;
300
301 /* Wait for the device to ack the reset requests */
302 err = __intel_wait_for_register_fw(uncore,
303 GEN6_GDRST((const i915_reg_t){ .reg = (0x941c) }), hw_domain_mask, 0,
304 500, 0,
305 NULL((void *)0));
306 if (err)
307 drm_dbg(&gt->i915->drm,drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for 0x%08x engines reset failed\n", hw_domain_mask)
308 "Wait for 0x%08x engines reset failed\n",drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for 0x%08x engines reset failed\n", hw_domain_mask)
309 hw_domain_mask)drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for 0x%08x engines reset failed\n", hw_domain_mask)
;
310
311 return err;
312}
313
314static int gen6_reset_engines(struct intel_gt *gt,
315 intel_engine_mask_t engine_mask,
316 unsigned int retry)
317{
318 static const u32 hw_engine_mask[] = {
319 [RCS0] = GEN6_GRDOM_RENDER(1 << 1),
320 [BCS0] = GEN6_GRDOM_BLT(1 << 3),
321 [VCS0] = GEN6_GRDOM_MEDIA(1 << 2),
322 [VCS1] = GEN8_GRDOM_MEDIA2(1 << 7),
323 [VECS0] = GEN6_GRDOM_VECS(1 << 4),
324 };
325 struct intel_engine_cs *engine;
326 u32 hw_mask;
327
328 if (engine_mask == ALL_ENGINES((intel_engine_mask_t)~0ul)) {
329 hw_mask = GEN6_GRDOM_FULL(1 << 0);
330 } else {
331 intel_engine_mask_t tmp;
332
333 hw_mask = 0;
334 for_each_engine_masked(engine, gt, engine_mask, tmp)for ((tmp) = (engine_mask) & (gt)->info.engine_mask; (
tmp) ? ((engine) = (gt)->engine[({ int __idx = ffs(tmp) - 1
; tmp &= ~(1UL << (__idx)); __idx; })]), 1 : 0;)
{
335 GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask))((void)0);
336 hw_mask |= hw_engine_mask[engine->id];
337 }
338 }
339
340 return gen6_hw_domain_reset(gt, hw_mask);
341}
342
343static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
344{
345 struct intel_uncore *uncore = engine->uncore;
346 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
347 i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
348 u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
349 i915_reg_t sfc_usage;
350 u32 sfc_usage_bit;
351 u32 sfc_reset_bit;
352 int ret;
353
354 switch (engine->class) {
355 case VIDEO_DECODE_CLASS1:
356 if ((BIT(engine->instance)(1UL << (engine->instance)) & vdbox_sfc_access) == 0)
357 return 0;
358
359 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x88C) }
)
;
360 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT(1 << 0);
361
362 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x890) }
)
;
363 sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT(1 << 1);
364
365 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x890) }
)
;
366 sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT(1 << 0);
367 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance)((1 << 17) << ((engine->instance) >> 1));
368 break;
369
370 case VIDEO_ENHANCEMENT_CLASS2:
371 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x201C)
})
;
372 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT(1 << 0);
373
374 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x2018)
})
;
375 sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT(1 << 0);
376
377 sfc_usage = GEN11_VECS_SFC_USAGE(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x2014)
})
;
378 sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT(1 << 0);
379 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance)((1 << 17) << (engine->instance));
380 break;
381
382 default:
383 return 0;
384 }
385
386 /*
387 * If the engine is using a SFC, tell the engine that a software reset
388 * is going to happen. The engine will then try to force lock the SFC.
389 * If SFC ends up being locked to the engine we want to reset, we have
390 * to reset it as well (we will unlock it once the reset sequence is
391 * completed).
392 */
393 if (!(intel_uncore_read_fw(uncore, sfc_usage)__raw_uncore_read32(uncore, sfc_usage) & sfc_usage_bit))
394 return 0;
395
396 rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
397
398 ret = __intel_wait_for_register_fw(uncore,
399 sfc_forced_lock_ack,
400 sfc_forced_lock_ack_bit,
401 sfc_forced_lock_ack_bit,
402 1000, 0, NULL((void *)0));
403
404 /* Was the SFC released while we were trying to lock it? */
405 if (!(intel_uncore_read_fw(uncore, sfc_usage)__raw_uncore_read32(uncore, sfc_usage) & sfc_usage_bit))
406 return 0;
407
408 if (ret) {
409 drm_dbg(&engine->i915->drm,drm_dev_dbg((&engine->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for SFC forced lock ack failed\n")
410 "Wait for SFC forced lock ack failed\n")drm_dev_dbg((&engine->i915->drm)->dev, DRM_UT_DRIVER
, "Wait for SFC forced lock ack failed\n")
;
411 return ret;
412 }
413
414 *hw_mask |= sfc_reset_bit;
415 return 0;
416}
417
418static void gen11_unlock_sfc(struct intel_engine_cs *engine)
419{
420 struct intel_uncore *uncore = engine->uncore;
421 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
422 i915_reg_t sfc_forced_lock;
423 u32 sfc_forced_lock_bit;
424
425 switch (engine->class) {
426 case VIDEO_DECODE_CLASS1:
427 if ((BIT(engine->instance)(1UL << (engine->instance)) & vdbox_sfc_access) == 0)
428 return;
429
430 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x88C) }
)
;
431 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT(1 << 0);
432 break;
433
434 case VIDEO_ENHANCEMENT_CLASS2:
435 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine)((const i915_reg_t){ .reg = ((engine)->mmio_base + 0x201C)
})
;
436 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT(1 << 0);
437 break;
438
439 default:
440 return;
441 }
442
443 rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
444}
445
446static int gen11_reset_engines(struct intel_gt *gt,
447 intel_engine_mask_t engine_mask,
448 unsigned int retry)
449{
450 static const u32 hw_engine_mask[] = {
451 [RCS0] = GEN11_GRDOM_RENDER(1 << 1),
452 [BCS0] = GEN11_GRDOM_BLT(1 << 2),
453 [VCS0] = GEN11_GRDOM_MEDIA(1 << 5),
454 [VCS1] = GEN11_GRDOM_MEDIA2(1 << 6),
455 [VCS2] = GEN11_GRDOM_MEDIA3(1 << 7),
456 [VCS3] = GEN11_GRDOM_MEDIA4(1 << 8),
457 [VECS0] = GEN11_GRDOM_VECS(1 << 13),
458 [VECS1] = GEN11_GRDOM_VECS2(1 << 14),
459 };
460 struct intel_engine_cs *engine;
461 intel_engine_mask_t tmp;
462 u32 hw_mask;
463 int ret;
464
465 if (engine_mask == ALL_ENGINES((intel_engine_mask_t)~0ul)) {
466 hw_mask = GEN11_GRDOM_FULL(1 << 0);
467 } else {
468 hw_mask = 0;
469 for_each_engine_masked(engine, gt, engine_mask, tmp)for ((tmp) = (engine_mask) & (gt)->info.engine_mask; (
tmp) ? ((engine) = (gt)->engine[({ int __idx = ffs(tmp) - 1
; tmp &= ~(1UL << (__idx)); __idx; })]), 1 : 0;)
{
470 GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask))((void)0);
471 hw_mask |= hw_engine_mask[engine->id];
472 ret = gen11_lock_sfc(engine, &hw_mask);
473 if (ret)
474 goto sfc_unlock;
475 }
476 }
477
478 ret = gen6_hw_domain_reset(gt, hw_mask);
479
480sfc_unlock:
481 /*
482 * We unlock the SFC based on the lock status and not the result of
483 * gen11_lock_sfc to make sure that we clean properly if something
484 * wrong happened during the lock (e.g. lock acquired after timeout
485 * expiration).
486 */
487 if (engine_mask != ALL_ENGINES((intel_engine_mask_t)~0ul))
488 for_each_engine_masked(engine, gt, engine_mask, tmp)for ((tmp) = (engine_mask) & (gt)->info.engine_mask; (
tmp) ? ((engine) = (gt)->engine[({ int __idx = ffs(tmp) - 1
; tmp &= ~(1UL << (__idx)); __idx; })]), 1 : 0;)
489 gen11_unlock_sfc(engine);
490
491 return ret;
492}
493
494static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
495{
496 struct intel_uncore *uncore = engine->uncore;
497 const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0xd0) }
)
;
498 u32 request, mask, ack;
499 int ret;
500
501 ack = intel_uncore_read_fw(uncore, reg)__raw_uncore_read32(uncore, reg);
502 if (ack & RESET_CTL_CAT_ERROR((u32)((1UL << (2)) + 0))) {
503 /*
504 * For catastrophic errors, ready-for-reset sequence
505 * needs to be bypassed: HAS#396813
506 */
507 request = RESET_CTL_CAT_ERROR((u32)((1UL << (2)) + 0));
508 mask = RESET_CTL_CAT_ERROR((u32)((1UL << (2)) + 0));
509
510 /* Catastrophic errors need to be cleared by HW */
511 ack = 0;
512 } else if (!(ack & RESET_CTL_READY_TO_RESET((u32)((1UL << (1)) + 0)))) {
513 request = RESET_CTL_REQUEST_RESET((u32)((1UL << (0)) + 0));
514 mask = RESET_CTL_READY_TO_RESET((u32)((1UL << (1)) + 0));
515 ack = RESET_CTL_READY_TO_RESET((u32)((1UL << (1)) + 0));
516 } else {
517 return 0;
518 }
519
520 intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request))__raw_uncore_write32(uncore, reg, ({ typeof(request) _a = (request
); ({ if (__builtin_constant_p(_a)) do { } while (0); if (__builtin_constant_p
(_a)) do { } while (0); if (__builtin_constant_p(_a) &&
__builtin_constant_p(_a)) do { } while (0); ((_a) << 16
| (_a)); }); }))
;
521 ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
522 700, 0, NULL((void *)0));
523 if (ret)
524 drm_err(&engine->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , engine
->name, request, __raw_uncore_read32(uncore, reg))
525 "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , engine
->name, request, __raw_uncore_read32(uncore, reg))
526 engine->name, request,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , engine
->name, request, __raw_uncore_read32(uncore, reg))
527 intel_uncore_read_fw(uncore, reg))printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , engine
->name, request, __raw_uncore_read32(uncore, reg))
;
528
529 return ret;
530}
531
532static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
533{
534 intel_uncore_write_fw(engine->uncore,__raw_uncore_write32(engine->uncore, ((const i915_reg_t){ .
reg = ((engine->mmio_base) + 0xd0) }), (({ if (__builtin_constant_p
((((u32)((1UL << (0)) + 0))))) do { } while (0); if (__builtin_constant_p
(0)) do { } while (0); if (__builtin_constant_p((((u32)((1UL <<
(0)) + 0)))) && __builtin_constant_p(0)) do { } while
(0); (((((u32)((1UL << (0)) + 0)))) << 16 | (0))
; })))
535 RING_RESET_CTL(engine->mmio_base),__raw_uncore_write32(engine->uncore, ((const i915_reg_t){ .
reg = ((engine->mmio_base) + 0xd0) }), (({ if (__builtin_constant_p
((((u32)((1UL << (0)) + 0))))) do { } while (0); if (__builtin_constant_p
(0)) do { } while (0); if (__builtin_constant_p((((u32)((1UL <<
(0)) + 0)))) && __builtin_constant_p(0)) do { } while
(0); (((((u32)((1UL << (0)) + 0)))) << 16 | (0))
; })))
536 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))__raw_uncore_write32(engine->uncore, ((const i915_reg_t){ .
reg = ((engine->mmio_base) + 0xd0) }), (({ if (__builtin_constant_p
((((u32)((1UL << (0)) + 0))))) do { } while (0); if (__builtin_constant_p
(0)) do { } while (0); if (__builtin_constant_p((((u32)((1UL <<
(0)) + 0)))) && __builtin_constant_p(0)) do { } while
(0); (((((u32)((1UL << (0)) + 0)))) << 16 | (0))
; })))
;
537}
538
539static int gen8_reset_engines(struct intel_gt *gt,
540 intel_engine_mask_t engine_mask,
541 unsigned int retry)
542{
543 struct intel_engine_cs *engine;
544 const bool_Bool reset_non_ready = retry >= 1;
545 intel_engine_mask_t tmp;
546 int ret;
547
548 for_each_engine_masked(engine, gt, engine_mask, tmp)for ((tmp) = (engine_mask) & (gt)->info.engine_mask; (
tmp) ? ((engine) = (gt)->engine[({ int __idx = ffs(tmp) - 1
; tmp &= ~(1UL << (__idx)); __idx; })]), 1 : 0;)
{
549 ret = gen8_engine_reset_prepare(engine);
550 if (ret && !reset_non_ready)
551 goto skip_reset;
552
553 /*
554 * If this is not the first failed attempt to prepare,
555 * we decide to proceed anyway.
556 *
557 * By doing so we risk context corruption and with
558 * some gens (kbl), possible system hang if reset
559 * happens during active bb execution.
560 *
561 * We rather take context corruption instead of
562 * failed reset with a wedged driver/gpu. And
563 * active bb execution case should be covered by
564 * stop_engines() we have before the reset.
565 */
566 }
567
568 if (INTEL_GEN(gt->i915)((&(gt->i915)->__info)->gen) >= 11)
569 ret = gen11_reset_engines(gt, engine_mask, retry);
570 else
571 ret = gen6_reset_engines(gt, engine_mask, retry);
572
573skip_reset:
574 for_each_engine_masked(engine, gt, engine_mask, tmp)for ((tmp) = (engine_mask) & (gt)->info.engine_mask; (
tmp) ? ((engine) = (gt)->engine[({ int __idx = ffs(tmp) - 1
; tmp &= ~(1UL << (__idx)); __idx; })]), 1 : 0;)
575 gen8_engine_reset_cancel(engine);
576
577 return ret;
578}
579
580static int mock_reset(struct intel_gt *gt,
581 intel_engine_mask_t mask,
582 unsigned int retry)
583{
584 return 0;
585}
586
587typedef int (*reset_func)(struct intel_gt *,
588 intel_engine_mask_t engine_mask,
589 unsigned int retry);
590
591static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
592{
593 struct drm_i915_privateinteldrm_softc *i915 = gt->i915;
594
595 if (is_mock_gt(gt))
596 return mock_reset;
597 else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 8)
598 return gen8_reset_engines;
599 else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 6)
600 return gen6_reset_engines;
601 else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 5)
602 return ilk_do_reset;
603 else if (IS_G4X(i915)(IS_PLATFORM(i915, INTEL_G45) || IS_PLATFORM(i915, INTEL_GM45
))
)
604 return g4x_do_reset;
605 else if (IS_G33(i915)IS_PLATFORM(i915, INTEL_G33) || IS_PINEVIEW(i915)IS_PLATFORM(i915, INTEL_PINEVIEW))
606 return g33_do_reset;
607 else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 3)
608 return i915_do_reset;
609 else
610 return NULL((void *)0);
611}
612
613int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
614{
615 const int retries = engine_mask == ALL_ENGINES((intel_engine_mask_t)~0ul) ? RESET_MAX_RETRIES3 : 1;
616 reset_func reset;
617 int ret = -ETIMEDOUT60;
618 int retry;
619
620 reset = intel_get_gpu_reset(gt);
621 if (!reset)
622 return -ENODEV19;
623
624 /*
625 * If the power well sleeps during the reset, the reset
626 * request may be dropped and never completes (causing -EIO).
627 */
628 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
629 for (retry = 0; ret == -ETIMEDOUT60 && retry < retries; retry++) {
630 GT_TRACE(gt, "engine_mask=%x\n", engine_mask)do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
631 preempt_disable();
632 ret = reset(gt, engine_mask, retry);
633 preempt_enable();
634 }
635 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
636
637 return ret;
638}
639
640bool_Bool intel_has_gpu_reset(const struct intel_gt *gt)
641{
642 if (!gt->i915->params.reset)
643 return NULL((void *)0);
644
645 return intel_get_gpu_reset(gt);
646}
647
648bool_Bool intel_has_reset_engine(const struct intel_gt *gt)
649{
650 if (gt->i915->params.reset < 2)
651 return false0;
652
653 return INTEL_INFO(gt->i915)(&(gt->i915)->__info)->has_reset_engine;
654}
655
656int intel_reset_guc(struct intel_gt *gt)
657{
658 u32 guc_domain =
659 INTEL_GEN(gt->i915)((&(gt->i915)->__info)->gen) >= 11 ? GEN11_GRDOM_GUC(1 << 3) : GEN9_GRDOM_GUC(1 << 5);
660 int ret;
661
662 GEM_BUG_ON(!HAS_GT_UC(gt->i915))((void)0);
663
664 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
665 ret = gen6_hw_domain_reset(gt, guc_domain);
666 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
667
668 return ret;
669}
670
671/*
672 * Ensure irq handler finishes, and not run again.
673 * Also return the active request so that we only search for it once.
674 */
675static void reset_prepare_engine(struct intel_engine_cs *engine)
676{
677 /*
678 * During the reset sequence, we must prevent the engine from
679 * entering RC6. As the context state is undefined until we restart
680 * the engine, if it does enter RC6 during the reset, the state
681 * written to the powercontext is undefined and so we may lose
682 * GPU state upon resume, i.e. fail to restart after a reset.
683 */
684 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
685 if (engine->reset.prepare)
686 engine->reset.prepare(engine);
687}
688
689static void revoke_mmaps(struct intel_gt *gt)
690{
691 int i;
692
693 for (i = 0; i < gt->ggtt->num_fences; i++) {
694 struct drm_vma_offset_node *node;
695 struct i915_vma *vma;
696 u64 vma_offset;
697
698 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma)({ typeof(gt->ggtt->fence_regs[i].vma) __tmp = *(volatile
typeof(gt->ggtt->fence_regs[i].vma) *)&(gt->ggtt
->fence_regs[i].vma); membar_datadep_consumer(); __tmp; })
;
699 if (!vma)
700 continue;
701
702 if (!i915_vma_has_userfault(vma))
703 continue;
704
705 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i])((void)0);
706
707 if (!vma->mmo)
708 continue;
709
710 node = &vma->mmo->vma_node;
Value stored to 'node' is never read
711 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT12;
712
713#ifdef __linux__
714 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
715 drm_vma_node_offset_addr(node) + vma_offset,
716 vma->size,
717 1);
718#else
719{
720 struct drm_i915_privateinteldrm_softc *dev_priv = vma->obj->base.dev->dev_private;
721 struct vm_page *pg;
722
723 for (pg = &dev_priv->pgs[atop(vma->node.start)((vma->node.start) >> 12)];
724 pg != &dev_priv->pgs[atop(vma->node.start + vma->size)((vma->node.start + vma->size) >> 12)];
725 pg++)
726 pmap_page_protect(pg, PROT_NONE0x00);
727}
728#endif
729 }
730}
731
732static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
733{
734 struct intel_engine_cs *engine;
735 intel_engine_mask_t awake = 0;
736 enum intel_engine_id id;
737
738 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
{
739 if (intel_engine_pm_get_if_awake(engine))
740 awake |= engine->mask;
741 reset_prepare_engine(engine);
742 }
743
744 intel_uc_reset_prepare(&gt->uc);
745
746 return awake;
747}
748
749static void gt_revoke(struct intel_gt *gt)
750{
751 revoke_mmaps(gt);
752}
753
754static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
755{
756 struct intel_engine_cs *engine;
757 enum intel_engine_id id;
758 int err;
759
760 /*
761 * Everything depends on having the GTT running, so we need to start
762 * there.
763 */
764 err = i915_ggtt_enable_hw(gt->i915);
765 if (err)
766 return err;
767
768 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
769 __intel_engine_reset(engine, stalled_mask & engine->mask);
770
771 intel_ggtt_restore_fences(gt->ggtt);
772
773 return err;
774}
775
776static void reset_finish_engine(struct intel_engine_cs *engine)
777{
778 if (engine->reset.finish)
779 engine->reset.finish(engine);
780 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
781
782 intel_engine_signal_breadcrumbs(engine);
783}
784
785static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
786{
787 struct intel_engine_cs *engine;
788 enum intel_engine_id id;
789
790 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
{
791 reset_finish_engine(engine);
792 if (awake & engine->mask)
793 intel_engine_pm_put(engine);
794 }
795}
796
797static void nop_submit_request(struct i915_request *request)
798{
799 struct intel_engine_cs *engine = request->engine;
800 unsigned long flags;
801
802 RQ_TRACE(request, "-EIO\n")do { const struct i915_request *rq__ = (request); do { const struct
intel_engine_cs *e__ __attribute__((__unused__)) = (rq__->
engine); do { } while (0); } while (0); } while (0)
;
803 i915_request_set_error_once(request, -EIO5);
804
805 spin_lock_irqsave(&engine->active.lock, flags)do { flags = 0; mtx_enter(&engine->active.lock); } while
(0)
;
806 __i915_request_submit(request);
807 i915_request_mark_complete(request);
808 spin_unlock_irqrestore(&engine->active.lock, flags)do { (void)(flags); mtx_leave(&engine->active.lock); }
while (0)
;
809
810 intel_engine_signal_breadcrumbs(engine);
811}
812
813static void __intel_gt_set_wedged(struct intel_gt *gt)
814{
815 struct intel_engine_cs *engine;
816 intel_engine_mask_t awake;
817 enum intel_engine_id id;
818
819 if (test_bit(I915_WEDGED(64 - 1), &gt->reset.flags))
820 return;
821
822 GT_TRACE(gt, "start\n")do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
823
824 /*
825 * First, stop submission to hw, but do not yet complete requests by
826 * rolling the global seqno forward (since this would complete requests
827 * for which we haven't set the fence error to EIO yet).
828 */
829 awake = reset_prepare(gt);
830
831 /* Even if the GPU reset fails, it should still stop the engines */
832 if (!INTEL_INFO(gt->i915)(&(gt->i915)->__info)->gpu_reset_clobbers_display)
833 __intel_gt_reset(gt, ALL_ENGINES((intel_engine_mask_t)~0ul));
834
835 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
836 engine->submit_request = nop_submit_request;
837
838 /*
839 * Make sure no request can slip through without getting completed by
840 * either this call here to intel_engine_write_global_seqno, or the one
841 * in nop_submit_request.
842 */
843 synchronize_rcu_expedited();
844 set_bit(I915_WEDGED(64 - 1), &gt->reset.flags);
845
846 /* Mark all executing requests as skipped */
847 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
848 if (engine->reset.cancel)
849 engine->reset.cancel(engine);
850
851 reset_finish(gt, awake);
852
853 GT_TRACE(gt, "end\n")do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
854}
855
856void intel_gt_set_wedged(struct intel_gt *gt)
857{
858 intel_wakeref_t wakeref;
859
860 if (test_bit(I915_WEDGED(64 - 1), &gt->reset.flags))
861 return;
862
863 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
864 mutex_lock(&gt->reset.mutex)rw_enter_write(&gt->reset.mutex);
865
866 if (GEM_SHOW_DEBUG()(0)) {
867 struct drm_printer p = drm_debug_printer(__func__);
868 struct intel_engine_cs *engine;
869 enum intel_engine_id id;
870
871 drm_printf(&p, "called from %pS\n", (void *)_RET_IP___builtin_return_address(0));
872 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
{
873 if (intel_engine_is_idle(engine))
874 continue;
875
876 intel_engine_dump(engine, &p, "%s\n", engine->name);
877 }
878 }
879
880 __intel_gt_set_wedged(gt);
881
882 mutex_unlock(&gt->reset.mutex)rw_exit_write(&gt->reset.mutex);
883 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
884}
885
886static bool_Bool __intel_gt_unset_wedged(struct intel_gt *gt)
887{
888 struct intel_gt_timelines *timelines = &gt->timelines;
889 struct intel_timeline *tl;
890 bool_Bool ok;
891
892 if (!test_bit(I915_WEDGED(64 - 1), &gt->reset.flags))
893 return true1;
894
895 /* Never fully initialised, recovery impossible */
896 if (intel_gt_has_unrecoverable_error(gt))
897 return false0;
898
899 GT_TRACE(gt, "start\n")do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
900
901 /*
902 * Before unwedging, make sure that all pending operations
903 * are flushed and errored out - we may have requests waiting upon
904 * third party fences. We marked all inflight requests as EIO, and
905 * every execbuf since returned EIO, for consistency we want all
906 * the currently pending requests to also be marked as EIO, which
907 * is done inside our nop_submit_request - and so we must wait.
908 *
909 * No more can be submitted until we reset the wedged bit.
910 */
911 spin_lock(&timelines->lock)mtx_enter(&timelines->lock);
912 list_for_each_entry(tl, &timelines->active_list, link)for (tl = ({ const __typeof( ((__typeof(*tl) *)0)->link ) *
__mptr = ((&timelines->active_list)->next); (__typeof
(*tl) *)( (char *)__mptr - __builtin_offsetof(__typeof(*tl), link
) );}); &tl->link != (&timelines->active_list);
tl = ({ const __typeof( ((__typeof(*tl) *)0)->link ) *__mptr
= (tl->link.next); (__typeof(*tl) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*tl), link) );}))
{
913 struct dma_fence *fence;
914
915 fence = i915_active_fence_get(&tl->last_request);
916 if (!fence)
917 continue;
918
919 spin_unlock(&timelines->lock)mtx_leave(&timelines->lock);
920
921 /*
922 * All internal dependencies (i915_requests) will have
923 * been flushed by the set-wedge, but we may be stuck waiting
924 * for external fences. These should all be capped to 10s
925 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
926 * in the worst case.
927 */
928 dma_fence_default_wait(fence, false0, MAX_SCHEDULE_TIMEOUT(0x7fffffff));
929 dma_fence_put(fence);
930
931 /* Restart iteration after droping lock */
932 spin_lock(&timelines->lock)mtx_enter(&timelines->lock);
933 tl = list_entry(&timelines->active_list, typeof(*tl), link)({ const __typeof( ((typeof(*tl) *)0)->link ) *__mptr = (&
timelines->active_list); (typeof(*tl) *)( (char *)__mptr -
__builtin_offsetof(typeof(*tl), link) );})
;
934 }
935 spin_unlock(&timelines->lock)mtx_leave(&timelines->lock);
936
937 /* We must reset pending GPU events before restoring our submission */
938 ok = !HAS_EXECLISTS(gt->i915)((&(gt->i915)->__info)->has_logical_ring_contexts
)
; /* XXX better agnosticism desired */
939 if (!INTEL_INFO(gt->i915)(&(gt->i915)->__info)->gpu_reset_clobbers_display)
940 ok = __intel_gt_reset(gt, ALL_ENGINES((intel_engine_mask_t)~0ul)) == 0;
941 if (!ok) {
942 /*
943 * Warn CI about the unrecoverable wedged condition.
944 * Time for a reboot.
945 */
946 add_taint_for_CI(gt->i915, TAINT_WARN1);
947 return false0;
948 }
949
950 /*
951 * Undo nop_submit_request. We prevent all new i915 requests from
952 * being queued (by disallowing execbuf whilst wedged) so having
953 * waited for all active requests above, we know the system is idle
954 * and do not have to worry about a thread being inside
955 * engine->submit_request() as we swap over. So unlike installing
956 * the nop_submit_request on reset, we can do this from normal
957 * context and do not require stop_machine().
958 */
959 intel_engines_reset_default_submission(gt);
960
961 GT_TRACE(gt, "end\n")do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
962
963 smp_mb__before_atomic()do { } while (0); /* complete takeover before enabling execbuf */
964 clear_bit(I915_WEDGED(64 - 1), &gt->reset.flags);
965
966 return true1;
967}
968
969bool_Bool intel_gt_unset_wedged(struct intel_gt *gt)
970{
971 bool_Bool result;
972
973 mutex_lock(&gt->reset.mutex)rw_enter_write(&gt->reset.mutex);
974 result = __intel_gt_unset_wedged(gt);
975 mutex_unlock(&gt->reset.mutex)rw_exit_write(&gt->reset.mutex);
976
977 return result;
978}
979
980static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
981{
982 int err, i;
983
984 gt_revoke(gt);
985
986 err = __intel_gt_reset(gt, ALL_ENGINES((intel_engine_mask_t)~0ul));
987 for (i = 0; err && i < RESET_MAX_RETRIES3; i++) {
988 drm_msleep(10 * (i + 1))mdelay(10 * (i + 1));
989 err = __intel_gt_reset(gt, ALL_ENGINES((intel_engine_mask_t)~0ul));
990 }
991 if (err)
992 return err;
993
994 return gt_reset(gt, stalled_mask);
995}
996
997static int resume(struct intel_gt *gt)
998{
999 struct intel_engine_cs *engine;
1000 enum intel_engine_id id;
1001 int ret;
1002
1003 for_each_engine(engine, gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt)->engine[(id)])) {} else
{
1004 ret = intel_engine_resume(engine);
1005 if (ret)
1006 return ret;
1007 }
1008
1009 return 0;
1010}
1011
1012/**
1013 * intel_gt_reset - reset chip after a hang
1014 * @gt: #intel_gt to reset
1015 * @stalled_mask: mask of the stalled engines with the guilty requests
1016 * @reason: user error message for why we are resetting
1017 *
1018 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1019 * on failure.
1020 *
1021 * Procedure is fairly simple:
1022 * - reset the chip using the reset reg
1023 * - re-init context state
1024 * - re-init hardware status page
1025 * - re-init ring buffer
1026 * - re-init interrupt state
1027 * - re-init display
1028 */
1029void intel_gt_reset(struct intel_gt *gt,
1030 intel_engine_mask_t stalled_mask,
1031 const char *reason)
1032{
1033 intel_engine_mask_t awake;
1034 int ret;
1035
1036 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags)do { const struct intel_gt *gt__ __attribute__((__unused__)) =
(gt); do { } while (0); } while (0)
;
1037
1038 might_sleep()assertwaitok();
1039 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))((void)0);
1040 mutex_lock(&gt->reset.mutex)rw_enter_write(&gt->reset.mutex);
1041
1042 /* Clear any previous failed attempts at recovery. Time to try again. */
1043 if (!__intel_gt_unset_wedged(gt))
1044 goto unlock;
1045
1046 if (reason)
1047 drm_notice(&gt->i915->drm,printf("drm:pid%d:%s *NOTICE* " "[drm] " "Resetting chip for %s\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , reason
)
1048 "Resetting chip for %s\n", reason)printf("drm:pid%d:%s *NOTICE* " "[drm] " "Resetting chip for %s\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , reason
)
;
1049 atomic_inc(&gt->i915->gpu_error.reset_count)__sync_fetch_and_add(&gt->i915->gpu_error.reset_count
, 1)
;
1050
1051 awake = reset_prepare(gt);
1052
1053 if (!intel_has_gpu_reset(gt)) {
1054 if (gt->i915->params.reset)
1055 drm_err(&gt->i915->drm, "GPU reset not supported\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "GPU reset not supported\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1056 else
1057 drm_dbg(&gt->i915->drm, "GPU reset disabled\n")drm_dev_dbg((&gt->i915->drm)->dev, DRM_UT_DRIVER
, "GPU reset disabled\n")
;
1058 goto error;
1059 }
1060
1061 if (INTEL_INFO(gt->i915)(&(gt->i915)->__info)->gpu_reset_clobbers_display)
1062 intel_runtime_pm_disable_interrupts(gt->i915);
1063
1064 if (do_reset(gt, stalled_mask)) {
1065 drm_err(&gt->i915->drm, "Failed to reset chip\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to reset chip\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1066 goto taint;
1067 }
1068
1069 if (INTEL_INFO(gt->i915)(&(gt->i915)->__info)->gpu_reset_clobbers_display)
1070 intel_runtime_pm_enable_interrupts(gt->i915);
1071
1072 intel_overlay_reset(gt->i915);
1073
1074 /*
1075 * Next we need to restore the context, but we don't use those
1076 * yet either...
1077 *
1078 * Ring buffer needs to be re-initialized in the KMS case, or if X
1079 * was running at the time of the reset (i.e. we weren't VT
1080 * switched away).
1081 */
1082 ret = intel_gt_init_hw(gt);
1083 if (ret) {
1084 drm_err(&gt->i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to initialise HW following reset (%d)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ret)
1085 "Failed to initialise HW following reset (%d)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to initialise HW following reset (%d)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ret)
1086 ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to initialise HW following reset (%d)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , ret)
;
1087 goto taint;
1088 }
1089
1090 ret = resume(gt);
1091 if (ret)
1092 goto taint;
1093
1094finish:
1095 reset_finish(gt, awake);
1096unlock:
1097 mutex_unlock(&gt->reset.mutex)rw_exit_write(&gt->reset.mutex);
1098 return;
1099
1100taint:
1101 /*
1102 * History tells us that if we cannot reset the GPU now, we
1103 * never will. This then impacts everything that is run
1104 * subsequently. On failing the reset, we mark the driver
1105 * as wedged, preventing further execution on the GPU.
1106 * We also want to go one step further and add a taint to the
1107 * kernel so that any subsequent faults can be traced back to
1108 * this failure. This is important for CI, where if the
1109 * GPU/driver fails we would like to reboot and restart testing
1110 * rather than continue on into oblivion. For everyone else,
1111 * the system should still plod along, but they have been warned!
1112 */
1113 add_taint_for_CI(gt->i915, TAINT_WARN1);
1114error:
1115 __intel_gt_set_wedged(gt);
1116 goto finish;
1117}
1118
1119static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
1120{
1121 return __intel_gt_reset(engine->gt, engine->mask);
1122}
1123
1124/**
1125 * intel_engine_reset - reset GPU engine to recover from a hang
1126 * @engine: engine to reset
1127 * @msg: reason for GPU reset; or NULL for no drm_notice()
1128 *
1129 * Reset a specific GPU engine. Useful if a hang is detected.
1130 * Returns zero on successful reset or otherwise an error code.
1131 *
1132 * Procedure is:
1133 * - identify the request that caused the hang and drop it
1134 * - reset engine (which will force the engine to idle)
1135 * - re-init/configure engine
1136 */
1137int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1138{
1139 struct intel_gt *gt = engine->gt;
1140 bool uses_guc = intel_engine_in_guc_submission_mode(engine);
1141 int ret;
1142
1143 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1144 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1145
1146 if (!intel_engine_pm_get_if_awake(engine))
1147 return 0;
1148
1149 reset_prepare_engine(engine);
1150
1151 if (msg)
1152 drm_notice(&engine->i915->drm,
1153 "Resetting %s for %s\n", engine->name, msg);
1154 atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
1155
1156 if (!uses_guc)
1157 ret = intel_gt_reset_engine(engine);
1158 else
1159 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
1160 if (ret) {
1161 /* If we fail here, we expect to fallback to a global reset */
1162 drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
1163 uses_guc ? "GuC " : "", engine->name, ret);
1164 goto out;
1165 }
1166
1167 /*
1168 * The request that caused the hang is stuck on elsp; we know the
1169 * active request and can drop it, adjusting the ring head to skip the
1170 * offending request so the remaining requests in the queue resume.
1171 */
1172 __intel_engine_reset(engine, true);
1173
1174 /*
1175 * The engine and its registers (and workarounds in case of render)
1176 * have been reset to their default values. Follow the init_ring
1177 * process to program RING_MODE, HWSP and re-enable submission.
1178 */
1179 ret = intel_engine_resume(engine);
1180
1181out:
1182 intel_engine_cancel_stop_cs(engine);
1183 reset_finish_engine(engine);
1184 intel_engine_pm_put_async(engine);
1185 return ret;
1186}
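The kernel-doc above describes intel_engine_reset() as the per-engine recovery entry point; note that it asserts (via GEM_BUG_ON) that the caller already owns the engine's I915_RESET_ENGINE bit in gt->reset.flags. A minimal caller sketch, assuming only what this file shows (the wrapper name and the -EBUSY fallback are illustrative, not part of the driver):

/*
 * Illustrative sketch only: take the per-engine reset bit, attempt an
 * engine reset, then release the bit -- mirroring the pattern used by
 * intel_gt_handle_error() later in this file.
 */
static int example_reset_one_engine(struct intel_engine_cs *engine,
				    const char *reason)
{
	struct intel_gt *gt = engine->gt;
	int err;

	/* Serialise against any other reset attempt on this engine. */
	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
			     &gt->reset.flags))
		return -EBUSY;	/* another reset is already in flight */

	err = intel_engine_reset(engine, reason);

	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
			      &gt->reset.flags);
	return err;
}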
1187
1188static void intel_gt_reset_global(struct intel_gt *gt,
1189 u32 engine_mask,
1190 const char *reason)
1191{
1192#ifdef notyet
1193 struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1194 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1195 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1196 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1197#endif
1198 struct intel_wedge_me w;
1199
1200 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1201
1202 drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
1203 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1204
1205 /* Use a watchdog to ensure that our reset completes */
1206 intel_wedge_on_timeout(&w, gt, 5 * HZ) {
1207 intel_prepare_reset(gt->i915);
1208
1209 /* Flush everyone using a resource about to be clobbered */
1210 synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1211
1212 intel_gt_reset(gt, engine_mask, reason);
1213
1214 intel_finish_reset(gt->i915);
1215 }
1216
1217 if (!test_bit(I915_WEDGED, &gt->reset.flags))
1218 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1219}
1220
1221/**
1222 * intel_gt_handle_error - handle a gpu error
1223 * @gt: the intel_gt
1224 * @engine_mask: mask representing engines that are hung
1225 * @flags: control flags
1226 * @fmt: Error message format string
1227 *
1228 * Do some basic checking of register state at error time and
1229 * dump it to the syslog. Also call i915_capture_error_state() to make
1230 * sure we get a record and make it available in debugfs. Fire a uevent
1231 * so userspace knows something bad happened (should trigger collection
1232 * of a ring dump etc.).
1233 */
1234void intel_gt_handle_error(struct intel_gt *gt,
1235 intel_engine_mask_t engine_mask,
1236 unsigned long flags,
1237 const char *fmt, ...)
1238{
1239 struct intel_engine_cs *engine;
1240 intel_wakeref_t wakeref;
1241 intel_engine_mask_t tmp;
1242 char error_msg[80];
1243 char *msg = NULL;
1244
1245 if (fmt) {
1246 va_list args;
1247
1248 va_start(args, fmt);
1249 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1250 va_end(args);
1251
1252 msg = error_msg;
1253 }
1254
1255 /*
1256 * In most cases it's guaranteed that we get here with an RPM
1257 * reference held, for example because there is a pending GPU
1258 * request that won't finish until the reset is done. This
1259 * isn't the case at least when we get here by doing a
1260 * simulated reset via debugfs, so get an RPM reference.
1261 */
1262 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1263
1264 engine_mask &= gt->info.engine_mask;
1265
1266 if (flags & I915_ERROR_CAPTURE) {
1267 i915_capture_error_state(gt->i915);
1268 intel_gt_clear_error_registers(gt, engine_mask);
1269 }
1270
1271 /*
1272 * Try engine reset when available. We fall back to full reset if
1273 * single reset fails.
1274 */
1275 if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1276 for_each_engine_masked(engine, gt, engine_mask, tmp) {
1277 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1278 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1279 &gt->reset.flags))
1280 continue;
1281
1282 if (intel_engine_reset(engine, msg) == 0)
1283 engine_mask &= ~engine->mask;
1284
1285 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1286 &gt->reset.flags);
1287 }
1288 }
1289
1290 if (!engine_mask)
1291 goto out;
1292
1293 /* Full reset needs the mutex, stop any other user trying to do so. */
1294 if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1295 wait_event(gt->reset.queue,
1296 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1297 goto out; /* piggy-back on the other reset */
1298 }
1299
1300 /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1301 synchronize_rcu_expedited();
1302
1303 /* Prevent any other reset-engine attempt. */
1304 for_each_engine(engine, gt, tmp) {
1305 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1306 &gt->reset.flags))
1307 wait_on_bit(&gt->reset.flags,
1308 I915_RESET_ENGINE + engine->id,
1309 TASK_UNINTERRUPTIBLE);
1310 }
1311
1312 intel_gt_reset_global(gt, engine_mask, msg);
1313
1314 for_each_engine(engine, gt, tmp)
1315 clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1316 &gt->reset.flags);
1317 clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1318 smp_mb__after_atomic();
1319 wake_up_all(&gt->reset.queue);
1320
1321out:
1322 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1323}
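For orientation, a hang detector would typically funnel into intel_gt_handle_error() roughly as sketched below; the wrapper function is hypothetical, and only the intel_gt_handle_error() signature and the I915_ERROR_CAPTURE flag come from this file:

/*
 * Hypothetical caller: report one hung engine, request error capture,
 * and let intel_gt_handle_error() choose engine reset vs. full reset.
 */
static void example_report_hang(struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask,
			      I915_ERROR_CAPTURE,
			      "hang detected on %s", engine->name);
}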
1324
1325int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1326{
1327 might_lock(&gt->reset.backoff_srcu);
1328 might_sleep();
1329
1330 rcu_read_lock();
1331 while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1332 rcu_read_unlock();
1333
1334 if (wait_event_interruptible(gt->reset.queue,
1335 !test_bit(I915_RESET_BACKOFF,
1336 &gt->reset.flags)))
1337 return -EINTR;
1338
1339 rcu_read_lock();
1340 }
1341 *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1342 rcu_read_unlock();
1343
1344 return 0;
1345}
1346
1347void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1348__releases(&gt->reset.backoff_srcu)
1349{
1350 srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1351}
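intel_gt_reset_trylock() and intel_gt_reset_unlock() bracket hardware access that must not overlap a full reset; the SRCU cookie returned through *srcu must be handed back to the matching unlock. A usage sketch under those assumptions (the wrapper name and the body of the critical section are illustrative):

/*
 * Sketch: hold off a full GPU reset while we touch the hardware.
 */
static int example_touch_hw(struct intel_gt *gt)
{
	int tag, err;

	err = intel_gt_reset_trylock(gt, &tag);
	if (err)	/* -EINTR: interrupted while a reset backed us off */
		return err;

	/* ... hardware access that must not race a reset ... */

	intel_gt_reset_unlock(gt, tag);
	return 0;
}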
1352
1353int intel_gt_terminally_wedged(struct intel_gt *gt)
1354{
1355 might_sleep();
1356
1357 if (!intel_gt_is_wedged(gt))
1358 return 0;
1359
1360 if (intel_gt_has_unrecoverable_error(gt))
1361 return -EIO;
1362
1363 /* Reset still in progress? Maybe we will recover? */
1364 if (wait_event_interruptible(gt->reset.queue,
1365 !test_bit(I915_RESET_BACKOFF,
1366 &gt->reset.flags)))
1367 return -EINTR;
1368
1369 return intel_gt_is_wedged(gt) ? -EIO : 0;
1370}
1371
1372void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1373{
1374 BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1375 I915_WEDGED_ON_INIT);
1376 intel_gt_set_wedged(gt);
1377 set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1378
1379 /* Wedged on init is non-recoverable */
1380 add_taint_for_CI(gt->i915, TAINT_WARN);
1381}
1382
1383void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1384{
1385 intel_gt_set_wedged(gt);
1386 set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1387}
1388
1389void intel_gt_init_reset(struct intel_gt *gt)
1390{
1391 init_waitqueue_head(&gt->reset.queue);
1392 rw_init(&gt->reset.mutex, "gtres");
1393 init_srcu_struct(&gt->reset.backoff_srcu);
1394
1395 /* no GPU until we are ready! */
1396 __set_bit(I915_WEDGED, &gt->reset.flags);
1397}
1398
1399void intel_gt_fini_reset(struct intel_gt *gt)
1400{
1401 cleanup_srcu_struct(&gt->reset.backoff_srcu);
1402}
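A hedged sketch of how the init/fini pair above brackets a GT's reset state over its lifetime; the surrounding function is illustrative, and the ordering simply follows the comments in intel_gt_init_reset():

static void example_gt_reset_lifecycle(struct intel_gt *gt)
{
	/* Starts out wedged: "no GPU until we are ready!" */
	intel_gt_init_reset(gt);

	/* ... driver load, hardware bring-up, normal operation ... */

	/* Tear down the backoff SRCU on driver removal. */
	intel_gt_fini_reset(gt);
}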
1403
1404static void intel_wedge_me(struct work_struct *work)
1405{
1406 struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1407
1408 drm_err(&w->gt->i915->drm,
1409 "%s timed out, cancelling all in-flight rendering.\n",
1410 w->name);
1411 intel_gt_set_wedged(w->gt);
1412}
1413
1414void __intel_init_wedge(struct intel_wedge_me *w,
1415 struct intel_gt *gt,
1416 long timeout,
1417 const char *name)
1418{
1419 w->gt = gt;
1420 w->name = name;
1421
1422 INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1423 schedule_delayed_work(&w->work, timeout);
1424}
1425
1426void __intel_fini_wedge(struct intel_wedge_me *w)
1427{
1428 cancel_delayed_work_sync(&w->work);
1429 destroy_delayed_work_on_stack(&w->work);
1430 w->gt = NULL;
1431}
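__intel_init_wedge() and __intel_fini_wedge() are the open-coded form of the intel_wedge_on_timeout() loop used by intel_gt_reset_global() earlier in this file; a usage sketch (the bracketed work is illustrative):

/*
 * Sketch: wedge the GT if the bracketed work does not complete within
 * the timeout, following the pattern in intel_gt_reset_global() above.
 */
static void example_bounded_reset(struct intel_gt *gt)
{
	struct intel_wedge_me w;

	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		/* ... reset work that must finish in bounded time ... */
	}
}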
1432
1433#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1434#include "selftest_reset.c"
1435#include "selftest_hangcheck.c"
1436#endif