Bug Summary

File: dev/pci/drm/i915/i915_gpu_error.c
Warning: line 1754, column 3
Value stored to 'len' is never read
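
This warning comes from the deadcode.DeadStores checker: an assignment writes a value to 'len' that is never read before the variable is overwritten or goes out of scope. A minimal, hypothetical sketch of the pattern (illustrative only, not necessarily the code at line 1754; buf and ecode are made-up names, and scnprintf is the kernel string formatter already used elsewhere in this file):

    char buf[128];
    int ecode = 13, len;

    len = scnprintf(buf, sizeof(buf), "GPU HANG");                        /* this store is read on the next line */
    len += scnprintf(buf + len, sizeof(buf) - len, ", ecode %d", ecode);  /* dead store: 'len' is never read again */

Such stores are harmless at runtime; the usual cleanup is to drop the final assignment, or to ignore the return value if only the call's side effect is wanted.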

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name i915_gpu_error.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_gpu_error.c
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <linux/ascii85.h>
31#include <linux/nmi.h>
32#include <linux/pagevec.h>
33#include <linux/scatterlist.h>
34#include <linux/utsname.h>
35#include <linux/zlib.h>
36
37#include <drm/drm_print.h>
38
39#include "display/intel_atomic.h"
40#include "display/intel_csr.h"
41#include "display/intel_overlay.h"
42
43#include "gem/i915_gem_context.h"
44#include "gem/i915_gem_lmem.h"
45#include "gt/intel_gt.h"
46#include "gt/intel_gt_pm.h"
47
48#include "i915_drv.h"
49#include "i915_gpu_error.h"
50#include "i915_memcpy.h"
51#include "i915_scatterlist.h"
52
53#define ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0) (GFP_KERNEL(0x0001 | 0x0004) | __GFP_RETRY_MAYFAIL0 | __GFP_NOWARN0)
54#define ATOMIC_MAYFAIL(0x0002 | 0) (GFP_ATOMIC0x0002 | __GFP_NOWARN0)
55
56static void __sg_set_buf(struct scatterlist *sg,
57 void *addr, unsigned int len, loff_t it)
58{
59 STUB()do { printf("%s: stub\n", __func__); } while(0);
60#ifdef notyet
61 sg->page_link = (unsigned long)virt_to_page(addr);
62 sg->offset = offset_in_page(addr)((vaddr_t)(addr) & ((1 << 12) - 1));
63 sg->length = len;
64 sg->dma_address = it;
65#endif
66}
67
68static bool_Bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
69{
70 STUB()do { printf("%s: stub\n", __func__); } while(0);
71 return false0;
72#ifdef notyet
73 if (!len)
74 return false0;
75
76 if (e->bytes + len + 1 <= e->size)
77 return true1;
78
79 if (e->bytes) {
80 __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
81 e->iter += e->bytes;
82 e->buf = NULL((void *)0);
83 e->bytes = 0;
84 }
85
86 if (e->cur == e->end) {
87 struct scatterlist *sgl;
88
89 sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
90 if (!sgl) {
91 e->err = -ENOMEM12;
92 return false0;
93 }
94
95 if (e->cur) {
96 e->cur->offset = 0;
97 e->cur->length = 0;
98 e->cur->page_link =
99 (unsigned long)sgl | SG_CHAIN;
100 } else {
101 e->sgl = sgl;
102 }
103
104 e->cur = sgl;
105 e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
106 }
107
108 e->size = roundup2(len + 1, SZ_64K)(((len + 1) + (((1024 * 64)) - 1)) & (~((__typeof(len + 1
))((1024 * 64)) - 1)))
;
109 e->buf = kmalloc(e->size, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
110 if (!e->buf) {
111 e->size = PAGE_ALIGN(len + 1)(((len + 1) + ((1 << 12) - 1)) & ~((1 << 12) -
1))
;
112 e->buf = kmalloc(e->size, GFP_KERNEL(0x0001 | 0x0004));
113 }
114 if (!e->buf) {
115 e->err = -ENOMEM12;
116 return false0;
117 }
118
119 return true1;
120#endif
121}
122
123__printf(2, 0)__attribute__((__format__(__kprintf__,2,0)))
124static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
125 const char *fmt, va_list args)
126{
127 va_list ap;
128 int len;
129
130 if (e->err)
131 return;
132
133 va_copy(ap, args)__builtin_va_copy(((ap)),((args)));
134 len = vsnprintf(NULL((void *)0), 0, fmt, ap);
135 va_end(ap)__builtin_va_end((ap));
136 if (len <= 0) {
137 e->err = len;
138 return;
139 }
140
141 if (!__i915_error_grow(e, len))
142 return;
143
144 GEM_BUG_ON(e->bytes >= e->size)((void)0);
145 len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
146 if (len < 0) {
147 e->err = len;
148 return;
149 }
150 e->bytes += len;
151}
152
153static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
154{
155 unsigned len;
156
157 if (e->err || !str)
158 return;
159
160 len = strlen(str);
161 if (!__i915_error_grow(e, len))
162 return;
163
164 GEM_BUG_ON(e->bytes + len > e->size)((void)0);
165 memcpy(e->buf + e->bytes, str, len)__builtin_memcpy((e->buf + e->bytes), (str), (len));
166 e->bytes += len;
167}
168
169#define err_printf(e, ...)i915_error_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
170#define err_puts(e, s)i915_error_puts(e, s) i915_error_puts(e, s)
171
172static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
173{
174 i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
175}
176
177static inline struct drm_printer
178i915_error_printer(struct drm_i915_error_state_buf *e)
179{
180 struct drm_printer p = {
181 .printfn = __i915_printfn_error,
182 .arg = e,
183 };
184 return p;
185}
186
187/* single threaded page allocator with a reserved stash for emergencies */
188static void pool_fini(struct pagevec *pv)
189{
190 STUB()do { printf("%s: stub\n", __func__); } while(0);
191#ifdef notyet
192 pagevec_release(pv);
193#endif
194}
195
196static int pool_refill(struct pagevec *pv, gfp_t gfp)
197{
198 while (pagevec_space(pv)) {
199 struct vm_page *p;
200
201 p = alloc_page(gfp);
202 if (!p)
203 return -ENOMEM12;
204
205 pagevec_add(pv, p);
206 }
207
208 return 0;
209}
210
211static int intel_pool_init(struct pagevec *pv, gfp_t gfp)
212{
213 int err;
214
215 pagevec_init(pv);
216
217 err = pool_refill(pv, gfp);
218 if (err)
219 pool_fini(pv);
220
221 return err;
222}
223
224static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
225{
226 STUB()do { printf("%s: stub\n", __func__); } while(0);
227 return NULL((void *)0);
228#ifdef notyet
229 struct vm_page *p;
230
231 p = alloc_page(gfp);
232 if (!p && pagevec_count(pv))
233 p = pv->pages[--pv->nr];
234
235 return p ? page_address(p) : NULL((void *)0);
236#endif
237}
238
239static void pool_free(struct pagevec *pv, void *addr)
240{
241 STUB()do { printf("%s: stub\n", __func__); } while(0);
242#ifdef notyet
243 struct vm_page *p = virt_to_page(addr);
244
245 if (pagevec_space(pv))
246 pagevec_add(pv, p);
247 else
248 __free_page(p);
249#endif
250}
251
252#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
253
254struct i915_vma_compress {
255 struct pagevec pool;
256 struct z_stream_s zstream;
257 void *tmp;
258};
259
260static bool_Bool compress_init(struct i915_vma_compress *c)
261{
262 struct z_stream_s *zstream = &c->zstream;
263
264 if (intel_pool_init(&c->pool, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0)))
265 return false0;
266
267 zstream->workspace =
268 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
269 ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
270 if (!zstream->workspace) {
271 pool_fini(&c->pool);
272 return false0;
273 }
274
275 c->tmp = NULL((void *)0);
276 if (i915_has_memcpy_from_wc()i915_memcpy_from_wc(((void *)0), ((void *)0), 0))
277 c->tmp = pool_alloc(&c->pool, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
278
279 return true1;
280}
281
282static bool_Bool compress_start(struct i915_vma_compress *c)
283{
284 struct z_stream_s *zstream = &c->zstream;
285 void *workspace = zstream->workspace;
286
287 memset(zstream, 0, sizeof(*zstream))__builtin_memset((zstream), (0), (sizeof(*zstream)));
288 zstream->workspace = workspace;
289
290 return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
291}
292
293static void *compress_next_page(struct i915_vma_compress *c,
294 struct i915_vma_coredump *dst)
295{
296 void *page;
297
298 if (dst->page_count >= dst->num_pages)
299 return ERR_PTR(-ENOSPC28);
300
301 page = pool_alloc(&c->pool, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
302 if (!page)
303 return ERR_PTR(-ENOMEM12);
304
305 return dst->pages[dst->page_count++] = page;
306}
307
308static int compress_page(struct i915_vma_compress *c,
309 void *src,
310 struct i915_vma_coredump *dst,
311 bool_Bool wc)
312{
313 struct z_stream_s *zstream = &c->zstream;
314
315 zstream->next_in = src;
316 if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE(1 << 12)))
317 zstream->next_in = c->tmp;
318 zstream->avail_in = PAGE_SIZE(1 << 12);
319
320 do {
321 if (zstream->avail_out == 0) {
322 zstream->next_out = compress_next_page(c, dst);
323 if (IS_ERR(zstream->next_out))
324 return PTR_ERR(zstream->next_out);
325
326 zstream->avail_out = PAGE_SIZE(1 << 12);
327 }
328
329 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
330 return -EIO5;
331
332 cond_resched()do { if (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_schedstate.spc_schedflags & 0x0002) yield
(); } while (0)
;
333 } while (zstream->avail_in);
334
335 /* Fallback to uncompressed if we increase size? */
336 if (0 && zstream->total_out > zstream->total_in)
337 return -E2BIG7;
338
339 return 0;
340}
341
342static int compress_flush(struct i915_vma_compress *c,
343 struct i915_vma_coredump *dst)
344{
345 struct z_stream_s *zstream = &c->zstream;
346
347 do {
348 switch (zlib_deflate(zstream, Z_FINISH)) {
349 case Z_OK: /* more space requested */
350 zstream->next_out = compress_next_page(c, dst);
351 if (IS_ERR(zstream->next_out))
352 return PTR_ERR(zstream->next_out);
353
354 zstream->avail_out = PAGE_SIZE(1 << 12);
355 break;
356
357 case Z_STREAM_END:
358 goto end;
359
360 default: /* any error */
361 return -EIO5;
362 }
363 } while (1);
364
365end:
366 memset(zstream->next_out, 0, zstream->avail_out)__builtin_memset((zstream->next_out), (0), (zstream->avail_out
))
;
367 dst->unused = zstream->avail_out;
368 return 0;
369}
370
371static void compress_finish(struct i915_vma_compress *c)
372{
373 zlib_deflateEnd(&c->zstream);
374}
375
376static void compress_fini(struct i915_vma_compress *c)
377{
378 kfree(c->zstream.workspace);
379 if (c->tmp)
380 pool_free(&c->pool, c->tmp);
381 pool_fini(&c->pool);
382}
383
384static void err_compression_marker(struct drm_i915_error_state_buf *m)
385{
386 err_puts(m, ":")i915_error_puts(m, ":");
387}
388
389#else
390
391struct i915_vma_compress {
392 struct pagevec pool;
393};
394
395static bool_Bool compress_init(struct i915_vma_compress *c)
396{
397 return intel_pool_init(&c->pool, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0)) == 0;
398}
399
400static bool_Bool compress_start(struct i915_vma_compress *c)
401{
402 return true1;
403}
404
405static int compress_page(struct i915_vma_compress *c,
406 void *src,
407 struct i915_vma_coredump *dst,
408 bool_Bool wc)
409{
410 void *ptr;
411
412 ptr = pool_alloc(&c->pool, ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
413 if (!ptr)
414 return -ENOMEM12;
415
416 if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE(1 << 12))))
417 memcpy(ptr, src, PAGE_SIZE)__builtin_memcpy((ptr), (src), ((1 << 12)));
418 dst->pages[dst->page_count++] = ptr;
419 cond_resched()do { if (({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0"
: "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self
))); __ci;})->ci_schedstate.spc_schedflags & 0x0002) yield
(); } while (0)
;
420
421 return 0;
422}
423
424static int compress_flush(struct i915_vma_compress *c,
425 struct i915_vma_coredump *dst)
426{
427 return 0;
428}
429
430static void compress_finish(struct i915_vma_compress *c)
431{
432}
433
434static void compress_fini(struct i915_vma_compress *c)
435{
436 pool_fini(&c->pool);
437}
438
439static void err_compression_marker(struct drm_i915_error_state_buf *m)
440{
441 err_puts(m, "~")i915_error_puts(m, "~");
442}
443
444#endif
445
446static void error_print_instdone(struct drm_i915_error_state_buf *m,
447 const struct intel_engine_coredump *ee)
448{
449 const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
450 int slice;
451 int subslice;
452
453 err_printf(m, " INSTDONE: 0x%08x\n",i915_error_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.
instdone)
454 ee->instdone.instdone)i915_error_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.
instdone)
;
455
456 if (ee->engine->class != RENDER_CLASS0 || INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) <= 3)
457 return;
458
459 err_printf(m, " SC_INSTDONE: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE: 0x%08x\n", ee->instdone
.slice_common)
460 ee->instdone.slice_common)i915_error_printf(m, " SC_INSTDONE: 0x%08x\n", ee->instdone
.slice_common)
;
461
462 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) <= 6)
463 return;
464
465 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)for ((slice) = 0, (subslice) = 0; (slice) < 3; (subslice) =
((subslice) + 1) % 8, (slice) += ((subslice) == 0)) if (!(((
((0 + (&(m->i915)->__info)->gen == (7)) ? 1 : ((
sseu)->slice_mask)) & (1UL << (slice)))) &&
(((0 + (&(m->i915)->__info)->gen == (7)) ? (1 &
(1UL << (subslice))) : intel_sseu_has_subslice(sseu, 0
, subslice))))) {} else
466 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
467 slice, subslice,i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
468 ee->instdone.sampler[slice][subslice])i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
;
469
470 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)for ((slice) = 0, (subslice) = 0; (slice) < 3; (subslice) =
((subslice) + 1) % 8, (slice) += ((subslice) == 0)) if (!(((
((0 + (&(m->i915)->__info)->gen == (7)) ? 1 : ((
sseu)->slice_mask)) & (1UL << (slice)))) &&
(((0 + (&(m->i915)->__info)->gen == (7)) ? (1 &
(1UL << (subslice))) : intel_sseu_has_subslice(sseu, 0
, subslice))))) {} else
471 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
472 slice, subslice,i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
473 ee->instdone.row[slice][subslice])i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
;
474
475 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) < 12)
476 return;
477
478 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", ee->
instdone.slice_common_extra[0])
479 ee->instdone.slice_common_extra[0])i915_error_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", ee->
instdone.slice_common_extra[0])
;
480 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", ee->
instdone.slice_common_extra[1])
481 ee->instdone.slice_common_extra[1])i915_error_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", ee->
instdone.slice_common_extra[1])
;
482}
483
484static void error_print_request(struct drm_i915_error_state_buf *m,
485 const char *prefix,
486 const struct i915_request_coredump *erq)
487{
488 if (!erq->seqno)
489 return;
490
491 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
492 prefix, erq->pid, erq->context, erq->seqno,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
493 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
494 &erq->flags) ? "!" : "",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
495 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
496 &erq->flags) ? "+" : "",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
497 erq->sched_attr.priority,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
498 erq->head, erq->tail)i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
;
499}
500
501static void error_print_context(struct drm_i915_error_state_buf *m,
502 const char *header,
503 const struct i915_gem_context_coredump *ctx)
504{
505 const u32 period = RUNTIME_INFO(m->i915)(&(m->i915)->__runtime)->cs_timestamp_period_ns;
506
507 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime * period
, mul_u32_u32(ctx->avg_runtime, period))
508 header, ctx->comm, ctx->pid, ctx->sched_attr.priority,i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime * period
, mul_u32_u32(ctx->avg_runtime, period))
509 ctx->guilty, ctx->active,i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime * period
, mul_u32_u32(ctx->avg_runtime, period))
510 ctx->total_runtime * period,i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime * period
, mul_u32_u32(ctx->avg_runtime, period))
511 mul_u32_u32(ctx->avg_runtime, period))i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime * period
, mul_u32_u32(ctx->avg_runtime, period))
;
512}
513
514static struct i915_vma_coredump *
515__find_vma(struct i915_vma_coredump *vma, const char *name)
516{
517 while (vma) {
518 if (strcmp(vma->name, name) == 0)
519 return vma;
520 vma = vma->next;
521 }
522
523 return NULL((void *)0);
524}
525
526static struct i915_vma_coredump *
527find_batch(const struct intel_engine_coredump *ee)
528{
529 return __find_vma(ee->vma, "batch");
530}
531
532static void error_print_engine(struct drm_i915_error_state_buf *m,
533 const struct intel_engine_coredump *ee)
534{
535 struct i915_vma_coredump *batch;
536 int n;
537
538 err_printf(m, "%s command stream:\n", ee->engine->name)i915_error_printf(m, "%s command stream:\n", ee->engine->
name)
;
539 err_printf(m, " CCID: 0x%08x\n", ee->ccid)i915_error_printf(m, " CCID: 0x%08x\n", ee->ccid);
540 err_printf(m, " START: 0x%08x\n", ee->start)i915_error_printf(m, " START: 0x%08x\n", ee->start);
541 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head)i915_error_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head
, ee->rq_head)
;
542 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",i915_error_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", ee
->tail, ee->rq_post, ee->rq_tail)
543 ee->tail, ee->rq_post, ee->rq_tail)i915_error_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", ee
->tail, ee->rq_post, ee->rq_tail)
;
544 err_printf(m, " CTL: 0x%08x\n", ee->ctl)i915_error_printf(m, " CTL: 0x%08x\n", ee->ctl);
545 err_printf(m, " MODE: 0x%08x\n", ee->mode)i915_error_printf(m, " MODE: 0x%08x\n", ee->mode);
546 err_printf(m, " HWS: 0x%08x\n", ee->hws)i915_error_printf(m, " HWS: 0x%08x\n", ee->hws);
547 err_printf(m, " ACTHD: 0x%08x %08x\n",i915_error_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ee->acthd
>>32), (u32)ee->acthd)
548 (u32)(ee->acthd>>32), (u32)ee->acthd)i915_error_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ee->acthd
>>32), (u32)ee->acthd)
;
549 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir)i915_error_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
550 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr)i915_error_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
551 err_printf(m, " ESR: 0x%08x\n", ee->esr)i915_error_printf(m, " ESR: 0x%08x\n", ee->esr);
552
553 error_print_instdone(m, ee);
554
555 batch = find_batch(ee);
556 if (batch) {
557 u64 start = batch->gtt_offset;
558 u64 end = start + batch->gtt_size;
559
560 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
561 upper_32_bits(start), lower_32_bits(start),i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
562 upper_32_bits(end), lower_32_bits(end))i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
;
563 }
564 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) >= 4) {
565 err_printf(m, " BBADDR: 0x%08x_%08x\n",i915_error_printf(m, " BBADDR: 0x%08x_%08x\n", (u32)(ee->
bbaddr>>32), (u32)ee->bbaddr)
566 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr)i915_error_printf(m, " BBADDR: 0x%08x_%08x\n", (u32)(ee->
bbaddr>>32), (u32)ee->bbaddr)
;
567 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate)i915_error_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
568 err_printf(m, " INSTPS: 0x%08x\n", ee->instps)i915_error_printf(m, " INSTPS: 0x%08x\n", ee->instps);
569 }
570 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm)i915_error_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
571 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),i915_error_printf(m, " FADDR: 0x%08x %08x\n", ((u32)(((ee->
faddr) >> 16) >> 16)), ((u32)(ee->faddr)))
572 lower_32_bits(ee->faddr))i915_error_printf(m, " FADDR: 0x%08x %08x\n", ((u32)(((ee->
faddr) >> 16) >> 16)), ((u32)(ee->faddr)))
;
573 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) >= 6) {
574 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi)i915_error_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
575 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg)i915_error_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg
)
;
576 }
577 if (HAS_PPGTT(m->i915)(((&(m->i915)->__info)->ppgtt_type) != INTEL_PPGTT_NONE
)
) {
578 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode)i915_error_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode
)
;
579
580 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) >= 8) {
581 int i;
582 for (i = 0; i < 4; i++)
583 err_printf(m, " PDP%d: 0x%016llx\n",i915_error_printf(m, " PDP%d: 0x%016llx\n", i, ee->vm_info
.pdp[i])
584 i, ee->vm_info.pdp[i])i915_error_printf(m, " PDP%d: 0x%016llx\n", i, ee->vm_info
.pdp[i])
;
585 } else {
586 err_printf(m, " PP_DIR_BASE: 0x%08x\n",i915_error_printf(m, " PP_DIR_BASE: 0x%08x\n", ee->vm_info
.pp_dir_base)
587 ee->vm_info.pp_dir_base)i915_error_printf(m, " PP_DIR_BASE: 0x%08x\n", ee->vm_info
.pp_dir_base)
;
588 }
589 }
590 err_printf(m, " engine reset count: %u\n", ee->reset_count)i915_error_printf(m, " engine reset count: %u\n", ee->reset_count
)
;
591
592 for (n = 0; n < ee->num_ports; n++) {
593 err_printf(m, " ELSP[%d]:", n)i915_error_printf(m, " ELSP[%d]:", n);
594 error_print_request(m, " ", &ee->execlist[n]);
595 }
596
597 error_print_context(m, " Active context: ", &ee->context);
598}
599
600void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
601{
602 va_list args;
603
604 va_start(args, f)__builtin_va_start((args), f);
605 i915_error_vprintf(e, f, args);
606 va_end(args)__builtin_va_end((args));
607}
608
609static void print_error_vma(struct drm_i915_error_state_buf *m,
610 const struct intel_engine_cs *engine,
611 const struct i915_vma_coredump *vma)
612{
613 STUB()do { printf("%s: stub\n", __func__); } while(0);
614#ifdef notyet
615 char out[ASCII85_BUFSZ];
616 int page;
617
618 if (!vma)
619 return;
620
621 err_printf(m, "%s --- %s = 0x%08x %08x\n",i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
622 engine ? engine->name : "global", vma->name,i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
623 upper_32_bits(vma->gtt_offset),i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
624 lower_32_bits(vma->gtt_offset))i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
;
625
626 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K(1ULL << (12)))
627 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes)i915_error_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes
)
;
628
629 err_compression_marker(m);
630 for (page = 0; page < vma->page_count; page++) {
631 int i, len;
632
633 len = PAGE_SIZE(1 << 12);
634 if (page == vma->page_count - 1)
635 len -= vma->unused;
636 len = ascii85_encode_len(len);
637
638 for (i = 0; i < len; i++)
639 err_puts(m, ascii85_encode(vma->pages[page][i], out))i915_error_puts(m, ascii85_encode(vma->pages[page][i], out
))
;
640 }
641 err_puts(m, "\n")i915_error_puts(m, "\n");
642#endif
643}
644
645static void err_print_capabilities(struct drm_i915_error_state_buf *m,
646 struct i915_gpu_coredump *error)
647{
648 struct drm_printer p = i915_error_printer(m);
649
650 intel_device_info_print_static(&error->device_info, &p);
651 intel_device_info_print_runtime(&error->runtime_info, &p);
652 intel_driver_caps_print(&error->driver_caps, &p);
653}
654
655static void err_print_params(struct drm_i915_error_state_buf *m,
656 const struct i915_params *params)
657{
658 struct drm_printer p = i915_error_printer(m);
659
660 i915_params_dump(params, &p);
661}
662
663static void err_print_pciid(struct drm_i915_error_state_buf *m,
664 struct drm_i915_privateinteldrm_softc *i915)
665{
666 struct pci_dev *pdev = i915->drm.pdev;
667
668 err_printf(m, "PCI ID: 0x%04x\n", pdev->device)i915_error_printf(m, "PCI ID: 0x%04x\n", pdev->device);
669 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision)i915_error_printf(m, "PCI Revision: 0x%02x\n", pdev->revision
)
;
670 err_printf(m, "PCI Subsystem: %04x:%04x\n",i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
671 pdev->subsystem_vendor,i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
672 pdev->subsystem_device)i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
;
673}
674
675static void err_print_uc(struct drm_i915_error_state_buf *m,
676 const struct intel_uc_coredump *error_uc)
677{
678 struct drm_printer p = i915_error_printer(m);
679
680 intel_uc_fw_dump(&error_uc->guc_fw, &p);
681 intel_uc_fw_dump(&error_uc->huc_fw, &p);
682 print_error_vma(m, NULL((void *)0), error_uc->guc_log);
683}
684
685static void err_free_sgl(struct scatterlist *sgl)
686{
687 STUB()do { printf("%s: stub\n", __func__); } while(0);
688#ifdef notyet
689 while (sgl) {
690 struct scatterlist *sg;
691
692 for (sg = sgl; !sg_is_chain(sg)0; sg++) {
693 kfree(sg_virt(sg));
694 if (sg_is_last(sg)((sg)->end))
695 break;
696 }
697
698 sg = sg_is_last(sg)((sg)->end) ? NULL((void *)0) : sg_chain_ptr(sg)((void *)0);
699 free_page((unsigned long)sgl);
700 sgl = sg;
701 }
702#endif
703}
704
705static void err_print_gt_info(struct drm_i915_error_state_buf *m,
706 struct intel_gt_coredump *gt)
707{
708 struct drm_printer p = i915_error_printer(m);
709
710 intel_gt_info_print(&gt->info, &p);
711 intel_sseu_print_topology(&gt->info.sseu, &p);
712}
713
714static void err_print_gt(struct drm_i915_error_state_buf *m,
715 struct intel_gt_coredump *gt)
716{
717 const struct intel_engine_coredump *ee;
718 int i;
719
720 err_printf(m, "GT awake: %s\n", yesno(gt->awake))i915_error_printf(m, "GT awake: %s\n", yesno(gt->awake));
721 err_printf(m, "EIR: 0x%08x\n", gt->eir)i915_error_printf(m, "EIR: 0x%08x\n", gt->eir);
722 err_printf(m, "IER: 0x%08x\n", gt->ier)i915_error_printf(m, "IER: 0x%08x\n", gt->ier);
723 for (i = 0; i < gt->ngtier; i++)
724 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i])i915_error_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i
])
;
725 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er)i915_error_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
726 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake)i915_error_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
727 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr)i915_error_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
728
729 for (i = 0; i < gt->nfence; i++)
730 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i])i915_error_printf(m, " fence[%d] = %08llx\n", i, gt->fence
[i])
;
731
732 if (IS_GEN_RANGE(m->i915, 6, 11)(!!((&(m->i915)->__info)->gen_mask & ( 0 + 0
+ (((~0UL) >> (64 - (((11)) - 1) - 1)) & ((~0UL) <<
(((6)) - 1))))))
) {
733 err_printf(m, "ERROR: 0x%08x\n", gt->error)i915_error_printf(m, "ERROR: 0x%08x\n", gt->error);
734 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg)i915_error_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
735 }
736
737 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) >= 8)
738 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",i915_error_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", gt->
fault_data1, gt->fault_data0)
739 gt->fault_data1, gt->fault_data0)i915_error_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", gt->
fault_data1, gt->fault_data0)
;
740
741 if (IS_GEN(m->i915, 7)(0 + (&(m->i915)->__info)->gen == (7)))
742 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int)i915_error_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
743
744 if (IS_GEN_RANGE(m->i915, 8, 11)(!!((&(m->i915)->__info)->gen_mask & ( 0 + 0
+ (((~0UL) >> (64 - (((11)) - 1) - 1)) & ((~0UL) <<
(((8)) - 1))))))
)
745 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache)i915_error_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache
)
;
746
747 if (IS_GEN(m->i915, 12)(0 + (&(m->i915)->__info)->gen == (12)))
748 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err)i915_error_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
749
750 if (INTEL_GEN(m->i915)((&(m->i915)->__info)->gen) >= 12) {
751 int i;
752
753 for (i = 0; i < GEN12_SFC_DONE_MAX4; i++) {
754 /*
755 * SFC_DONE resides in the VD forcewake domain, so it
756 * only exists if the corresponding VCS engine is
757 * present.
758 */
759 if (!HAS_ENGINE(gt->_gt, _VCS(i * 2))(((gt->_gt)->info.engine_mask) & (1UL << ((VCS0
+ (i * 2)))))
)
760 continue;
761
762 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,i915_error_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, gt->sfc_done
[i])
763 gt->sfc_done[i])i915_error_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, gt->sfc_done
[i])
;
764 }
765
766 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done)i915_error_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
767 }
768
769 for (ee = gt->engine; ee; ee = ee->next) {
770 const struct i915_vma_coredump *vma;
771
772 error_print_engine(m, ee);
773 for (vma = ee->vma; vma; vma = vma->next)
774 print_error_vma(m, ee->engine, vma);
775 }
776
777 if (gt->uc)
778 err_print_uc(m, gt->uc);
779
780 err_print_gt_info(m, gt);
781}
782
783static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
784 struct i915_gpu_coredump *error)
785{
786 const struct intel_engine_coredump *ee;
787 struct timespec64 ts;
788
789 if (*error->error_msg)
790 err_printf(m, "%s\n", error->error_msg)i915_error_printf(m, "%s\n", error->error_msg);
791#ifdef __linux__
792 err_printf(m, "Kernel: %s %s\n",i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
793 init_utsname()->release,i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
794 init_utsname()->machine)i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
;
795#else
796 extern char machine[];
797 err_printf(m, "Kernel: %s %s\n",i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine)
798 osrelease,i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine)
799 machine)i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine);
800#endif
801 err_printf(m, "Driver: %s\n", DRIVER_DATE)i915_error_printf(m, "Driver: %s\n", "20200917");
802 ts = ktime_to_timespec64(error->time);
803 err_printf(m, "Time: %lld s %ld us\n",i915_error_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec,
ts.tv_nsec / 1000L)
804 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec,
ts.tv_nsec / 1000L)
;
805 ts = ktime_to_timespec64(error->boottime);
806 err_printf(m, "Boottime: %lld s %ld us\n",i915_error_printf(m, "Boottime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
807 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Boottime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
;
808 ts = ktime_to_timespec64(error->uptime);
809 err_printf(m, "Uptime: %lld s %ld us\n",i915_error_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
810 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
;
811 err_printf(m, "Capture: %lu jiffies; %d ms ago\n",i915_error_printf(m, "Capture: %lu jiffies; %d ms ago\n", error
->capture, jiffies_to_msecs(jiffies - error->capture))
812 error->capture, jiffies_to_msecs(jiffies - error->capture))i915_error_printf(m, "Capture: %lu jiffies; %d ms ago\n", error
->capture, jiffies_to_msecs(jiffies - error->capture))
;
813
814 for (ee = error->gt ? error->gt->engine : NULL((void *)0); ee; ee = ee->next)
815 err_printf(m, "Active process (on ring %s): %s [%d]\n",i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
816 ee->engine->name,i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
817 ee->context.comm,i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
818 ee->context.pid)i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
;
819
820 err_printf(m, "Reset count: %u\n", error->reset_count)i915_error_printf(m, "Reset count: %u\n", error->reset_count
)
;
821 err_printf(m, "Suspend count: %u\n", error->suspend_count)i915_error_printf(m, "Suspend count: %u\n", error->suspend_count
)
;
822 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform))i915_error_printf(m, "Platform: %s\n", intel_platform_name(error
->device_info.platform))
;
823 err_printf(m, "Subplatform: 0x%x\n",i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
824 intel_subplatform(&error->runtime_info,i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
825 error->device_info.platform))i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
;
826 err_print_pciid(m, m->i915);
827
828 err_printf(m, "IOMMU enabled?: %d\n", error->iommu)i915_error_printf(m, "IOMMU enabled?: %d\n", error->iommu);
829
830 if (HAS_CSR(m->i915)((&(m->i915)->__info)->display.has_csr)) {
831 struct intel_csr *csr = &m->i915->csr;
832
833 err_printf(m, "DMC loaded: %s\n",i915_error_printf(m, "DMC loaded: %s\n", yesno(csr->dmc_payload
!= ((void *)0)))
834 yesno(csr->dmc_payload != NULL))i915_error_printf(m, "DMC loaded: %s\n", yesno(csr->dmc_payload
!= ((void *)0)))
;
835 err_printf(m, "DMC fw version: %d.%d\n",i915_error_printf(m, "DMC fw version: %d.%d\n", ((csr->version
) >> 16), ((csr->version) & 0xffff))
836 CSR_VERSION_MAJOR(csr->version),i915_error_printf(m, "DMC fw version: %d.%d\n", ((csr->version
) >> 16), ((csr->version) & 0xffff))
837 CSR_VERSION_MINOR(csr->version))i915_error_printf(m, "DMC fw version: %d.%d\n", ((csr->version
) >> 16), ((csr->version) & 0xffff))
;
838 }
839
840 err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock))i915_error_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock
))
;
841 err_printf(m, "PM suspended: %s\n", yesno(error->suspended))i915_error_printf(m, "PM suspended: %s\n", yesno(error->suspended
))
;
842
843 if (error->gt)
844 err_print_gt(m, error->gt);
845
846 if (error->overlay)
847 intel_overlay_print_error_state(m, error->overlay);
848
849 if (error->display)
850 intel_display_print_error_state(m, error->display);
851
852 err_print_capabilities(m, error);
853 err_print_params(m, &error->params);
854}
855
856static int err_print_to_sgl(struct i915_gpu_coredump *error)
857{
858 struct drm_i915_error_state_buf m;
859
860 if (IS_ERR(error))
861 return PTR_ERR(error);
862
863 if (READ_ONCE(error->sgl)({ typeof(error->sgl) __tmp = *(volatile typeof(error->
sgl) *)&(error->sgl); membar_datadep_consumer(); __tmp
; })
)
864 return 0;
865
866 memset(&m, 0, sizeof(m))__builtin_memset((&m), (0), (sizeof(m)));
867 m.i915 = error->i915;
868
869 __err_print_to_sgl(&m, error);
870
871 if (m.buf) {
872 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
873 m.bytes = 0;
874 m.buf = NULL((void *)0);
875 }
876 if (m.cur) {
877 GEM_BUG_ON(m.end < m.cur)((void)0);
878 sg_mark_end(m.cur - 1);
879 }
880 GEM_BUG_ON(m.sgl && !m.cur)((void)0);
881
882 if (m.err) {
883 err_free_sgl(m.sgl);
884 return m.err;
885 }
886
887 if (cmpxchg(&error->sgl, NULL, m.sgl)__sync_val_compare_and_swap(&error->sgl, ((void *)0), m
.sgl)
)
888 err_free_sgl(m.sgl);
889
890 return 0;
891}
892
893ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
894 char *buf, loff_t off, size_t rem)
895{
896 STUB()do { printf("%s: stub\n", __func__); } while(0);
897 return -ENOSYS78;
898#ifdef notyet
899 struct scatterlist *sg;
900 size_t count;
901 loff_t pos;
902 int err;
903
904 if (!error || !rem)
905 return 0;
906
907 err = err_print_to_sgl(error);
908 if (err)
909 return err;
910
911 sg = READ_ONCE(error->fit)({ typeof(error->fit) __tmp = *(volatile typeof(error->
fit) *)&(error->fit); membar_datadep_consumer(); __tmp
; })
;
912 if (!sg || off < sg->dma_address)
913 sg = error->sgl;
914 if (!sg)
915 return 0;
916
917 pos = sg->dma_address;
918 count = 0;
919 do {
920 size_t len, start;
921
922 if (sg_is_chain(sg)0) {
923 sg = sg_chain_ptr(sg)((void *)0);
924 GEM_BUG_ON(sg_is_chain(sg))((void)0);
925 }
926
927 len = sg->length;
928 if (pos + len <= off) {
929 pos += len;
930 continue;
931 }
932
933 start = sg->offset;
934 if (pos < off) {
935 GEM_BUG_ON(off - pos > len)((void)0);
936 len -= off - pos;
937 start += off - pos;
938 pos = off;
939 }
940
941 len = min(len, rem)(((len)<(rem))?(len):(rem));
942 GEM_BUG_ON(!len || len > sg->length)((void)0);
943
944 memcpy(buf, page_address(sg_page(sg)) + start, len)__builtin_memcpy((buf), (page_address(sg_page(sg)) + start), (
len))
;
945
946 count += len;
947 pos += len;
948
949 buf += len;
950 rem -= len;
951 if (!rem) {
952 WRITE_ONCE(error->fit, sg)({ typeof(error->fit) __tmp = (sg); *(volatile typeof(error
->fit) *)&(error->fit) = __tmp; __tmp; })
;
953 break;
954 }
955 } while (!sg_is_last(sg++)((sg++)->end));
956
957 return count;
958#endif
959}
960
961static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
962{
963 while (vma) {
964 struct i915_vma_coredump *next = vma->next;
965 int page;
966
967 for (page = 0; page < vma->page_count; page++)
968 free_page((unsigned long)vma->pages[page]);
969
970 kfree(vma);
971 vma = next;
972 }
973}
974
975static void cleanup_params(struct i915_gpu_coredump *error)
976{
977 i915_params_free(&error->params);
978}
979
980static void cleanup_uc(struct intel_uc_coredump *uc)
981{
982 kfree(uc->guc_fw.path);
983 kfree(uc->huc_fw.path);
984 i915_vma_coredump_free(uc->guc_log);
985
986 kfree(uc);
987}
988
989static void cleanup_gt(struct intel_gt_coredump *gt)
990{
991 while (gt->engine) {
992 struct intel_engine_coredump *ee = gt->engine;
993
994 gt->engine = ee->next;
995
996 i915_vma_coredump_free(ee->vma);
997 kfree(ee);
998 }
999
1000 if (gt->uc)
1001 cleanup_uc(gt->uc);
1002
1003 kfree(gt);
1004}
1005
1006void __i915_gpu_coredump_free(struct kref *error_ref)
1007{
1008 struct i915_gpu_coredump *error =
1009 container_of(error_ref, typeof(*error), ref)({ const __typeof( ((typeof(*error) *)0)->ref ) *__mptr = (
error_ref); (typeof(*error) *)( (char *)__mptr - __builtin_offsetof
(typeof(*error), ref) );})
;
1010
1011 while (error->gt) {
1012 struct intel_gt_coredump *gt = error->gt;
1013
1014 error->gt = gt->next;
1015 cleanup_gt(gt);
1016 }
1017
1018 kfree(error->overlay);
1019 kfree(error->display);
1020
1021 cleanup_params(error);
1022
1023 err_free_sgl(error->sgl);
1024 kfree(error);
1025}
1026
1027static struct i915_vma_coredump *
1028i915_vma_coredump_create(const struct intel_gt *gt,
1029 const struct i915_vma *vma,
1030 const char *name,
1031 struct i915_vma_compress *compress)
1032{
1033 STUB()do { printf("%s: stub\n", __func__); } while(0);
1034 return NULL((void *)0);
1035#ifdef notyet
1036 struct i915_ggtt *ggtt = gt->ggtt;
1037 const u64 slot = ggtt->error_capture.start;
1038 struct i915_vma_coredump *dst;
1039 unsigned long num_pages;
1040 struct sgt_iter iter;
1041 int ret;
1042
1043 might_sleep()assertwaitok();
1044
1045 if (!vma || !vma->pages || !compress)
1046 return NULL((void *)0);
1047
1048 num_pages = min_t(u64, vma->size, vma->obj->base.size)({ u64 __min_a = (vma->size); u64 __min_b = (vma->obj->
base.size); __min_a < __min_b ? __min_a : __min_b; })
>> PAGE_SHIFT12;
1049 num_pages = DIV_ROUND_UP(10 * num_pages, 8)(((10 * num_pages) + ((8) - 1)) / (8)); /* worstcase zlib growth */
1050 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL((0x0001 | 0x0004) | 0 | 0));
1051 if (!dst)
1052 return NULL((void *)0);
1053
1054 if (!compress_start(compress)) {
1055 kfree(dst);
1056 return NULL((void *)0);
1057 }
1058
1059 strlcpy(dst->name, name, sizeof(dst->name));
1060 dst->next = NULL((void *)0);
1061
1062 dst->gtt_offset = vma->node.start;
1063 dst->gtt_size = vma->node.size;
1064 dst->gtt_page_sizes = vma->page_sizes.gtt;
1065 dst->num_pages = num_pages;
1066 dst->page_count = 0;
1067 dst->unused = 0;
1068
1069 ret = -EINVAL22;
1070 if (drm_mm_node_allocated(&ggtt->error_capture)) {
1071 void __iomem *s;
1072 dma_addr_t dma;
1073
1074 for_each_sgt_daddr(dma, iter, vma->pages)for ((iter) = __sgt_iter((vma->pages)->sgl, 1); ((dma) =
(iter).dma + (iter).curr), (iter).sgp; (((iter).curr += ((1ULL
<< (12)))) >= (iter).max) ? (iter) = __sgt_iter(__sg_next
((iter).sgp), 1), 0 : 0)
{
1075 ggtt->vm.insert_page(&ggtt->vm, dma, slot,
1076 I915_CACHE_NONE, 0);
1077 mb()do { __asm volatile("mfence" ::: "memory"); } while (0);
1078
1079 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE(1 << 12));
1080 ret = compress_page(compress,
1081 (void __force *)s, dst,
1082 true1);
1083 io_mapping_unmap(s);
1084 if (ret)
1085 break;
1086 }
1087 } else if (i915_gem_object_is_lmem(vma->obj)) {
1088 struct intel_memory_region *mem = vma->obj->mm.region;
1089 dma_addr_t dma;
1090
1091 for_each_sgt_daddr(dma, iter, vma->pages)for ((iter) = __sgt_iter((vma->pages)->sgl, 1); ((dma) =
(iter).dma + (iter).curr), (iter).sgp; (((iter).curr += ((1ULL
<< (12)))) >= (iter).max) ? (iter) = __sgt_iter(__sg_next
((iter).sgp), 1), 0 : 0)
{
1092 void __iomem *s;
1093
1094 s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE(1 << 12));
1095 ret = compress_page(compress,
1096 (void __force *)s, dst,
1097 true1);
1098 io_mapping_unmap(s);
1099 if (ret)
1100 break;
1101 }
1102 } else {
1103 struct vm_page *page;
1104
1105 for_each_sgt_page(page, iter, vma->pages)for ((iter) = __sgt_iter((vma->pages)->sgl, 0); ((page)
= (iter).pfn == 0 ? ((void *)0) : (PHYS_TO_VM_PAGE(((paddr_t
)((iter).pfn + ((iter).curr >> 12)) << 12)))); ((
(iter).curr += (1 << 12)) >= (iter).max) ? (iter) = __sgt_iter
(__sg_next((iter).sgp), 0), 0 : 0)
{
1106 void *s;
1107
1108 drm_clflush_pages(&page, 1);
1109
1110 s = kmap(page);
1111 ret = compress_page(compress, s, dst, false0);
1112 kunmap(page);
1113
1114 drm_clflush_pages(&page, 1);
1115
1116 if (ret)
1117 break;
1118 }
1119 }
1120
1121 if (ret || compress_flush(compress, dst)) {
1122 while (dst->page_count--)
1123 pool_free(&compress->pool, dst->pages[dst->page_count]);
1124 kfree(dst);
1125 dst = NULL((void *)0);
1126 }
1127 compress_finish(compress);
1128
1129 return dst;
1130#endif
1131}
1132
1133static void gt_record_fences(struct intel_gt_coredump *gt)
1134{
1135 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1136 struct intel_uncore *uncore = gt->_gt->uncore;
1137 int i;
1138
1139 if (INTEL_GEN(uncore->i915)((&(uncore->i915)->__info)->gen) >= 6) {
1140 for (i = 0; i < ggtt->num_fences; i++)
1141 gt->fence[i] =
1142 intel_uncore_read64(uncore,
1143 FENCE_REG_GEN6_LO(i)((const i915_reg_t){ .reg = (0x100000 + (i) * 8) }));
1144 } else if (INTEL_GEN(uncore->i915)((&(uncore->i915)->__info)->gen) >= 4) {
1145 for (i = 0; i < ggtt->num_fences; i++)
1146 gt->fence[i] =
1147 intel_uncore_read64(uncore,
1148 FENCE_REG_965_LO(i)((const i915_reg_t){ .reg = (0x03000 + (i) * 8) }));
1149 } else {
1150 for (i = 0; i < ggtt->num_fences; i++)
1151 gt->fence[i] =
1152 intel_uncore_read(uncore, FENCE_REG(i)((const i915_reg_t){ .reg = (0x2000 + (((i) & 8) <<
9) + ((i) & 7) * 4) })
);
1153 }
1154 gt->nfence = i;
1155}
1156
1157static void engine_record_registers(struct intel_engine_coredump *ee)
1158{
1159 const struct intel_engine_cs *engine = ee->engine;
1160 struct drm_i915_privateinteldrm_softc *i915 = engine->i915;
1161
1162 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 6) {
1163 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x50) }))
;
1164
1165 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 12)
1166 ee->fault_reg = intel_uncore_read(engine->uncore,
1167 GEN12_RING_FAULT_REG((const i915_reg_t){ .reg = (0xcec4) }));
1168 else if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 8)
1169 ee->fault_reg = intel_uncore_read(engine->uncore,
1170 GEN8_RING_FAULT_REG((const i915_reg_t){ .reg = (0x4094) }));
1171 else
1172 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine)intel_uncore_read((engine)->uncore, ((const i915_reg_t){ .
reg = (0x4094 + 0x100 * (engine)->hw_id) }))
;
1173 }
1174
1175 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 4) {
1176 ee->esr = ENGINE_READ(engine, RING_ESR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xb8) }))
;
1177 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x78) }))
;
1178 ee->ipeir = ENGINE_READ(engine, RING_IPEIR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x64) }))
;
1179 ee->ipehr = ENGINE_READ(engine, RING_IPEHR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x68) }))
;
1180 ee->instps = ENGINE_READ(engine, RING_INSTPS)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x70) }))
;
1181 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x140) }))
;
1182 ee->ccid = ENGINE_READ(engine, CCID)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x180) }))
;
1183 if (INTEL_GEN(i915)((&(i915)->__info)->gen) >= 8) {
1184 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x60) }))
<< 32;
1185 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x168) }))
<< 32;
1186 }
1187 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x110) }))
;
1188 } else {
1189 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xd0) }))
;
1190 ee->ipeir = ENGINE_READ(engine, IPEIR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x88) }))
;
1191 ee->ipehr = ENGINE_READ(engine, IPEHR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x8c) }))
;
1192 }
1193
1194 intel_engine_get_instdone(engine, &ee->instdone);
1195
1196 ee->instpm = ENGINE_READ(engine, RING_INSTPM)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xc0) }))
;
1197 ee->acthd = intel_engine_get_active_head(engine);
1198 ee->start = ENGINE_READ(engine, RING_START)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x38) }))
;
1199 ee->head = ENGINE_READ(engine, RING_HEAD)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x34) }))
;
1200 ee->tail = ENGINE_READ(engine, RING_TAIL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x30) }))
;
1201 ee->ctl = ENGINE_READ(engine, RING_CTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x3c) }))
;
1202 if (INTEL_GEN(i915)((&(i915)->__info)->gen) > 2)
1203 ee->mode = ENGINE_READ(engine, RING_MI_MODE)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x9c) }))
;
1204
1205 if (!HWS_NEEDS_PHYSICAL(i915)) {
1206 i915_reg_t mmio;
1207
1208 if (IS_GEN(i915, 7)) {
1209 switch (engine->id) {
1210 default:
1211 MISSING_CASE(engine->id);
1212 fallthrough;
1213 case RCS0:
1214 mmio = RENDER_HWS_PGA_GEN7;
1215 break;
1216 case BCS0:
1217 mmio = BLT_HWS_PGA_GEN7;
1218 break;
1219 case VCS0:
1220 mmio = BSD_HWS_PGA_GEN7;
1221 break;
1222 case VECS0:
1223 mmio = VEBOX_HWS_PGA_GEN7;
1224 break;
1225 }
1226 } else if (IS_GEN(engine->i915, 6)) {
1227 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
1228 } else {
1229 /* XXX: gen8 returns to sanity */
1230 mmio = RING_HWS_PGA(engine->mmio_base);
1231 }
1232
1233 ee->hws = intel_uncore_read(engine->uncore, mmio);
1234 }
1235
1236 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
1237
1238 if (HAS_PPGTT(i915)) {
1239 int i;
1240
1241 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
1242
1243 if (IS_GEN(i915, 6)) {
1244 ee->vm_info.pp_dir_base =
1245 ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
1246 } else if (IS_GEN(i915, 7)) {
1247 ee->vm_info.pp_dir_base =
1248 ENGINE_READ(engine, RING_PP_DIR_BASE);
1249 } else if (INTEL_GEN(i915) >= 8) {
1250 u32 base = engine->mmio_base;
1251
1252 for (i = 0; i < 4; i++) {
1253 ee->vm_info.pdp[i] =
1254 intel_uncore_read(engine->uncore,
1255 GEN8_RING_PDP_UDW(base, i));
1256 ee->vm_info.pdp[i] <<= 32;
1257 ee->vm_info.pdp[i] |=
1258 intel_uncore_read(engine->uncore,
1259 GEN8_RING_PDP_LDW(base, i));
1260 }
1261 }
1262 }
1263}
1264
1265static void record_request(const struct i915_request *request,
1266 struct i915_request_coredump *erq)
1267{
1268 erq->flags = request->fence.flags;
1269 erq->context = request->fence.context;
1270 erq->seqno = request->fence.seqno;
1271 erq->sched_attr = request->sched.attr;
1272 erq->head = request->head;
1273 erq->tail = request->tail;
1274
1275 erq->pid = 0;
1276 rcu_read_lock();
1277 if (!intel_context_is_closed(request->context)) {
1278 const struct i915_gem_context *ctx;
1279
1280 ctx = rcu_dereference(request->context->gem_context);
1281 if (ctx)
1282#ifdef __linux__
1283 erq->pid = pid_nr(ctx->pid);
1284#else
1285 erq->pid = ctx->pid;
1286#endif
1287 }
1288 rcu_read_unlock();
1289}
1290
1291static void engine_record_execlists(struct intel_engine_coredump *ee)
1292{
1293 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1294 struct i915_request * const *port = el->active;
1295 unsigned int n = 0;
1296
1297 while (*port)
1298 record_request(*port++, &ee->execlist[n++]);
1299
1300 ee->num_ports = n;
1301}
1302
1303static bool record_context(struct i915_gem_context_coredump *e,
1304 const struct i915_request *rq)
1305{
1306 struct i915_gem_context *ctx;
1307 struct task_struct *task;
1308 bool simulated;
1309
1310 rcu_read_lock();
1311 ctx = rcu_dereference(rq->context->gem_context);
1312 if (ctx && !kref_get_unless_zero(&ctx->ref))
1313 ctx = NULL;
1314 rcu_read_unlock();
1315 if (!ctx)
1316 return true;
1317
1318#ifdef __linux__
1319 rcu_read_lock();
1320 task = pid_task(ctx->pid, PIDTYPE_PID);
1321 if (task) {
1322 strcpy(e->comm, task->comm);
1323 e->pid = task->pid;
1324 }
1325 rcu_read_unlock();
1326#endif
1327
1328 e->sched_attr = ctx->sched;
1329 e->guilty = atomic_read(&ctx->guilty_count);
1330 e->active = atomic_read(&ctx->active_count);
1331
1332 e->total_runtime = rq->context->runtime.total;
1333 e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
1334
1335 simulated = i915_gem_context_no_error_capture(ctx);
1336
1337 i915_gem_context_put(ctx);
1338 return simulated;
1339}
1340
1341struct intel_engine_capture_vma {
1342 struct intel_engine_capture_vma *next;
1343 struct i915_vma *vma;
1344 char name[16];
1345};
1346
1347static struct intel_engine_capture_vma *
1348capture_vma(struct intel_engine_capture_vma *next,
1349 struct i915_vma *vma,
1350 const char *name,
1351 gfp_t gfp)
1352{
1353 struct intel_engine_capture_vma *c;
1354
1355 if (!vma)
1356 return next;
1357
1358 c = kmalloc(sizeof(*c), gfp);
1359 if (!c)
1360 return next;
1361
1362 if (!i915_active_acquire_if_busy(&vma->active)) {
1363 kfree(c);
1364 return next;
1365 }
1366
1367 strlcpy(c->name, name, sizeof(c->name));
1368 c->vma = vma; /* reference held while active */
1369
1370 c->next = next;
1371 return c;
1372}
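capture_vma() above links a new node onto the front of a singly linked capture list only if both the node allocation and the i915_active acquire succeed; on any failure it simply returns the existing head, so an out-of-memory capture records fewer VMAs instead of aborting. A minimal userspace sketch of that prepend-or-skip pattern (hypothetical capture() helper and struct node, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
        struct node *next;
        char name[16];
};

/* Mirrors capture_vma(): on any failure, return "next" unchanged and move on. */
static struct node *capture(struct node *next, const char *name)
{
        struct node *c = malloc(sizeof(*c));

        if (!c)
                return next;    /* skip this entry, keep what was already captured */

        strncpy(c->name, name, sizeof(c->name) - 1);
        c->name[sizeof(c->name) - 1] = '\0';
        c->next = next;
        return c;
}

int main(void)
{
        struct node *head = NULL;

        head = capture(head, "batch");
        head = capture(head, "ring");
        head = capture(head, "HW context");

        while (head) {          /* walk and free, newest entry first */
                struct node *next = head->next;

                printf("%s\n", head->name);
                free(head);
                head = next;
        }
        return 0;
}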
1373
1374static struct intel_engine_capture_vma *
1375capture_user(struct intel_engine_capture_vma *capture,
1376 const struct i915_request *rq,
1377 gfp_t gfp)
1378{
1379 struct i915_capture_list *c;
1380
1381 for (c = rq->capture_list; c; c = c->next)
1382 capture = capture_vma(capture, c->vma, "user", gfp);
1383
1384 return capture;
1385}
1386
1387static void add_vma(struct intel_engine_coredump *ee,
1388 struct i915_vma_coredump *vma)
1389{
1390 if (vma) {
1391 vma->next = ee->vma;
1392 ee->vma = vma;
1393 }
1394}
1395
1396struct intel_engine_coredump *
1397intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
1398{
1399 struct intel_engine_coredump *ee;
1400
1401 ee = kzalloc(sizeof(*ee), gfp);
1402 if (!ee)
1403 return NULL;
1404
1405 ee->engine = engine;
1406
1407 engine_record_registers(ee);
1408 engine_record_execlists(ee);
1409
1410 return ee;
1411}
1412
1413struct intel_engine_capture_vma *
1414intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1415 struct i915_request *rq,
1416 gfp_t gfp)
1417{
1418 struct intel_engine_capture_vma *vma = NULL;
1419
1420 ee->simulated |= record_context(&ee->context, rq);
1421 if (ee->simulated)
1422 return NULL;
1423
1424 /*
1425 * We need to copy these to an anonymous buffer
1426 * as the simplest method to avoid being overwritten
1427 * by userspace.
1428 */
1429 vma = capture_vma(vma, rq->batch, "batch", gfp);
1430 vma = capture_user(vma, rq, gfp);
1431 vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
1432 vma = capture_vma(vma, rq->context->state, "HW context", gfp);
1433
1434 ee->rq_head = rq->head;
1435 ee->rq_post = rq->postfix;
1436 ee->rq_tail = rq->tail;
1437
1438 return vma;
1439}
1440
1441void
1442intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1443 struct intel_engine_capture_vma *capture,
1444 struct i915_vma_compress *compress)
1445{
1446 const struct intel_engine_cs *engine = ee->engine;
1447
1448 while (capture) {
1449 struct intel_engine_capture_vma *this = capture;
1450 struct i915_vma *vma = this->vma;
1451
1452 add_vma(ee,
1453 i915_vma_coredump_create(engine->gt,
1454 vma, this->name,
1455 compress));
1456
1457 i915_active_release(&vma->active);
1458
1459 capture = this->next;
1460 kfree(this);
1461 }
1462
1463 add_vma(ee,
1464 i915_vma_coredump_create(engine->gt,
1465 engine->status_page.vma,
1466 "HW Status",
1467 compress));
1468
1469 add_vma(ee,
1470 i915_vma_coredump_create(engine->gt,
1471 engine->wa_ctx.vma,
1472 "WA context",
1473 compress));
1474}
1475
1476static struct intel_engine_coredump *
1477capture_engine(struct intel_engine_cs *engine,
1478 struct i915_vma_compress *compress)
1479{
1480 struct intel_engine_capture_vma *capture = NULL;
1481 struct intel_engine_coredump *ee;
1482 struct i915_request *rq;
1483 unsigned long flags;
1484
1485 ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
1486 if (!ee)
1487 return NULL;
1488
1489 spin_lock_irqsave(&engine->active.lock, flags);
1490 rq = intel_engine_find_active_request(engine);
1491 if (rq)
1492 capture = intel_engine_coredump_add_request(ee, rq,
1493 ATOMIC_MAYFAIL);
1494 spin_unlock_irqrestore(&engine->active.lock, flags);
1495 if (!capture) {
1496 kfree(ee);
1497 return NULL;
1498 }
1499
1500 intel_engine_coredump_add_vma(ee, capture, compress);
1501
1502 return ee;
1503}
1504
1505static void
1506gt_record_engines(struct intel_gt_coredump *gt,
1507 struct i915_vma_compress *compress)
1508{
1509 struct intel_engine_cs *engine;
1510 enum intel_engine_id id;
1511
1512 for_each_engine(engine, gt->_gt, id) {
1513 struct intel_engine_coredump *ee;
1514
1515 /* Refill our page pool before entering atomic section */
1516 pool_refill(&compress->pool, ALLOW_FAIL);
1517
1518 ee = capture_engine(engine, compress);
1519 if (!ee)
1520 continue;
1521
1522 gt->simulated |= ee->simulated;
1523 if (ee->simulated) {
1524 kfree(ee);
1525 continue;
1526 }
1527
1528 ee->next = gt->engine;
1529 gt->engine = ee;
1530 }
1531}
1532
1533static struct intel_uc_coredump *
1534gt_record_uc(struct intel_gt_coredump *gt,
1535 struct i915_vma_compress *compress)
1536{
1537 const struct intel_uc *uc = &gt->_gt->uc;
1538 struct intel_uc_coredump *error_uc;
1539
1540 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1541 if (!error_uc)
1542 return NULL;
1543
1544 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1545 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
1546
1547 /* Non-default firmware paths will be specified by the modparam.
1548 * As modparams are generally accessible from userspace, make
1549 * explicit copies of the firmware paths.
1550 */
1551 error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
1552 error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
1553 error_uc->guc_log =
1554 i915_vma_coredump_create(gt->_gt,
1555 uc->guc.log.vma, "GuC log buffer",
1556 compress);
1557
1558 return error_uc;
1559}
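gt_record_uc() duplicates the firmware path strings because, as the comment notes, module parameters remain reachable from userspace after the capture, so the error record keeps private copies. A small userspace sketch of that defensive snapshot, with strdup standing in for the kernel's kstrdup and a made-up firmware path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char live_path[64] = "i915/example_guc.bin";  /* stand-in for a modparam string */
        char *snapshot = strdup(live_path);           /* the record keeps its own copy */

        if (!snapshot)
                return 1;

        strcpy(live_path, "i915/changed_later.bin");  /* later change does not touch the record */
        printf("captured path: %s\n", snapshot);

        free(snapshot);
        return 0;
}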
1560
1561static void gt_capture_prepare(struct intel_gt_coredump *gt)
1562{
1563 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1564
1565 mutex_lock(&ggtt->error_mutex);
1566}
1567
1568static void gt_capture_finish(struct intel_gt_coredump *gt)
1569{
1570 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1571
1572 if (drm_mm_node_allocated(&ggtt->error_capture))
1573 ggtt->vm.clear_range(&ggtt->vm,
1574 ggtt->error_capture.start,
1575 PAGE_SIZE);
1576
1577 mutex_unlock(&ggtt->error_mutex);
1578}
1579
1580/* Capture all registers which don't fit into another category. */
1581static void gt_record_regs(struct intel_gt_coredump *gt)
1582{
1583 struct intel_uncore *uncore = gt->_gt->uncore;
1584 struct drm_i915_private *i915 = uncore->i915;
1585 int i;
1586
1587 /*
1588 * General organization
1589 * 1. Registers specific to a single generation
1590 * 2. Registers which belong to multiple generations
1591 * 3. Feature specific registers.
1592 * 4. Everything else
1593 * Please try to follow the order.
1594 */
1595
1596 /* 1: Registers specific to a single generation */
1597 if (IS_VALLEYVIEW(i915)) {
1598 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1599 gt->ier = intel_uncore_read(uncore, VLV_IER);
1600 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
1601 }
1602
1603 if (IS_GEN(i915, 7))
1604 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
1605
1606 if (INTEL_GEN(i915) >= 12) {
1607 gt->fault_data0 = intel_uncore_read(uncore,
1608 GEN12_FAULT_TLB_DATA0);
1609 gt->fault_data1 = intel_uncore_read(uncore,
1610 GEN12_FAULT_TLB_DATA1);
1611 } else if (INTEL_GEN(i915) >= 8) {
1612 gt->fault_data0 = intel_uncore_read(uncore,
1613 GEN8_FAULT_TLB_DATA0);
1614 gt->fault_data1 = intel_uncore_read(uncore,
1615 GEN8_FAULT_TLB_DATA1);
1616 }
1617
1618 if (IS_GEN(i915, 6)) {
1619 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1620 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1621 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
1622 }
1623
1624 /* 2: Registers which belong to multiple generations */
1625 if (INTEL_GEN(i915) >= 7)
1626 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
1627
1628 if (INTEL_GEN(i915) >= 6) {
1629 gt->derrmr = intel_uncore_read(uncore, DERRMR);
1630 if (INTEL_GEN(i915) < 12) {
1631 gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1632 gt->done_reg = intel_uncore_read(uncore, DONE_REG);
1633 }
1634 }
1635
1636 /* 3: Feature specific registers */
1637 if (IS_GEN_RANGE(i915, 6, 7)) {
1638 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1639 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
1640 }
1641
1642 if (IS_GEN_RANGE(i915, 8, 11))
1643 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
1644
1645 if (IS_GEN(i915, 12))
1646 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
1647
1648 if (INTEL_GEN(i915) >= 12) {
1649 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
1650 /*
1651 * SFC_DONE resides in the VD forcewake domain, so it
1652 * only exists if the corresponding VCS engine is
1653 * present.
1654 */
1655 if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
1656 continue;
1657
1658 gt->sfc_done[i] =
1659 intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1660 }
1661
1662 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
1663 }
1664
1665 /* 4: Everything else */
1666 if (INTEL_GEN(i915) >= 11) {
1667 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1668 gt->gtier[0] =
1669 intel_uncore_read(uncore,
1670 GEN11_RENDER_COPY_INTR_ENABLE);
1671 gt->gtier[1] =
1672 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
1673 gt->gtier[2] =
1674 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
1675 gt->gtier[3] =
1676 intel_uncore_read(uncore,
1677 GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1678 gt->gtier[4] =
1679 intel_uncore_read(uncore,
1680 GEN11_CRYPTO_RSVD_INTR_ENABLE);
1681 gt->gtier[5] =
1682 intel_uncore_read(uncore,
1683 GEN11_GUNIT_CSME_INTR_ENABLE);
1684 gt->ngtier = 6;
1685 } else if (INTEL_GEN(i915) >= 8) {
1686 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1687 for (i = 0; i < 4; i++)
1688 gt->gtier[i] =
1689 intel_uncore_read(uncore, GEN8_GT_IER(i));
1690 gt->ngtier = 4;
1691 } else if (HAS_PCH_SPLIT(i915)) {
1692 gt->ier = intel_uncore_read(uncore, DEIER);
1693 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1694 gt->ngtier = 1;
1695 } else if (IS_GEN(i915, 2)) {
1696 gt->ier = intel_uncore_read16(uncore, GEN2_IER);
1697 } else if (!IS_VALLEYVIEW(i915)) {
1698 gt->ier = intel_uncore_read(uncore, GEN2_IER);
1699 }
1700 gt->eir = intel_uncore_read(uncore, EIR);
1701 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1702}
1703
1704static void gt_record_info(struct intel_gt_coredump *gt)
1705{
1706 memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1707}
1708
1709/*
1710 * Generate a semi-unique error code. The code is not meant to have meaning; its
1711 * only purpose is to try to prevent false duplicated bug reports by
1712 * grossly estimating a GPU error state.
1713 *
1714 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
1715 * the hang if we could strip the GTT offset information from it.
1716 *
1717 * It's only a small step better than a random number in its current form.
1718 */
1719static u32 generate_ecode(const struct intel_engine_coredump *ee)
1720{
1721 /*
1722 * IPEHR would be an ideal way to detect errors, as it's the gross
1723 * measure of "the command that hung." However, it contains some very common
1724 * synchronization commands which almost always appear when the hang is
1725 * strictly a client bug. Use instdone to help differentiate those cases.
1726 */
1727 return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1728}
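generate_ecode() folds the two captured values into one 32-bit code by XOR and reserves 0 for the case where no engine was captured. An illustrative standalone version with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Same shape as generate_ecode(): 0 means "no engine captured". */
static uint32_t ecode(int have_engine, uint32_t ipehr, uint32_t instdone)
{
        return have_engine ? (ipehr ^ instdone) : 0;
}

int main(void)
{
        printf("ecode %08x\n", ecode(1, 0x02000000, 0xfffffffe)); /* fdfffffe */
        printf("ecode %08x\n", ecode(0, 0, 0));                   /* 00000000 */
        return 0;
}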
1729
1730static const char *error_msg(struct i915_gpu_coredump *error)
1731{
1732 struct intel_engine_coredump *first = NULL;
1733 struct intel_gt_coredump *gt;
1734 intel_engine_mask_t engines;
1735 int len;
1736
1737 engines = 0;
1738 for (gt = error->gt; gt; gt = gt->next) {
1739 struct intel_engine_coredump *cs;
1740
1741 if (gt->engine && !first)
1742 first = gt->engine;
1743
1744 for (cs = gt->engine; cs; cs = cs->next)
1745 engines |= cs->engine->mask;
1746 }
1747
1748 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1749 "GPU HANG: ecode %d:%x:%08x",
1750 INTEL_GEN(error->i915), engines,
1751 generate_ecode(first));
1752 if (first && first->context.pid) {
1753 /* Just show the first executing process, more is confusing */
1754 len += scnprintf(error->error_msg + len,
Value stored to 'len' is never read
1755 sizeof(error->error_msg) - len,
1756 ", in %s [%d]",
1757 first->context.comm, first->context.pid)
;
1758 }
1759
1760 return error->error_msg;
1761}
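This is the function the analyzer flags: at source line 1754 the second scnprintf() result is added to len, but nothing reads len afterwards, so the store is dead (the reads of len inside the call happen before the assignment). A standalone sketch of the same shape and one possible fix, using plain snprintf in place of the kernel's scnprintf:

#include <stdio.h>

int main(void)
{
        char msg[128];
        int len;

        len = snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:%x:%08x",
                       12, 0x1, 0xdeadbeef);

        /*
         * Original shape of line 1754: the value stored into "len" here is
         * never read again, which is exactly what the checker reports.
         *
         * len += snprintf(msg + len, sizeof(msg) - len, ", in %s [%d]", "Xorg", 1234);
         */

        /* One possible fix: keep the offset use of "len" but drop the dead store. */
        snprintf(msg + len, sizeof(msg) - len, ", in %s [%d]", "Xorg", 1234);

        puts(msg);
        return 0;
}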
1762
1763static void capture_gen(struct i915_gpu_coredump *error)
1764{
1765 struct drm_i915_private *i915 = error->i915;
1766
1767 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1768 error->suspended = i915->runtime_pm.suspended;
1769
1770 error->iommu = -1;
1771#ifdef CONFIG_INTEL_IOMMU
1772 error->iommu = intel_iommu_gfx_mapped;
1773#endif
1774 error->reset_count = i915_reset_count(&i915->gpu_error);
1775 error->suspend_count = i915->suspend_count;
1776
1777 i915_params_copy(&error->params, &i915->params);
1778 memcpy(&error->device_info,
1779 INTEL_INFO(i915),
1780 sizeof(error->device_info));
1781 memcpy(&error->runtime_info,
1782 RUNTIME_INFO(i915),
1783 sizeof(error->runtime_info));
1784 error->driver_caps = i915->caps;
1785}
1786
1787struct i915_gpu_coredump *
1788i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
1789{
1790 struct i915_gpu_coredump *error;
1791
1792 if (!i915->params.error_capture)
1793 return NULL;
1794
1795 error = kzalloc(sizeof(*error), gfp);
1796 if (!error)
1797 return NULL;
1798
1799 kref_init(&error->ref);
1800 error->i915 = i915;
1801
1802 error->time = ktime_get_real();
1803 error->boottime = ktime_get_boottime();
1804 error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
1805 error->capture = jiffies;
1806
1807 capture_gen(error);
1808
1809 return error;
1810}
1811
1812#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
1813
1814struct intel_gt_coredump *
1815intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
1816{
1817 struct intel_gt_coredump *gc;
1818
1819 gc = kzalloc(sizeof(*gc), gfp);
1820 if (!gc)
1821 return NULL;
1822
1823 gc->_gt = gt;
1824 gc->awake = intel_gt_pm_is_awake(gt);
1825
1826 gt_record_regs(gc);
1827 gt_record_fences(gc);
1828
1829 return gc;
1830}
1831
1832struct i915_vma_compress *
1833i915_vma_capture_prepare(struct intel_gt_coredump *gt)
1834{
1835 struct i915_vma_compress *compress;
1836
1837 compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
1838 if (!compress)
1839 return NULL;
1840
1841 if (!compress_init(compress)) {
1842 kfree(compress);
1843 return NULL;
1844 }
1845
1846 gt_capture_prepare(gt);
1847
1848 return compress;
1849}
1850
1851void i915_vma_capture_finish(struct intel_gt_coredump *gt,
1852 struct i915_vma_compress *compress)
1853{
1854 if (!compress)
1855 return;
1856
1857 gt_capture_finish(gt);
1858
1859 compress_fini(compress);
1860 kfree(compress);
1861}
1862
1863struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
1864{
1865 struct i915_gpu_coredump *error;
1866
1867 /* Check if GPU capture has been disabled */
1868 error = READ_ONCE(i915->gpu_error.first_error);
1869 if (IS_ERR(error))
1870 return error;
1871
1872 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
1873 if (!error)
1874 return ERR_PTR(-ENOMEM);
1875
1876 error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
1877 if (error->gt) {
1878 struct i915_vma_compress *compress;
1879
1880 compress = i915_vma_capture_prepare(error->gt);
1881 if (!compress) {
1882 kfree(error->gt);
1883 kfree(error);
1884 return ERR_PTR(-ENOMEM);
1885 }
1886
1887 gt_record_info(error->gt);
1888 gt_record_engines(error->gt, compress);
1889
1890 if (INTEL_INFO(i915)->has_gt_uc)
1891 error->gt->uc = gt_record_uc(error->gt, compress);
1892
1893 i915_vma_capture_finish(error->gt, compress);
1894
1895 error->simulated |= error->gt->simulated;
1896 }
1897
1898 error->overlay = intel_overlay_capture_error_state(i915);
1899 error->display = intel_display_capture_error_state(i915);
1900
1901 return error;
1902}
1903
1904void i915_error_state_store(struct i915_gpu_coredump *error)
1905{
1906 struct drm_i915_private *i915;
1907 static bool warned;
1908
1909 if (IS_ERR_OR_NULL(error))
1910 return;
1911
1912 i915 = error->i915;
1913 drm_info(&i915->drm, "%s\n", error_msg(error));
1914
1915 if (error->simulated ||
1916 cmpxchg(&i915->gpu_error.first_error, NULL, error))
1917 return;
1918
1919 i915_gpu_coredump_get(error);
1920
1921 if (!xchg(&warned, true) &&
1922 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
1923 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
1924 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
1925 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
1926 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1927 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1928 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1929 i915->drm.primary->index);
1930 }
1931}
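i915_error_state_store() keeps only the first capture: the cmpxchg() publishes the record into gpu_error.first_error only if the slot is still NULL, and simulated or later errors are dropped. A userspace sketch of that first-writer-wins slot, using C11 atomics rather than the kernel's cmpxchg:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) first_error;    /* starts out NULL, like gpu_error.first_error */

/* Hypothetical helper: returns 1 only for the capture that claimed the slot. */
static int store_first_error(void *error)
{
        void *expected = NULL;

        return atomic_compare_exchange_strong(&first_error, &expected, error);
}

int main(void)
{
        int first, second;

        printf("first stored:  %d\n", store_first_error(&first));  /* 1: slot was empty */
        printf("second stored: %d\n", store_first_error(&second)); /* 0: already taken */
        return 0;
}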
1932
1933/**
1934 * i915_capture_error_state - capture an error record for later analysis
1935 * @i915: i915 device
1936 *
1937 * Should be called when an error is detected (either a hang or an error
1938 * interrupt) to capture error state from the time of the error. Fills
1939 * out a structure which becomes available in debugfs for user level tools
1940 * to pick up.
1941 */
1942void i915_capture_error_state(struct drm_i915_private *i915)
1943{
1944 struct i915_gpu_coredump *error;
1945
1946 error = i915_gpu_coredump(i915);
1947 if (IS_ERR(error)) {
1948 cmpxchg(&i915->gpu_error.first_error, NULL, error);
1949 return;
1950 }
1951
1952 i915_error_state_store(error);
1953 i915_gpu_coredump_put(error);
1954}
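As the kernel-doc above says, the captured structure is later read back by user-level tools; the pr_info() messages in i915_error_state_store() name /sys/class/drm/card%d/error as the node to attach to bug reports on Linux builds of this driver. A minimal userspace reader, assuming the device is card0:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/drm/card0/error", "r");
        char line[256];

        if (!f) {
                perror("open error node");
                return 1;
        }

        /* Dumps the captured state; reads back a "no error state" message until a hang occurs. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}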
1955
1956struct i915_gpu_coredump *
1957i915_first_error_state(struct drm_i915_private *i915)
1958{
1959 struct i915_gpu_coredump *error;
1960
1961 spin_lock_irq(&i915->gpu_error.lock);
1962 error = i915->gpu_error.first_error;
1963 if (!IS_ERR_OR_NULL(error))
1964 i915_gpu_coredump_get(error);
1965 spin_unlock_irq(&i915->gpu_error.lock);
1966
1967 return error;
1968}
1969
1970void i915_reset_error_state(struct drm_i915_private *i915)
1971{
1972 struct i915_gpu_coredump *error;
1973
1974 spin_lock_irq(&i915->gpu_error.lock);
1975 error = i915->gpu_error.first_error;
1976 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
1977 i915->gpu_error.first_error = NULL;
1978 spin_unlock_irq(&i915->gpu_error.lock);
1979
1980 if (!IS_ERR_OR_NULL(error))
1981 i915_gpu_coredump_put(error);
1982}
1983
1984void i915_disable_error_state(struct drm_i915_private *i915, int err)
1985{
1986 spin_lock_irq(&i915->gpu_error.lock);
1987 if (!i915->gpu_error.first_error)
1988 i915->gpu_error.first_error = ERR_PTR(err);
1989 spin_unlock_irq(&i915->gpu_error.lock);
1990}