Bug Summary

File: dev/pci/drm/i915/i915_gpu_error.c
Warning: line 1966, column 3
Value stored to 'len' is never read
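
The flagged line (1966) lies past the end of the excerpt reproduced below, but the pattern the deadcode.DeadStores checker reports is straightforward: a value is assigned to 'len' and then overwritten (or goes out of scope) before it is ever read. A minimal, hypothetical sketch of the pattern — illustrative only, not the actual code at line 1966:

    #include <string.h>

    /* The first store to 'len' is dead: nothing reads it before the
     * second assignment replaces it, which is exactly what the
     * "Value stored to 'len' is never read" warning means.
     */
    static size_t copy_bounded(char *dst, const char *src, size_t cap)
    {
            size_t len;

            len = strlen(src);                              /* dead store */
            len = (strlen(src) < cap) ? strlen(src) : cap;  /* only this value is read */
            memcpy(dst, src, len);
            return len;
    }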

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_gpu_error.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/i915_gpu_error.c
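
The "Value stored to ... is never read" diagnostic comes from the deadcode.DeadStores checker, enabled above via -analyzer-checker=deadcode. A reduced invocation exercising just that checker might look like the sketch below (include paths abbreviated from the full command above; in practice the remaining kernel flags are also required):

    clang --analyze -Xclang -analyzer-checker=deadcode.DeadStores \
        -I /usr/src/sys -I /usr/src/sys/dev/pci/drm/include \
        /usr/src/sys/dev/pci/drm/i915/i915_gpu_error.c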
1/*
2 * Copyright (c) 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 * Mika Kuoppala <mika.kuoppala@intel.com>
27 *
28 */
29
30#include <linux/ascii85.h>
31#include <linux/highmem.h>
32#include <linux/nmi.h>
33#include <linux/pagevec.h>
34#include <linux/scatterlist.h>
35#include <linux/string_helpers.h>
36#include <linux/utsname.h>
37#include <linux/zlib.h>
38
39#include <drm/drm_cache.h>
40#include <drm/drm_print.h>
41
42#include "display/intel_dmc.h"
43#include "display/intel_overlay.h"
44
45#include "gem/i915_gem_context.h"
46#include "gem/i915_gem_lmem.h"
47#include "gt/intel_engine_regs.h"
48#include "gt/intel_gt.h"
49#include "gt/intel_gt_mcr.h"
50#include "gt/intel_gt_pm.h"
51#include "gt/intel_gt_regs.h"
52#include "gt/uc/intel_guc_capture.h"
53
54#include "i915_driver.h"
55#include "i915_drv.h"
56#include "i915_gpu_error.h"
57#include "i915_memcpy.h"
58#include "i915_scatterlist.h"
59#include "i915_utils.h"
60
61#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
62#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
63
64static void __sg_set_buf(struct scatterlist *sg,
65 void *addr, unsigned int len, loff_t it)
66{
67 STUB();
68#ifdef notyet
69 sg->page_link = (unsigned long)virt_to_page(addr);
70 sg->offset = offset_in_page(addr);
71 sg->length = len;
72 sg->dma_address = it;
73#endif
74}
75
76static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
77{
78 STUB();
79 return false;
80#ifdef notyet
81 if (!len)
82 return false;
83
84 if (e->bytes + len + 1 <= e->size)
85 return true;
86
87 if (e->bytes) {
88 __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
89 e->iter += e->bytes;
90 e->buf = NULL;
91 e->bytes = 0;
92 }
93
94 if (e->cur == e->end) {
95 struct scatterlist *sgl;
96
97 sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
98 if (!sgl) {
99 e->err = -ENOMEM;
100 return false;
101 }
102
103 if (e->cur) {
104 e->cur->offset = 0;
105 e->cur->length = 0;
106 e->cur->page_link =
107 (unsigned long)sgl | SG_CHAIN;
108 } else {
109 e->sgl = sgl;
110 }
111
112 e->cur = sgl;
113 e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
114 }
115
116 e->size = roundup2(len + 1, SZ_64K);
117 e->buf = kmalloc(e->size, ALLOW_FAIL);
118 if (!e->buf) {
119 e->size = PAGE_ALIGN(len + 1);
120 e->buf = kmalloc(e->size, GFP_KERNEL);
121 }
122 if (!e->buf) {
123 e->err = -ENOMEM;
124 return false;
125 }
126
127 return true;
128#endif
129}
130
131__printf(2, 0)
132static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
133 const char *fmt, va_list args)
134{
135 va_list ap;
136 int len;
137
138 if (e->err)
139 return;
140
141 va_copy(ap, args);
142 len = vsnprintf(NULL, 0, fmt, ap);
143 va_end(ap);
144 if (len <= 0) {
145 e->err = len;
146 return;
147 }
148
149 if (!__i915_error_grow(e, len))
150 return;
151
152 GEM_BUG_ON(e->bytes >= e->size);
153 len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
154 if (len < 0) {
155 e->err = len;
156 return;
157 }
158 e->bytes += len;
159}
160
161static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
162{
163 unsigned len;
164
165 if (e->err || !str)
166 return;
167
168 len = strlen(str);
169 if (!__i915_error_grow(e, len))
170 return;
171
172 GEM_BUG_ON(e->bytes + len > e->size);
173 memcpy(e->buf + e->bytes, str, len);
174 e->bytes += len;
175}
176
177#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
178#define err_puts(e, s) i915_error_puts(e, s)
179
180static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
181{
182 i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
183}
184
185static inline struct drm_printer
186i915_error_printer(struct drm_i915_error_state_buf *e)
187{
188 struct drm_printer p = {
189 .printfn = __i915_printfn_error,
190 .arg = e,
191 };
192 return p;
193}
194
195/* single threaded page allocator with a reserved stash for emergencies */
196static void pool_fini(struct pagevec *pv)
197{
198 STUB();
199#ifdef notyet
200 pagevec_release(pv);
201#endif
202}
203
204static int pool_refill(struct pagevec *pv, gfp_t gfp)
205{
206 while (pagevec_space(pv)) {
207 struct vm_page *p;
208
209 p = alloc_page(gfp);
210 if (!p)
211 return -ENOMEM;
212
213 pagevec_add(pv, p);
214 }
215
216 return 0;
217}
218
219static int intel_pool_init(struct pagevec *pv, gfp_t gfp)
220{
221 int err;
222
223 pagevec_init(pv);
224
225 err = pool_refill(pv, gfp);
226 if (err)
227 pool_fini(pv);
228
229 return err;
230}
231
232static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
233{
234 STUB();
235 return NULL;
236#ifdef notyet
237 struct vm_page *p;
238
239 p = alloc_page(gfp);
240 if (!p && pagevec_count(pv))
241 p = pv->pages[--pv->nr];
242
243 return p ? page_address(p) : NULL;
244#endif
245}
246
247static void pool_free(struct pagevec *pv, void *addr)
248{
249 STUB();
250#ifdef notyet
251 struct vm_page *p = virt_to_page(addr);
252
253 if (pagevec_space(pv))
254 pagevec_add(pv, p);
255 else
256 __free_page(p);
257#endif
258}
259
260#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
261
262struct i915_vma_compress {
263 struct pagevec pool;
264 struct z_stream_s zstream;
265 void *tmp;
266};
267
268static bool compress_init(struct i915_vma_compress *c)
269{
270 struct z_stream_s *zstream = &c->zstream;
271
272 if (intel_pool_init(&c->pool, ALLOW_FAIL))
273 return false;
274
275 zstream->workspace =
276 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
277 ALLOW_FAIL);
278 if (!zstream->workspace) {
279 pool_fini(&c->pool);
280 return false;
281 }
282
283 c->tmp = NULL;
284 if (i915_has_memcpy_from_wc())
285 c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
286
287 return true;
288}
289
290static bool compress_start(struct i915_vma_compress *c)
291{
292 struct z_stream_s *zstream = &c->zstream;
293 void *workspace = zstream->workspace;
294
295 memset(zstream, 0, sizeof(*zstream));
296 zstream->workspace = workspace;
297
298 return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
299}
300
301static void *compress_next_page(struct i915_vma_compress *c,
302 struct i915_vma_coredump *dst)
303{
304 void *page_addr;
305 struct vm_page *page;
306
307 page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
308 if (!page_addr)
309 return ERR_PTR(-ENOMEM);
310
311 page = virt_to_page(page_addr);
312 list_add_tail(&page->lru, &dst->page_list);
313 return page_addr;
314}
315
316static int compress_page(struct i915_vma_compress *c,
317 void *src,
318 struct i915_vma_coredump *dst,
319 bool wc)
320{
321 struct z_stream_s *zstream = &c->zstream;
322
323 zstream->next_in = src;
324 if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
325 zstream->next_in = c->tmp;
326 zstream->avail_in = PAGE_SIZE;
327
328 do {
329 if (zstream->avail_out == 0) {
330 zstream->next_out = compress_next_page(c, dst);
331 if (IS_ERR(zstream->next_out))
332 return PTR_ERR(zstream->next_out);
333
334 zstream->avail_out = PAGE_SIZE;
335 }
336
337 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
338 return -EIO;
339
340 cond_resched();
341 } while (zstream->avail_in);
342
343 /* Fallback to uncompressed if we increase size? */
344 if (0 && zstream->total_out > zstream->total_in)
345 return -E2BIG;
346
347 return 0;
348}
349
350static int compress_flush(struct i915_vma_compress *c,
351 struct i915_vma_coredump *dst)
352{
353 struct z_stream_s *zstream = &c->zstream;
354
355 do {
356 switch (zlib_deflate(zstream, Z_FINISH)) {
357 case Z_OK: /* more space requested */
358 zstream->next_out = compress_next_page(c, dst);
359 if (IS_ERR(zstream->next_out))
360 return PTR_ERR(zstream->next_out);
361
362 zstream->avail_out = PAGE_SIZE;
363 break;
364
365 case Z_STREAM_END:
366 goto end;
367
368 default: /* any error */
369 return -EIO;
370 }
371 } while (1);
372
373end:
374 memset(zstream->next_out, 0, zstream->avail_out);
375 dst->unused = zstream->avail_out;
376 return 0;
377}
378
379static void compress_finish(struct i915_vma_compress *c)
380{
381 zlib_deflateEnd(&c->zstream);
382}
383
384static void compress_fini(struct i915_vma_compress *c)
385{
386 kfree(c->zstream.workspace);
387 if (c->tmp)
388 pool_free(&c->pool, c->tmp);
389 pool_fini(&c->pool);
390}
391
392static void err_compression_marker(struct drm_i915_error_state_buf *m)
393{
394 err_puts(m, ":")i915_error_puts(m, ":");
395}
396
397#else
398
399struct i915_vma_compress {
400 struct pagevec pool;
401};
402
403static bool compress_init(struct i915_vma_compress *c)
404{
405 return intel_pool_init(&c->pool, ALLOW_FAIL) == 0;
406}
407
408static bool compress_start(struct i915_vma_compress *c)
409{
410 return true;
411}
412
413static int compress_page(struct i915_vma_compress *c,
414 void *src,
415 struct i915_vma_coredump *dst,
416 bool wc)
417{
418 STUB();
419 return -ENOSYS;
420#ifdef notyet
421 void *ptr;
422
423 ptr = pool_alloc(&c->pool, ALLOW_FAIL);
424 if (!ptr)
425 return -ENOMEM;
426
427 if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
428 memcpy(ptr, src, PAGE_SIZE);
429 list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
430 cond_resched();
431
432 return 0;
433#endif
434}
435
436static int compress_flush(struct i915_vma_compress *c,
437 struct i915_vma_coredump *dst)
438{
439 return 0;
440}
441
442static void compress_finish(struct i915_vma_compress *c)
443{
444}
445
446static void compress_fini(struct i915_vma_compress *c)
447{
448 pool_fini(&c->pool);
449}
450
451static void err_compression_marker(struct drm_i915_error_state_buf *m)
452{
453 err_puts(m, "~")i915_error_puts(m, "~");
454}
455
456#endif
457
458static void error_print_instdone(struct drm_i915_error_state_buf *m,
459 const struct intel_engine_coredump *ee)
460{
461 int slice;
462 int subslice;
463 int iter;
464
465 err_printf(m, " INSTDONE: 0x%08x\n",i915_error_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.
instdone)
466 ee->instdone.instdone)i915_error_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.
instdone)
;
467
468 if (ee->engine->class != RENDER_CLASS0 || GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) <= 3)
469 return;
470
471 err_printf(m, " SC_INSTDONE: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE: 0x%08x\n", ee->instdone
.slice_common)
472 ee->instdone.slice_common)i915_error_printf(m, " SC_INSTDONE: 0x%08x\n", ee->instdone
.slice_common)
;
473
474 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) <= 6)
475 return;
476
477 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)for (iter = 0, intel_gt_mcr_get_ss_steering(ee->engine->
gt, 0, &slice, &subslice); iter < (2 * 32); iter++
, intel_gt_mcr_get_ss_steering(ee->engine->gt, iter, &
slice, &subslice)) if (!(( (((&(ee->engine->gt->
i915)->__runtime)->graphics.ip.ver) << 8 | ((&
(ee->engine->gt->i915)->__runtime)->graphics.ip
.rel)) >= ((12) << 8 | (50)) ? intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, 0, iter) : intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, slice, subslice))
)) {} else
478 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
479 slice, subslice,i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
480 ee->instdone.sampler[slice][subslice])i915_error_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.sampler[slice][subslice])
;
481
482 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)for (iter = 0, intel_gt_mcr_get_ss_steering(ee->engine->
gt, 0, &slice, &subslice); iter < (2 * 32); iter++
, intel_gt_mcr_get_ss_steering(ee->engine->gt, iter, &
slice, &subslice)) if (!(( (((&(ee->engine->gt->
i915)->__runtime)->graphics.ip.ver) << 8 | ((&
(ee->engine->gt->i915)->__runtime)->graphics.ip
.rel)) >= ((12) << 8 | (50)) ? intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, 0, iter) : intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, slice, subslice))
)) {} else
483 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
484 slice, subslice,i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
485 ee->instdone.row[slice][subslice])i915_error_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice
, subslice, ee->instdone.row[slice][subslice])
;
486
487 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) < 12)
488 return;
489
490 if (GRAPHICS_VER_FULL(m->i915)(((&(m->i915)->__runtime)->graphics.ip.ver) <<
8 | ((&(m->i915)->__runtime)->graphics.ip.rel))
>= IP_VER(12, 55)((12) << 8 | (55))) {
491 for_each_ss_steering(iter, ee->engine->gt, slice, subslice)for (iter = 0, intel_gt_mcr_get_ss_steering(ee->engine->
gt, 0, &slice, &subslice); iter < (2 * 32); iter++
, intel_gt_mcr_get_ss_steering(ee->engine->gt, iter, &
slice, &subslice)) if (!(( (((&(ee->engine->gt->
i915)->__runtime)->graphics.ip.ver) << 8 | ((&
(ee->engine->gt->i915)->__runtime)->graphics.ip
.rel)) >= ((12) << 8 | (50)) ? intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, 0, iter) : intel_sseu_has_subslice
(&(ee->engine->gt)->info.sseu, slice, subslice))
)) {} else
492 err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",i915_error_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n"
, slice, subslice, ee->instdone.geom_svg[slice][subslice])
493 slice, subslice,i915_error_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n"
, slice, subslice, ee->instdone.geom_svg[slice][subslice])
494 ee->instdone.geom_svg[slice][subslice])i915_error_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n"
, slice, subslice, ee->instdone.geom_svg[slice][subslice])
;
495 }
496
497 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", ee->
instdone.slice_common_extra[0])
498 ee->instdone.slice_common_extra[0])i915_error_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", ee->
instdone.slice_common_extra[0])
;
499 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",i915_error_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", ee->
instdone.slice_common_extra[1])
500 ee->instdone.slice_common_extra[1])i915_error_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", ee->
instdone.slice_common_extra[1])
;
501}
502
503static void error_print_request(struct drm_i915_error_state_buf *m,
504 const char *prefix,
505 const struct i915_request_coredump *erq)
506{
507 if (!erq->seqno)
508 return;
509
510 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
511 prefix, erq->pid, erq->context, erq->seqno,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
512 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
513 &erq->flags) ? "!" : "",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
514 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
515 &erq->flags) ? "+" : "",i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
516 erq->sched_attr.priority,i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
517 erq->head, erq->tail)i915_error_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n"
, prefix, erq->pid, erq->context, erq->seqno, test_bit
(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &erq->flags
) ? "+" : "", erq->sched_attr.priority, erq->head, erq->
tail)
;
518}
519
520static void error_print_context(struct drm_i915_error_state_buf *m,
521 const char *header,
522 const struct i915_gem_context_coredump *ctx)
523{
524 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime, ctx->
avg_runtime)
525 header, ctx->comm, ctx->pid, ctx->sched_attr.priority,i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime, ctx->
avg_runtime)
526 ctx->guilty, ctx->active,i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime, ctx->
avg_runtime)
527 ctx->total_runtime, ctx->avg_runtime)i915_error_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n"
, header, ctx->comm, ctx->pid, ctx->sched_attr.priority
, ctx->guilty, ctx->active, ctx->total_runtime, ctx->
avg_runtime)
;
528}
529
530static struct i915_vma_coredump *
531__find_vma(struct i915_vma_coredump *vma, const char *name)
532{
533 while (vma) {
534 if (strcmp(vma->name, name) == 0)
535 return vma;
536 vma = vma->next;
537 }
538
539 return NULL;
540}
541
542struct i915_vma_coredump *
543intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
544{
545 return __find_vma(ee->vma, "batch");
546}
547
548static void error_print_engine(struct drm_i915_error_state_buf *m,
549 const struct intel_engine_coredump *ee)
550{
551 struct i915_vma_coredump *batch;
552 int n;
553
554 err_printf(m, "%s command stream:\n", ee->engine->name)i915_error_printf(m, "%s command stream:\n", ee->engine->
name)
;
555 err_printf(m, " CCID: 0x%08x\n", ee->ccid)i915_error_printf(m, " CCID: 0x%08x\n", ee->ccid);
556 err_printf(m, " START: 0x%08x\n", ee->start)i915_error_printf(m, " START: 0x%08x\n", ee->start);
557 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head)i915_error_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head
, ee->rq_head)
;
558 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",i915_error_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", ee
->tail, ee->rq_post, ee->rq_tail)
559 ee->tail, ee->rq_post, ee->rq_tail)i915_error_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n", ee
->tail, ee->rq_post, ee->rq_tail)
;
560 err_printf(m, " CTL: 0x%08x\n", ee->ctl)i915_error_printf(m, " CTL: 0x%08x\n", ee->ctl);
561 err_printf(m, " MODE: 0x%08x\n", ee->mode)i915_error_printf(m, " MODE: 0x%08x\n", ee->mode);
562 err_printf(m, " HWS: 0x%08x\n", ee->hws)i915_error_printf(m, " HWS: 0x%08x\n", ee->hws);
563 err_printf(m, " ACTHD: 0x%08x %08x\n",i915_error_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ee->acthd
>>32), (u32)ee->acthd)
564 (u32)(ee->acthd>>32), (u32)ee->acthd)i915_error_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ee->acthd
>>32), (u32)ee->acthd)
;
565 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir)i915_error_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
566 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr)i915_error_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
567 err_printf(m, " ESR: 0x%08x\n", ee->esr)i915_error_printf(m, " ESR: 0x%08x\n", ee->esr);
568
569 error_print_instdone(m, ee);
570
571 batch = intel_gpu_error_find_batch(ee);
572 if (batch) {
573 u64 start = batch->gtt_offset;
574 u64 end = start + batch->gtt_size;
575
576 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
577 upper_32_bits(start), lower_32_bits(start),i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
578 upper_32_bits(end), lower_32_bits(end))i915_error_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
((u32)(((start) >> 16) >> 16)), ((u32)(start)), (
(u32)(((end) >> 16) >> 16)), ((u32)(end)))
;
579 }
580 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 4) {
581 err_printf(m, " BBADDR: 0x%08x_%08x\n",i915_error_printf(m, " BBADDR: 0x%08x_%08x\n", (u32)(ee->
bbaddr>>32), (u32)ee->bbaddr)
582 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr)i915_error_printf(m, " BBADDR: 0x%08x_%08x\n", (u32)(ee->
bbaddr>>32), (u32)ee->bbaddr)
;
583 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate)i915_error_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
584 err_printf(m, " INSTPS: 0x%08x\n", ee->instps)i915_error_printf(m, " INSTPS: 0x%08x\n", ee->instps);
585 }
586 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm)i915_error_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
587 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),i915_error_printf(m, " FADDR: 0x%08x %08x\n", ((u32)(((ee->
faddr) >> 16) >> 16)), ((u32)(ee->faddr)))
588 lower_32_bits(ee->faddr))i915_error_printf(m, " FADDR: 0x%08x %08x\n", ((u32)(((ee->
faddr) >> 16) >> 16)), ((u32)(ee->faddr)))
;
589 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 6) {
590 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi)i915_error_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
591 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg)i915_error_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg
)
;
592 }
593 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 11) {
594 err_printf(m, " NOPID: 0x%08x\n", ee->nopid)i915_error_printf(m, " NOPID: 0x%08x\n", ee->nopid);
595 err_printf(m, " EXCC: 0x%08x\n", ee->excc)i915_error_printf(m, " EXCC: 0x%08x\n", ee->excc);
596 err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl)i915_error_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
597 err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop)i915_error_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop);
598 err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl)i915_error_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl
)
;
599 err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi)i915_error_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi
)
;
600 err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo)i915_error_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo
)
;
601 }
602 if (HAS_PPGTT(m->i915)(((&(m->i915)->__runtime)->ppgtt_type) != INTEL_PPGTT_NONE
)
) {
603 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode)i915_error_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode
)
;
604
605 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 8) {
606 int i;
607 for (i = 0; i < 4; i++)
608 err_printf(m, " PDP%d: 0x%016llx\n",i915_error_printf(m, " PDP%d: 0x%016llx\n", i, ee->vm_info
.pdp[i])
609 i, ee->vm_info.pdp[i])i915_error_printf(m, " PDP%d: 0x%016llx\n", i, ee->vm_info
.pdp[i])
;
610 } else {
611 err_printf(m, " PP_DIR_BASE: 0x%08x\n",i915_error_printf(m, " PP_DIR_BASE: 0x%08x\n", ee->vm_info
.pp_dir_base)
612 ee->vm_info.pp_dir_base)i915_error_printf(m, " PP_DIR_BASE: 0x%08x\n", ee->vm_info
.pp_dir_base)
;
613 }
614 }
615
616 for (n = 0; n < ee->num_ports; n++) {
617 err_printf(m, " ELSP[%d]:", n)i915_error_printf(m, " ELSP[%d]:", n);
618 error_print_request(m, " ", &ee->execlist[n]);
619 }
620}
621
622void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
623{
624 va_list args;
625
626 va_start(args, f);
627 i915_error_vprintf(e, f, args);
628 va_end(args);
629}
630
631void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
632 const struct intel_engine_cs *engine,
633 const struct i915_vma_coredump *vma)
634{
635 STUB();
636#ifdef notyet
637 char out[ASCII85_BUFSZ];
638 struct vm_page *page;
639
640 if (!vma)
641 return;
642
643 err_printf(m, "%s --- %s = 0x%08x %08x\n",i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
644 engine ? engine->name : "global", vma->name,i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
645 upper_32_bits(vma->gtt_offset),i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
646 lower_32_bits(vma->gtt_offset))i915_error_printf(m, "%s --- %s = 0x%08x %08x\n", engine ? engine
->name : "global", vma->name, ((u32)(((vma->gtt_offset
) >> 16) >> 16)), ((u32)(vma->gtt_offset)))
;
647
648 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K(1ULL << (12)))
649 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes)i915_error_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes
)
;
650
651 err_compression_marker(m);
652 list_for_each_entry(page, &vma->page_list, lru)for (page = ({ const __typeof( ((__typeof(*page) *)0)->lru
) *__mptr = ((&vma->page_list)->next); (__typeof(*
page) *)( (char *)__mptr - __builtin_offsetof(__typeof(*page)
, lru) );}); &page->lru != (&vma->page_list); page
= ({ const __typeof( ((__typeof(*page) *)0)->lru ) *__mptr
= (page->lru.next); (__typeof(*page) *)( (char *)__mptr -
__builtin_offsetof(__typeof(*page), lru) );}))
{
653 int i, len;
654 const u32 *addr = page_address(page);
655
656 len = PAGE_SIZE(1 << 12);
657 if (page == list_last_entry(&vma->page_list, typeof(*page), lru)({ const __typeof( ((typeof(*page) *)0)->lru ) *__mptr = (
(&vma->page_list)->prev); (typeof(*page) *)( (char *
)__mptr - __builtin_offsetof(typeof(*page), lru) );})
)
658 len -= vma->unused;
659 len = ascii85_encode_len(len);
660
661 for (i = 0; i < len; i++)
662 err_puts(m, ascii85_encode(addr[i], out))i915_error_puts(m, ascii85_encode(addr[i], out));
663 }
664 err_puts(m, "\n")i915_error_puts(m, "\n");
665#endif
666}
667
668static void err_print_capabilities(struct drm_i915_error_state_buf *m,
669 struct i915_gpu_coredump *error)
670{
671 struct drm_printer p = i915_error_printer(m);
672
673 intel_device_info_print(&error->device_info, &error->runtime_info, &p);
674 intel_driver_caps_print(&error->driver_caps, &p);
675}
676
677static void err_print_params(struct drm_i915_error_state_buf *m,
678 const struct i915_params *params)
679{
680 struct drm_printer p = i915_error_printer(m);
681
682 i915_params_dump(params, &p);
683}
684
685static void err_print_pciid(struct drm_i915_error_state_buf *m,
686 struct drm_i915_private *i915)
687{
688 struct pci_dev *pdev = i915->drm.pdev;
689
690 err_printf(m, "PCI ID: 0x%04x\n", pdev->device)i915_error_printf(m, "PCI ID: 0x%04x\n", pdev->device);
691 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision)i915_error_printf(m, "PCI Revision: 0x%02x\n", pdev->revision
)
;
692 err_printf(m, "PCI Subsystem: %04x:%04x\n",i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
693 pdev->subsystem_vendor,i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
694 pdev->subsystem_device)i915_error_printf(m, "PCI Subsystem: %04x:%04x\n", pdev->subsystem_vendor
, pdev->subsystem_device)
;
695}
696
697static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
698 const char *name,
699 const struct intel_ctb_coredump *ctb)
700{
701 if (!ctb->size)
702 return;
703
704 err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",i915_error_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n"
, name, ctb->raw_status, ctb->raw_head, ctb->raw_tail
, ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset
, ctb->size)
705 name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,i915_error_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n"
, name, ctb->raw_status, ctb->raw_head, ctb->raw_tail
, ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset
, ctb->size)
706 ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size)i915_error_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n"
, name, ctb->raw_status, ctb->raw_head, ctb->raw_tail
, ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset
, ctb->size)
;
707}
708
709static void err_print_uc(struct drm_i915_error_state_buf *m,
710 const struct intel_uc_coredump *error_uc)
711{
712 struct drm_printer p = i915_error_printer(m);
713
714 intel_uc_fw_dump(&error_uc->guc_fw, &p);
715 intel_uc_fw_dump(&error_uc->huc_fw, &p);
716 err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp)i915_error_printf(m, "GuC timestamp: 0x%08x\n", error_uc->
guc.timestamp)
;
717 intel_gpu_error_print_vma(m, NULL((void *)0), error_uc->guc.vma_log);
718 err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence)i915_error_printf(m, "GuC CTB fence: %d\n", error_uc->guc.
last_fence)
;
719 err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
720 err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
721 intel_gpu_error_print_vma(m, NULL((void *)0), error_uc->guc.vma_ctb);
722}
723
724static void err_free_sgl(struct scatterlist *sgl)
725{
726 STUB();
727#ifdef notyet
728 while (sgl) {
729 struct scatterlist *sg;
730
731 for (sg = sgl; !sg_is_chain(sg); sg++) {
732 kfree(sg_virt(sg));
733 if (sg_is_last(sg))
734 break;
735 }
736
737 sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
738 free_page((unsigned long)sgl);
739 sgl = sg;
740 }
741#endif
742}
743
744static void err_print_gt_info(struct drm_i915_error_state_buf *m,
745 struct intel_gt_coredump *gt)
746{
747 struct drm_printer p = i915_error_printer(m);
748
749 intel_gt_info_print(&gt->info, &p);
750 intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
751}
752
753static void err_print_gt_display(struct drm_i915_error_state_buf *m,
754 struct intel_gt_coredump *gt)
755{
756 err_printf(m, "IER: 0x%08x\n", gt->ier)i915_error_printf(m, "IER: 0x%08x\n", gt->ier);
757 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr)i915_error_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
758}
759
760static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
761 struct intel_gt_coredump *gt)
762{
763 int i;
764
765 err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake))i915_error_printf(m, "GT awake: %s\n", str_yes_no(gt->awake
))
;
766 err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",i915_error_printf(m, "CS timestamp frequency: %u Hz, %d ns\n"
, gt->clock_frequency, gt->clock_period_ns)
767 gt->clock_frequency, gt->clock_period_ns)i915_error_printf(m, "CS timestamp frequency: %u Hz, %d ns\n"
, gt->clock_frequency, gt->clock_period_ns)
;
768 err_printf(m, "EIR: 0x%08x\n", gt->eir)i915_error_printf(m, "EIR: 0x%08x\n", gt->eir);
769 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er)i915_error_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
770
771 for (i = 0; i < gt->ngtier; i++)
772 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i])i915_error_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i
])
;
773}
774
775static void err_print_gt_global(struct drm_i915_error_state_buf *m,
776 struct intel_gt_coredump *gt)
777{
778 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake)i915_error_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
779
780 if (IS_GRAPHICS_VER(m->i915, 6, 11)(((&(m->i915)->__runtime)->graphics.ip.ver) >=
(6) && ((&(m->i915)->__runtime)->graphics
.ip.ver) <= (11))
) {
781 err_printf(m, "ERROR: 0x%08x\n", gt->error)i915_error_printf(m, "ERROR: 0x%08x\n", gt->error);
782 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg)i915_error_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
783 }
784
785 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 8)
786 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",i915_error_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", gt->
fault_data1, gt->fault_data0)
787 gt->fault_data1, gt->fault_data0)i915_error_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", gt->
fault_data1, gt->fault_data0)
;
788
789 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) == 7)
790 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int)i915_error_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
791
792 if (IS_GRAPHICS_VER(m->i915, 8, 11)(((&(m->i915)->__runtime)->graphics.ip.ver) >=
(8) && ((&(m->i915)->__runtime)->graphics
.ip.ver) <= (11))
)
793 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache)i915_error_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache
)
;
794
795 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) == 12)
796 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err)i915_error_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
797
798 if (GRAPHICS_VER(m->i915)((&(m->i915)->__runtime)->graphics.ip.ver) >= 12) {
799 int i;
800
801 for (i = 0; i < I915_MAX_SFC(8 / 2); i++) {
802 /*
803 * SFC_DONE resides in the VD forcewake domain, so it
804 * only exists if the corresponding VCS engine is
805 * present.
806 */
807 if ((gt->_gt->info.sfc_mask & BIT(i)(1UL << (i))) == 0 ||
808 !HAS_ENGINE(gt->_gt, _VCS(i * 2))(((gt->_gt)->info.engine_mask) & (1UL << ((VCS0
+ (i * 2)))))
)
809 continue;
810
811 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,i915_error_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, gt->sfc_done
[i])
812 gt->sfc_done[i])i915_error_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, gt->sfc_done
[i])
;
813 }
814
815 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done)i915_error_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
816 }
817}
818
819static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
820 struct intel_gt_coredump *gt)
821{
822 int i;
823
824 for (i = 0; i < gt->nfence; i++)
825 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i])i915_error_printf(m, " fence[%d] = %08llx\n", i, gt->fence
[i])
;
826}
827
828static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
829 struct intel_gt_coredump *gt)
830{
831 const struct intel_engine_coredump *ee;
832
833 for (ee = gt->engine; ee; ee = ee->next) {
834 const struct i915_vma_coredump *vma;
835
836 if (ee->guc_capture_node)
837 intel_guc_capture_print_engine_node(m, ee);
838 else
839 error_print_engine(m, ee);
840
841 err_printf(m, " hung: %u\n", ee->hung)i915_error_printf(m, " hung: %u\n", ee->hung);
842 err_printf(m, " engine reset count: %u\n", ee->reset_count)i915_error_printf(m, " engine reset count: %u\n", ee->reset_count
)
;
843 error_print_context(m, " Active context: ", &ee->context);
844
845 for (vma = ee->vma; vma; vma = vma->next)
846 intel_gpu_error_print_vma(m, ee->engine, vma);
847 }
848
849}
850
851static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
852 struct i915_gpu_coredump *error)
853{
854 const struct intel_engine_coredump *ee;
855 struct timespec64 ts;
856
857 if (*error->error_msg)
858 err_printf(m, "%s\n", error->error_msg)i915_error_printf(m, "%s\n", error->error_msg);
859#ifdef __linux__
860 err_printf(m, "Kernel: %s %s\n",i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
861 init_utsname()->release,i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
862 init_utsname()->machine)i915_error_printf(m, "Kernel: %s %s\n", init_utsname()->release
, init_utsname()->machine)
;
863#else
864 extern char machine[];
865 err_printf(m, "Kernel: %s %s\n",i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine)
866 osrelease,i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine)
867 machine)i915_error_printf(m, "Kernel: %s %s\n", osrelease, machine);
868#endif
869 err_printf(m, "Driver: %s\n", DRIVER_DATE)i915_error_printf(m, "Driver: %s\n", "20201103");
870 ts = ktime_to_timespec64(error->time);
871 err_printf(m, "Time: %lld s %ld us\n",i915_error_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec,
ts.tv_nsec / 1000L)
872 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec,
ts.tv_nsec / 1000L)
;
873 ts = ktime_to_timespec64(error->boottime);
874 err_printf(m, "Boottime: %lld s %ld us\n",i915_error_printf(m, "Boottime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
875 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Boottime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
;
876 ts = ktime_to_timespec64(error->uptime);
877 err_printf(m, "Uptime: %lld s %ld us\n",i915_error_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
878 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC)i915_error_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec
, ts.tv_nsec / 1000L)
;
879 err_printf(m, "Capture: %lu jiffies; %d ms ago\n",i915_error_printf(m, "Capture: %lu jiffies; %d ms ago\n", error
->capture, jiffies_to_msecs(jiffies - error->capture))
880 error->capture, jiffies_to_msecs(jiffies - error->capture))i915_error_printf(m, "Capture: %lu jiffies; %d ms ago\n", error
->capture, jiffies_to_msecs(jiffies - error->capture))
;
881
882 for (ee = error->gt ? error->gt->engine : NULL((void *)0); ee; ee = ee->next)
883 err_printf(m, "Active process (on ring %s): %s [%d]\n",i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
884 ee->engine->name,i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
885 ee->context.comm,i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
886 ee->context.pid)i915_error_printf(m, "Active process (on ring %s): %s [%d]\n"
, ee->engine->name, ee->context.comm, ee->context
.pid)
;
887
888 err_printf(m, "Reset count: %u\n", error->reset_count)i915_error_printf(m, "Reset count: %u\n", error->reset_count
)
;
889 err_printf(m, "Suspend count: %u\n", error->suspend_count)i915_error_printf(m, "Suspend count: %u\n", error->suspend_count
)
;
890 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform))i915_error_printf(m, "Platform: %s\n", intel_platform_name(error
->device_info.platform))
;
891 err_printf(m, "Subplatform: 0x%x\n",i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
892 intel_subplatform(&error->runtime_info,i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
893 error->device_info.platform))i915_error_printf(m, "Subplatform: 0x%x\n", intel_subplatform
(&error->runtime_info, error->device_info.platform)
)
;
894 err_print_pciid(m, m->i915);
895
896 err_printf(m, "IOMMU enabled?: %d\n", error->iommu)i915_error_printf(m, "IOMMU enabled?: %d\n", error->iommu);
897
898 intel_dmc_print_error_state(m, m->i915);
899
900 err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock))i915_error_printf(m, "RPM wakelock: %s\n", str_yes_no(error->
wakelock))
;
901 err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended))i915_error_printf(m, "PM suspended: %s\n", str_yes_no(error->
suspended))
;
902
903 if (error->gt) {
904 bool_Bool print_guc_capture = false0;
905
906 if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
907 print_guc_capture = true1;
908
909 err_print_gt_display(m, error->gt);
910 err_print_gt_global_nonguc(m, error->gt);
911 err_print_gt_fences(m, error->gt);
912
913 /*
914 * GuC dumped global, eng-class and eng-instance registers together
915 * as part of engine state dump so we print in err_print_gt_engines
916 */
917 if (!print_guc_capture)
918 err_print_gt_global(m, error->gt);
919
920 err_print_gt_engines(m, error->gt);
921
922 if (error->gt->uc)
923 err_print_uc(m, error->gt->uc);
924
925 err_print_gt_info(m, error->gt);
926 }
927
928 if (error->overlay)
929 intel_overlay_print_error_state(m, error->overlay);
930
931 err_print_capabilities(m, error);
932 err_print_params(m, &error->params);
933}
934
935static int err_print_to_sgl(struct i915_gpu_coredump *error)
936{
937 struct drm_i915_error_state_buf m;
938
939 if (IS_ERR(error))
940 return PTR_ERR(error);
941
942 if (READ_ONCE(error->sgl))
943 return 0;
944
945 memset(&m, 0, sizeof(m));
946 m.i915 = error->i915;
947
948 __err_print_to_sgl(&m, error);
949
950 if (m.buf) {
951 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
952 m.bytes = 0;
953 m.buf = NULL;
954 }
955 if (m.cur) {
956 GEM_BUG_ON(m.end < m.cur);
957 sg_mark_end(m.cur - 1);
958 }
959 GEM_BUG_ON(m.sgl && !m.cur);
960
961 if (m.err) {
962 err_free_sgl(m.sgl);
963 return m.err;
964 }
965
966 if (cmpxchg(&error->sgl, NULL, m.sgl))
967 err_free_sgl(m.sgl);
968
969 return 0;
970}
971
972ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
973 char *buf, loff_t off, size_t rem)
974{
975 STUB();
976 return -ENOSYS;
977#ifdef notyet
978 struct scatterlist *sg;
979 size_t count;
980 loff_t pos;
981 int err;
982
983 if (!error || !rem)
984 return 0;
985
986 err = err_print_to_sgl(error);
987 if (err)
988 return err;
989
990 sg = READ_ONCE(error->fit);
991 if (!sg || off < sg->dma_address)
992 sg = error->sgl;
993 if (!sg)
994 return 0;
995
996 pos = sg->dma_address;
997 count = 0;
998 do {
999 size_t len, start;
1000
1001 if (sg_is_chain(sg)) {
1002 sg = sg_chain_ptr(sg);
1003 GEM_BUG_ON(sg_is_chain(sg));
1004 }
1005
1006 len = sg->length;
1007 if (pos + len <= off) {
1008 pos += len;
1009 continue;
1010 }
1011
1012 start = sg->offset;
1013 if (pos < off) {
1014 GEM_BUG_ON(off - pos > len);
1015 len -= off - pos;
1016 start += off - pos;
1017 pos = off;
1018 }
1019
1020 len = min(len, rem);
1021 GEM_BUG_ON(!len || len > sg->length);
1022
1023 memcpy(buf, page_address(sg_page(sg)) + start, len);
1024
1025 count += len;
1026 pos += len;
1027
1028 buf += len;
1029 rem -= len;
1030 if (!rem) {
1031 WRITE_ONCE(error->fit, sg);
1032 break;
1033 }
1034 } while (!sg_is_last(sg++));
1035
1036 return count;
1037#endif
1038}
1039
1040static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
1041{
1042 STUB();
1043#ifdef notyet
1044 while (vma) {
1045 struct i915_vma_coredump *next = vma->next;
1046 struct vm_page *page, *n;
1047
1048 list_for_each_entry_safe(page, n, &vma->page_list, lru) {
1049 list_del_init(&page->lru);
1050 __free_page(page);
1051 }
1052
1053 kfree(vma);
1054 vma = next;
1055 }
1056#endif
1057}
1058
1059static void cleanup_params(struct i915_gpu_coredump *error)
1060{
1061 i915_params_free(&error->params);
1062}
1063
1064static void cleanup_uc(struct intel_uc_coredump *uc)
1065{
1066 kfree(uc->guc_fw.file_selected.path);
1067 kfree(uc->huc_fw.file_selected.path);
1068 kfree(uc->guc_fw.file_wanted.path);
1069 kfree(uc->huc_fw.file_wanted.path);
1070 i915_vma_coredump_free(uc->guc.vma_log);
1071 i915_vma_coredump_free(uc->guc.vma_ctb);
1072
1073 kfree(uc);
1074}
1075
1076static void cleanup_gt(struct intel_gt_coredump *gt)
1077{
1078 while (gt->engine) {
1079 struct intel_engine_coredump *ee = gt->engine;
1080
1081 gt->engine = ee->next;
1082
1083 i915_vma_coredump_free(ee->vma);
1084 intel_guc_capture_free_node(ee);
1085 kfree(ee);
1086 }
1087
1088 if (gt->uc)
1089 cleanup_uc(gt->uc);
1090
1091 kfree(gt);
1092}
1093
1094void __i915_gpu_coredump_free(struct kref *error_ref)
1095{
1096 struct i915_gpu_coredump *error =
1097 container_of(error_ref, typeof(*error), ref);
1098
1099 while (error->gt) {
1100 struct intel_gt_coredump *gt = error->gt;
1101
1102 error->gt = gt->next;
1103 cleanup_gt(gt);
1104 }
1105
1106 kfree(error->overlay);
1107
1108 cleanup_params(error);
1109
1110 err_free_sgl(error->sgl);
1111 kfree(error);
1112}
1113
1114static struct i915_vma_coredump *
1115i915_vma_coredump_create(const struct intel_gt *gt,
1116 const struct i915_vma_resource *vma_res,
1117 struct i915_vma_compress *compress,
1118 const char *name)
1119
1120{
1121 STUB();
1122 return NULL;
1123#ifdef notyet
1124 struct i915_ggtt *ggtt = gt->ggtt;
1125 const u64 slot = ggtt->error_capture.start;
1126 struct i915_vma_coredump *dst;
1127 struct sgt_iter iter;
1128 int ret;
1129
1130 might_sleep();
1131
1132 if (!vma_res || !vma_res->bi.pages || !compress)
1133 return NULL;
1134
1135 dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
1136 if (!dst)
1137 return NULL;
1138
1139 if (!compress_start(compress)) {
1140 kfree(dst);
1141 return NULL;
1142 }
1143
1144 INIT_LIST_HEAD(&dst->page_list);
1145 strlcpy(dst->name, name, sizeof(dst->name));
1146 dst->next = NULL;
1147
1148 dst->gtt_offset = vma_res->start;
1149 dst->gtt_size = vma_res->node_size;
1150 dst->gtt_page_sizes = vma_res->page_sizes_gtt;
1151 dst->unused = 0;
1152
1153 ret = -EINVAL;
1154 if (drm_mm_node_allocated(&ggtt->error_capture)) {
1155 void __iomem *s;
1156 dma_addr_t dma;
1157
1158 for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
1159 mutex_lock(&ggtt->error_mutex);
1160 if (ggtt->vm.raw_insert_page)
1161 ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
1162 I915_CACHE_NONE, 0);
1163 else
1164 ggtt->vm.insert_page(&ggtt->vm, dma, slot,
1165 I915_CACHE_NONE, 0);
1166 mb();
1167
1168 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
1169 ret = compress_page(compress,
1170 (void __force *)s, dst,
1171 true);
1172 io_mapping_unmap(s);
1173
1174 mb();
1175 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1176 mutex_unlock(&ggtt->error_mutex);
1177 if (ret)
1178 break;
1179 }
1180 } else if (vma_res->bi.lmem) {
1181 struct intel_memory_region *mem = vma_res->mr;
1182 dma_addr_t dma;
1183
1184 for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
1185 dma_addr_t offset = dma - mem->region.start;
1186 void __iomem *s;
1187
1188 if (offset + PAGE_SIZE > mem->io_size) {
1189 ret = -EINVAL22;
1190 break;
1191 }
1192
1193 s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
1194 ret = compress_page(compress,
1195 (void __force *)s, dst,
1196 true);
1197 io_mapping_unmap(s);
1198 if (ret)
1199 break;
1200 }
1201 } else {
1202 struct vm_page *page;
1203
1204 for_each_sgt_page(page, iter, vma_res->bi.pages) {
1205 void *s;
1206
1207 drm_clflush_pages(&page, 1);
1208
1209 s = kmap(page);
1210 ret = compress_page(compress, s, dst, false0);
1211 kunmap(page);
1212
1213 drm_clflush_pages(&page, 1);
1214
1215 if (ret)
1216 break;
1217 }
1218 }
1219
1220 if (ret || compress_flush(compress, dst)) {
1221 struct vm_page *page, *n;
1222
1223 list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru)for (page = ({ const __typeof( ((__typeof(*page) *)0)->lru
) *__mptr = ((&dst->page_list)->prev); (__typeof(*
page) *)( (char *)__mptr - __builtin_offsetof(__typeof(*page)
, lru) );}), n = ({ const __typeof( ((__typeof(*page) *)0)->
lru ) *__mptr = ((page)->lru.prev); (__typeof(*page) *)( (
char *)__mptr - __builtin_offsetof(__typeof(*page), lru) );})
; &(page)->lru != (&dst->page_list); page = n, n
= ({ const __typeof( ((__typeof(*n) *)0)->lru ) *__mptr =
(n->lru.prev); (__typeof(*n) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*n), lru) );}))
{
1224 list_del_init(&page->lru);
1225 pool_free(&compress->pool, page_address(page));
1226 }
1227
1228 kfree(dst);
1229 dst = NULL((void *)0);
1230 }
1231 compress_finish(compress);
1232
1233 return dst;
1234#endif
1235}
1236
1237static void gt_record_fences(struct intel_gt_coredump *gt)
1238{
1239 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1240 struct intel_uncore *uncore = gt->_gt->uncore;
1241 int i;
1242
1243 if (GRAPHICS_VER(uncore->i915)((&(uncore->i915)->__runtime)->graphics.ip.ver) >= 6) {
1244 for (i = 0; i < ggtt->num_fences; i++)
1245 gt->fence[i] =
1246 intel_uncore_read64(uncore,
1247 FENCE_REG_GEN6_LO(i)((const i915_reg_t){ .reg = (0x100000 + (i) * 8) }));
1248 } else if (GRAPHICS_VER(uncore->i915)((&(uncore->i915)->__runtime)->graphics.ip.ver) >= 4) {
1249 for (i = 0; i < ggtt->num_fences; i++)
1250 gt->fence[i] =
1251 intel_uncore_read64(uncore,
1252 FENCE_REG_965_LO(i)((const i915_reg_t){ .reg = (0x03000 + (i) * 8) }));
1253 } else {
1254 for (i = 0; i < ggtt->num_fences; i++)
1255 gt->fence[i] =
1256 intel_uncore_read(uncore, FENCE_REG(i)((const i915_reg_t){ .reg = (0x2000 + (((i) & 8) <<
9) + ((i) & 7) * 4) })
);
1257 }
1258 gt->nfence = i;
1259}
1260
1261static void engine_record_registers(struct intel_engine_coredump *ee)
1262{
1263 const struct intel_engine_cs *engine = ee->engine;
1264 struct drm_i915_privateinteldrm_softc *i915 = engine->i915;
1265
1266 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 6) {
1267 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x50) }))
;
1268
1269 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 12)
1270 ee->fault_reg = intel_uncore_read(engine->uncore,
1271 GEN12_RING_FAULT_REG((const i915_reg_t){ .reg = (0xcec4) }));
1272 else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8)
1273 ee->fault_reg = intel_uncore_read(engine->uncore,
1274 GEN8_RING_FAULT_REG((const i915_reg_t){ .reg = (0x4094) }));
1275 else
1276 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine)intel_uncore_read((engine)->uncore, ((const i915_reg_t){ .
reg = ((((const u32 []){ 0x4094, 0x4194, 0x4394, 0x4294 })[(engine
)->class])) }))
;
1277 }
1278
1279 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 4) {
1280 ee->esr = ENGINE_READ(engine, RING_ESR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xb8) }))
;
1281 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x78) }))
;
1282 ee->ipeir = ENGINE_READ(engine, RING_IPEIR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x64) }))
;
1283 ee->ipehr = ENGINE_READ(engine, RING_IPEHR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x68) }))
;
1284 ee->instps = ENGINE_READ(engine, RING_INSTPS)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x70) }))
;
1285 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x140) }))
;
1286 ee->ccid = ENGINE_READ(engine, CCID)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x180) }))
;
1287 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8) {
1288 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x60) }))
<< 32;
1289 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x168) }))
<< 32;
1290 }
1291 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x110) }))
;
1292 } else {
1293 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xd0) }))
;
1294 ee->ipeir = ENGINE_READ(engine, IPEIR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x88) }))
;
1295 ee->ipehr = ENGINE_READ(engine, IPEHR)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x8c) }))
;
1296 }
1297
1298 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 11) {
1299 ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xc4) }))
;
1300 ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x20c) }))
;
1301 ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x244) }))
;
1302 ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x60) }))
;
1303 ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x78) }))
;
1304 ee->nopid = ENGINE_READ(engine, RING_NOPID)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x94) }))
;
1305 ee->excc = ENGINE_READ(engine, RING_EXCC)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x28) }))
;
1306 }
1307
1308 intel_engine_get_instdone(engine, &ee->instdone);
1309
1310 ee->instpm = ENGINE_READ(engine, RING_INSTPM)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0xc0) }))
;
1311 ee->acthd = intel_engine_get_active_head(engine);
1312 ee->start = ENGINE_READ(engine, RING_START)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x38) }))
;
1313 ee->head = ENGINE_READ(engine, RING_HEAD)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x34) }))
;
1314 ee->tail = ENGINE_READ(engine, RING_TAIL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x30) }))
;
1315 ee->ctl = ENGINE_READ(engine, RING_CTL)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x3c) }))
;
1316 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) > 2)
1317 ee->mode = ENGINE_READ(engine, RING_MI_MODE)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x9c) }))
;
1318
1319 if (!HWS_NEEDS_PHYSICAL(i915)((&(i915)->__info)->hws_needs_physical)) {
1320 i915_reg_t mmio;
1321
1322 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7) {
1323 switch (engine->id) {
1324 default:
1325 MISSING_CASE(engine->id)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "engine->id", (long)(engine->id)); __builtin_expect(!
!(__ret), 0); })
;
1326 fallthroughdo {} while (0);
1327 case RCS0:
1328 mmio = RENDER_HWS_PGA_GEN7((const i915_reg_t){ .reg = (0x4080) });
1329 break;
1330 case BCS0:
1331 mmio = BLT_HWS_PGA_GEN7((const i915_reg_t){ .reg = (0x4280) });
1332 break;
1333 case VCS0:
1334 mmio = BSD_HWS_PGA_GEN7((const i915_reg_t){ .reg = (0x4180) });
1335 break;
1336 case VECS0:
1337 mmio = VEBOX_HWS_PGA_GEN7((const i915_reg_t){ .reg = (0x4380) });
1338 break;
1339 }
1340 } else if (GRAPHICS_VER(engine->i915)((&(engine->i915)->__runtime)->graphics.ip.ver) == 6) {
1341 mmio = RING_HWS_PGA_GEN6(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x2080)
})
;
1342 } else {
1343 /* XXX: gen8 returns to sanity */
1344 mmio = RING_HWS_PGA(engine->mmio_base)((const i915_reg_t){ .reg = ((engine->mmio_base) + 0x80) }
)
;
1345 }
1346
1347 ee->hws = intel_uncore_read(engine->uncore, mmio);
1348 }
1349
1350 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
1351
1352 if (HAS_PPGTT(i915)(((&(i915)->__runtime)->ppgtt_type) != INTEL_PPGTT_NONE
)
) {
1353 int i;
1354
1355 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x29c) }))
;
1356
1357 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 6) {
1358 ee->vm_info.pp_dir_base =
1359 ENGINE_READ(engine, RING_PP_DIR_BASE_READ)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x518) }))
;
1360 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7) {
1361 ee->vm_info.pp_dir_base =
1362 ENGINE_READ(engine, RING_PP_DIR_BASE)intel_uncore_read(((engine))->uncore, ((const i915_reg_t){
.reg = (((engine)->mmio_base) + 0x228) }))
;
1363 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8) {
1364 u32 base = engine->mmio_base;
1365
1366 for (i = 0; i < 4; i++) {
1367 ee->vm_info.pdp[i] =
1368 intel_uncore_read(engine->uncore,
1369 GEN8_RING_PDP_UDW(base, i)((const i915_reg_t){ .reg = ((base) + 0x270 + (i) * 8 + 4) }));
1370 ee->vm_info.pdp[i] <<= 32;
1371 ee->vm_info.pdp[i] |=
1372 intel_uncore_read(engine->uncore,
1373 GEN8_RING_PDP_LDW(base, i)((const i915_reg_t){ .reg = ((base) + 0x270 + (i) * 8) }));
1374 }
1375 }
1376 }
1377}
1378
1379static void record_request(const struct i915_request *request,
1380 struct i915_request_coredump *erq)
1381{
1382 erq->flags = request->fence.flags;
1383 erq->context = request->fence.context;
1384 erq->seqno = request->fence.seqno;
1385 erq->sched_attr = request->sched.attr;
1386 erq->head = request->head;
1387 erq->tail = request->tail;
1388
1389 erq->pid = 0;
1390 rcu_read_lock();
1391 if (!intel_context_is_closed(request->context)) {
1392 const struct i915_gem_context *ctx;
1393
1394 ctx = rcu_dereference(request->context->gem_context)(request->context->gem_context);
1395 if (ctx)
1396#ifdef __linux__
1397 erq->pid = pid_nr(ctx->pid);
1398#else
1399 erq->pid = ctx->pid;
1400#endif
1401 }
1402 rcu_read_unlock();
1403}
1404
1405static void engine_record_execlists(struct intel_engine_coredump *ee)
1406{
1407 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1408 struct i915_request * const *port = el->active;
1409 unsigned int n = 0;
1410
1411 while (*port)
1412 record_request(*port++, &ee->execlist[n++]);
1413
1414 ee->num_ports = n;
1415}
1416
1417static bool_Bool record_context(struct i915_gem_context_coredump *e,
1418 const struct i915_request *rq)
1419{
1420 struct i915_gem_context *ctx;
1421 struct task_struct *task;
1422 bool_Bool simulated;
1423
1424 rcu_read_lock();
1425 ctx = rcu_dereference(rq->context->gem_context)(rq->context->gem_context);
1426 if (ctx && !kref_get_unless_zero(&ctx->ref))
1427 ctx = NULL((void *)0);
1428 rcu_read_unlock();
1429 if (!ctx)
1430 return true1;
1431
1432#ifdef __linux__
1433 rcu_read_lock();
1434 task = pid_task(ctx->pid, PIDTYPE_PID);
1435 if (task) {
1436 strcpy(e->comm, task->comm);
1437 e->pid = task->pid;
1438 }
1439 rcu_read_unlock();
1440#endif
1441
1442 e->sched_attr = ctx->sched;
1443 e->guilty = atomic_read(&ctx->guilty_count)({ typeof(*(&ctx->guilty_count)) __tmp = *(volatile typeof
(*(&ctx->guilty_count)) *)&(*(&ctx->guilty_count
)); membar_datadep_consumer(); __tmp; })
;
1444 e->active = atomic_read(&ctx->active_count)({ typeof(*(&ctx->active_count)) __tmp = *(volatile typeof
(*(&ctx->active_count)) *)&(*(&ctx->active_count
)); membar_datadep_consumer(); __tmp; })
;
1445
1446 e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
1447 e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);
1448
1449 simulated = i915_gem_context_no_error_capture(ctx);
1450
1451 i915_gem_context_put(ctx);
1452 return simulated;
1453}
1454
1455struct intel_engine_capture_vma {
1456 struct intel_engine_capture_vma *next;
1457 struct i915_vma_resource *vma_res;
1458 char name[16];
1459 bool_Bool lockdep_cookie;
1460};
1461
1462static struct intel_engine_capture_vma *
1463capture_vma_snapshot(struct intel_engine_capture_vma *next,
1464 struct i915_vma_resource *vma_res,
1465 gfp_t gfp, const char *name)
1466{
1467 struct intel_engine_capture_vma *c;
1468
1469 if (!vma_res)
1470 return next;
1471
1472 c = kmalloc(sizeof(*c), gfp);
1473 if (!c)
1474 return next;
1475
1476 if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
1477 kfree(c);
1478 return next;
1479 }
1480
1481 strlcpy(c->name, name, sizeof(c->name));
1482 c->vma_res = i915_vma_resource_get(vma_res);
1483
1484 c->next = next;
1485 return c;
1486}
1487
1488static struct intel_engine_capture_vma *
1489capture_vma(struct intel_engine_capture_vma *next,
1490 struct i915_vma *vma,
1491 const char *name,
1492 gfp_t gfp)
1493{
1494 if (!vma)
1495 return next;
1496
1497 /*
1498 * If the vma isn't pinned, then the vma should be snapshotted
1499 * to a struct i915_vma_snapshot at command submission time.
1500 * Not here.
1501 */
1502 if (GEM_WARN_ON(!i915_vma_is_pinned(vma))({ __builtin_expect(!!(!!(!i915_vma_is_pinned(vma))), 0); }))
1503 return next;
1504
1505 next = capture_vma_snapshot(next, vma->resource, gfp, name);
1506
1507 return next;
1508}
1509
1510static struct intel_engine_capture_vma *
1511capture_user(struct intel_engine_capture_vma *capture,
1512 const struct i915_request *rq,
1513 gfp_t gfp)
1514{
1515 struct i915_capture_list *c;
1516
1517 for (c = rq->capture_list; c; c = c->next)
1518 capture = capture_vma_snapshot(capture, c->vma_res, gfp,
1519 "user");
1520
1521 return capture;
1522}
1523
1524static void add_vma(struct intel_engine_coredump *ee,
1525 struct i915_vma_coredump *vma)
1526{
1527 if (vma) {
1528 vma->next = ee->vma;
1529 ee->vma = vma;
1530 }
1531}
1532
1533static struct i915_vma_coredump *
1534create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
1535 const char *name, struct i915_vma_compress *compress)
1536{
1537 struct i915_vma_coredump *ret = NULL((void *)0);
1538 struct i915_vma_resource *vma_res;
1539 bool_Bool lockdep_cookie;
1540
1541 if (!vma)
1542 return NULL((void *)0);
1543
1544 vma_res = vma->resource;
1545
1546 if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
1547 ret = i915_vma_coredump_create(gt, vma_res, compress, name);
1548 i915_vma_resource_unhold(vma_res, lockdep_cookie);
1549 }
1550
1551 return ret;
1552}
1553
1554static void add_vma_coredump(struct intel_engine_coredump *ee,
1555 const struct intel_gt *gt,
1556 struct i915_vma *vma,
1557 const char *name,
1558 struct i915_vma_compress *compress)
1559{
1560 add_vma(ee, create_vma_coredump(gt, vma, name, compress));
1561}
1562
1563struct intel_engine_coredump *
1564intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
1565{
1566 struct intel_engine_coredump *ee;
1567
1568 ee = kzalloc(sizeof(*ee), gfp);
1569 if (!ee)
1570 return NULL((void *)0);
1571
1572 ee->engine = engine;
1573
1574 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE(1UL << (0)))) {
1575 engine_record_registers(ee);
1576 engine_record_execlists(ee);
1577 }
1578
1579 return ee;
1580}
1581
1582struct intel_engine_capture_vma *
1583intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1584 struct i915_request *rq,
1585 gfp_t gfp)
1586{
1587 struct intel_engine_capture_vma *vma = NULL((void *)0);
1588
1589 ee->simulated |= record_context(&ee->context, rq);
1590 if (ee->simulated)
1591 return NULL((void *)0);
1592
1593 /*
1594 * We need to copy these to an anonymous buffer
1595 * as the simplest method to avoid being overwritten
1596 * by userspace.
1597 */
1598 vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
1599 vma = capture_user(vma, rq, gfp);
1600 vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
1601 vma = capture_vma(vma, rq->context->state, "HW context", gfp);
1602
1603 ee->rq_head = rq->head;
1604 ee->rq_post = rq->postfix;
1605 ee->rq_tail = rq->tail;
1606
1607 return vma;
1608}
1609
1610void
1611intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1612 struct intel_engine_capture_vma *capture,
1613 struct i915_vma_compress *compress)
1614{
1615 const struct intel_engine_cs *engine = ee->engine;
1616
1617 while (capture) {
1618 struct intel_engine_capture_vma *this = capture;
1619 struct i915_vma_resource *vma_res = this->vma_res;
1620
1621 add_vma(ee,
1622 i915_vma_coredump_create(engine->gt, vma_res,
1623 compress, this->name));
1624
1625 i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
1626 i915_vma_resource_put(vma_res);
1627
1628 capture = this->next;
1629 kfree(this);
1630 }
1631
1632 add_vma_coredump(ee, engine->gt, engine->status_page.vma,
1633 "HW Status", compress);
1634
1635 add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
1636 "WA context", compress);
1637}
1638
1639static struct intel_engine_coredump *
1640capture_engine(struct intel_engine_cs *engine,
1641 struct i915_vma_compress *compress,
1642 u32 dump_flags)
1643{
1644 struct intel_engine_capture_vma *capture = NULL((void *)0);
1645 struct intel_engine_coredump *ee;
1646 struct intel_context *ce = NULL((void *)0);
1647 struct i915_request *rq = NULL((void *)0);
1648
1649 ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL(0x0002 | 0 | 0), dump_flags);
1650 if (!ee)
1651 return NULL((void *)0);
1652
1653 intel_engine_get_hung_entity(engine, &ce, &rq);
1654 if (!rq || !i915_request_started(rq))
1655 goto no_request_capture;
1656
1657 capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL(0x0002 | 0));
1658 if (!capture)
1659 goto no_request_capture;
1660 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE(1UL << (0)))
1661 intel_guc_capture_get_matching_node(engine->gt, ee, ce);
1662
1663 intel_engine_coredump_add_vma(ee, capture, compress);
1664 i915_request_put(rq);
1665
1666 return ee;
1667
1668no_request_capture:
1669 if (rq)
1670 i915_request_put(rq);
1671 kfree(ee);
1672 return NULL((void *)0);
1673}
1674
1675static void
1676gt_record_engines(struct intel_gt_coredump *gt,
1677 intel_engine_mask_t engine_mask,
1678 struct i915_vma_compress *compress,
1679 u32 dump_flags)
1680{
1681 struct intel_engine_cs *engine;
1682 enum intel_engine_id id;
1683
1684 for_each_engine(engine, gt->_gt, id)for ((id) = 0; (id) < I915_NUM_ENGINES; (id)++) if (!((engine
) = (gt->_gt)->engine[(id)])) {} else
{
1685 struct intel_engine_coredump *ee;
1686
1687 /* Refill our page pool before entering atomic section */
1688 pool_refill(&compress->pool, ALLOW_FAIL(0x0002 | 0 | 0));
1689
1690 ee = capture_engine(engine, compress, dump_flags);
1691 if (!ee)
1692 continue;
1693
1694 ee->hung = engine->mask & engine_mask;
1695
1696 gt->simulated |= ee->simulated;
1697 if (ee->simulated) {
1698 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE(1UL << (0)))
1699 intel_guc_capture_free_node(ee);
1700 kfree(ee);
1701 continue;
1702 }
1703
1704 ee->next = gt->engine;
1705 gt->engine = ee;
1706 }
1707}
1708
1709static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
1710 const struct intel_guc_ct_buffer *ctb,
1711 const void *blob_ptr, struct intel_guc *guc)
1712{
1713 if (!ctb || !ctb->desc)
1714 return;
1715
1716 saved->raw_status = ctb->desc->status;
1717 saved->raw_head = ctb->desc->head;
1718 saved->raw_tail = ctb->desc->tail;
1719 saved->head = ctb->head;
1720 saved->tail = ctb->tail;
1721 saved->size = ctb->size;
1722 saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
1723 saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
1724}
1725
1726static struct intel_uc_coredump *
1727gt_record_uc(struct intel_gt_coredump *gt,
1728 struct i915_vma_compress *compress)
1729{
1730 const struct intel_uc *uc = &gt->_gt->uc;
1731 struct intel_uc_coredump *error_uc;
1732
1733 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL(0x0002 | 0 | 0));
1734 if (!error_uc)
1735 return NULL((void *)0);
1736
1737 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw))__builtin_memcpy((&error_uc->guc_fw), (&uc->guc
.fw), (sizeof(uc->guc.fw)))
;
1738 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw))__builtin_memcpy((&error_uc->huc_fw), (&uc->huc
.fw), (sizeof(uc->huc.fw)))
;
1739
1740 error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL(0x0002 | 0 | 0));
1741 error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL(0x0002 | 0 | 0));
1742 error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL(0x0002 | 0 | 0));
1743 error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL(0x0002 | 0 | 0));
1744
1745 /*
1746 * Save the GuC log and include a timestamp reference for converting the
1747 * log times to system times (in conjunction with the error->boottime and
1748 * gt->clock_frequency fields saved elsewhere).
1749 */
1750 error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP((const i915_reg_t){ .reg = (0xc3e8) }));
1751 error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
1752 "GuC log buffer", compress);
1753 error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
1754 "GuC CT buffer", compress);
1755 error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
1756 gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
1757 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1758 gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
1759 uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
1760
1761 return error_uc;
1762}
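The comment above promises enough reference points to rebase GuC log times onto system time. A rough sketch of that arithmetic, assuming a raw tick count log_ts taken from the same GUCPMTIMESTAMP domain and ignoring counter wrap (log_ts and the surrounding flow are hypothetical, not part of this file):

	/* Sketch: rebase a GuC log tick count onto the captured boottime. */
	u64 ticks = log_ts - error_uc->guc.timestamp;   /* delta vs. capture reference */
	u64 ns    = ticks * gt->clock_period_ns;        /* period saved by gt_record_info() */
	ktime_t when = ktime_add_ns(error->boottime, ns);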
1763
1764/* Capture display registers. */
1765static void gt_record_display_regs(struct intel_gt_coredump *gt)
1766{
1767 struct intel_uncore *uncore = gt->_gt->uncore;
1768 struct drm_i915_privateinteldrm_softc *i915 = uncore->i915;
1769
1770 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 6)
1771 gt->derrmr = intel_uncore_read(uncore, DERRMR((const i915_reg_t){ .reg = (0x44050) }));
1772
1773 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8)
1774 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER((const i915_reg_t){ .reg = (0x4446c) }));
1775 else if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW))
1776 gt->ier = intel_uncore_read(uncore, VLV_IER((const i915_reg_t){ .reg = (0x180000 + 0x20a0) }));
1777 else if (HAS_PCH_SPLIT(i915)(((i915)->pch_type) != PCH_NONE))
1778 gt->ier = intel_uncore_read(uncore, DEIER((const i915_reg_t){ .reg = (0x4400c) }));
1779 else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 2)
1780 gt->ier = intel_uncore_read16(uncore, GEN2_IER((const i915_reg_t){ .reg = (0x20a0) }));
1781 else
1782 gt->ier = intel_uncore_read(uncore, GEN2_IER((const i915_reg_t){ .reg = (0x20a0) }));
1783}
1784
1785/* Capture all other registers that GuC doesn't capture. */
1786static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
1787{
1788 struct intel_uncore *uncore = gt->_gt->uncore;
1789 struct drm_i915_privateinteldrm_softc *i915 = uncore->i915;
1790 int i;
1791
1792 if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW)) {
1793 gt->gtier[0] = intel_uncore_read(uncore, GTIER((const i915_reg_t){ .reg = (0x4401c) }));
1794 gt->ngtier = 1;
1795 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 11) {
1796 gt->gtier[0] =
1797 intel_uncore_read(uncore,
1798 GEN11_RENDER_COPY_INTR_ENABLE((const i915_reg_t){ .reg = (0x190030) }));
1799 gt->gtier[1] =
1800 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE((const i915_reg_t){ .reg = (0x190034) }));
1801 gt->gtier[2] =
1802 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE((const i915_reg_t){ .reg = (0x190038) }));
1803 gt->gtier[3] =
1804 intel_uncore_read(uncore,
1805 GEN11_GPM_WGBOXPERF_INTR_ENABLE((const i915_reg_t){ .reg = (0x19003c) }));
1806 gt->gtier[4] =
1807 intel_uncore_read(uncore,
1808 GEN11_CRYPTO_RSVD_INTR_ENABLE((const i915_reg_t){ .reg = (0x190040) }));
1809 gt->gtier[5] =
1810 intel_uncore_read(uncore,
1811 GEN11_GUNIT_CSME_INTR_ENABLE((const i915_reg_t){ .reg = (0x190044) }));
1812 gt->ngtier = 6;
1813 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8) {
1814 for (i = 0; i < 4; i++)
1815 gt->gtier[i] =
1816 intel_uncore_read(uncore, GEN8_GT_IER(i)((const i915_reg_t){ .reg = (0x4430c + (0x10 * (i))) }));
1817 gt->ngtier = 4;
1818 } else if (HAS_PCH_SPLIT(i915)(((i915)->pch_type) != PCH_NONE)) {
1819 gt->gtier[0] = intel_uncore_read(uncore, GTIER((const i915_reg_t){ .reg = (0x4401c) }));
1820 gt->ngtier = 1;
1821 }
1822
1823 gt->eir = intel_uncore_read(uncore, EIR((const i915_reg_t){ .reg = (0x20b0) }));
1824 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER((const i915_reg_t){ .reg = (0x02024) }));
1825}
1826
1827/*
1828 * Capture all registers that relate to workload submission.
1830 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us.
1830 */
1831static void gt_record_global_regs(struct intel_gt_coredump *gt)
1832{
1833 struct intel_uncore *uncore = gt->_gt->uncore;
1834 struct drm_i915_privateinteldrm_softc *i915 = uncore->i915;
1835 int i;
1836
1837 /*
1838 * General organization
1839 * 1. Registers specific to a single generation
1840 * 2. Registers which belong to multiple generations
1841 * 3. Feature specific registers.
1842 * 4. Everything else
1843 * Please try to follow the order.
1844 */
1845
1846 /* 1: Registers specific to a single generation */
1847 if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW))
1848 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg = (0x1300b0
) }))
;
1849
1850 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 7)
1851 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT((const i915_reg_t){ .reg = (0x44040) }));
1852
1853 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 12) {
1854 gt->fault_data0 = intel_uncore_read(uncore,
1855 GEN12_FAULT_TLB_DATA0((const i915_reg_t){ .reg = (0xceb8) }));
1856 gt->fault_data1 = intel_uncore_read(uncore,
1857 GEN12_FAULT_TLB_DATA1((const i915_reg_t){ .reg = (0xcebc) }));
1858 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8) {
1859 gt->fault_data0 = intel_uncore_read(uncore,
1860 GEN8_FAULT_TLB_DATA0((const i915_reg_t){ .reg = (0x4b10) }));
1861 gt->fault_data1 = intel_uncore_read(uncore,
1862 GEN8_FAULT_TLB_DATA1((const i915_reg_t){ .reg = (0x4b14) }));
1863 }
1864
1865 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 6) {
1866 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg = (0xa18c
) }))
;
1867 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL((const i915_reg_t){ .reg = (0x24000) }));
1868 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE((const i915_reg_t){ .reg = (0x2520) }));
1869 }
1870
1871 /* 2: Registers which belong to multiple generations */
1872 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 7)
1873 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT)__raw_uncore_read32(uncore, ((const i915_reg_t){ .reg = (0xa188
) }))
;
1874
1875 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 6) {
1876 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) < 12) {
1877 gt->error = intel_uncore_read(uncore, ERROR_GEN6((const i915_reg_t){ .reg = (0x40a0) }));
1878 gt->done_reg = intel_uncore_read(uncore, DONE_REG((const i915_reg_t){ .reg = (0x40b0) }));
1879 }
1880 }
1881
1882 /* 3: Feature specific registers */
1883 if (IS_GRAPHICS_VER(i915, 6, 7)(((&(i915)->__runtime)->graphics.ip.ver) >= (6) &&
((&(i915)->__runtime)->graphics.ip.ver) <= (7))
) {
1884 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK((const i915_reg_t){ .reg = (0x4090) }));
1885 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS((const i915_reg_t){ .reg = (0x14090) }));
1886 }
1887
1888 if (IS_GRAPHICS_VER(i915, 8, 11)(((&(i915)->__runtime)->graphics.ip.ver) >= (8) &&
((&(i915)->__runtime)->graphics.ip.ver) <= (11)
)
)
1889 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN((const i915_reg_t){ .reg = (0x4024) }));
1890
1891 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 12)
1892 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG((const i915_reg_t){ .reg = (0x43f4) }));
1893
1894 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 12) {
1895 for (i = 0; i < I915_MAX_SFC(8 / 2); i++) {
1896 /*
1897 * SFC_DONE resides in the VD forcewake domain, so it
1898 * only exists if the corresponding VCS engine is
1899 * present.
1900 */
1901 if ((gt->_gt->info.sfc_mask & BIT(i)(1UL << (i))) == 0 ||
1902 !HAS_ENGINE(gt->_gt, _VCS(i * 2))(((gt->_gt)->info.engine_mask) & (1UL << ((VCS0
+ (i * 2)))))
)
1903 continue;
1904
1905 gt->sfc_done[i] =
1906 intel_uncore_read(uncore, GEN12_SFC_DONE(i)((const i915_reg_t){ .reg = (0x1cc000 + (i) * 0x1000) }));
1907 }
1908
1909 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE((const i915_reg_t){ .reg = (0xcf68) }));
1910 }
1911}
1912
1913static void gt_record_info(struct intel_gt_coredump *gt)
1914{
1915 memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info))__builtin_memcpy((&gt->info), (&gt->_gt->info
), (sizeof(struct intel_gt_info)))
;
1916 gt->clock_frequency = gt->_gt->clock_frequency;
1917 gt->clock_period_ns = gt->_gt->clock_period_ns;
1918}
1919
1920/*
1921 * Generate a semi-unique error code. The code is not meant to have meaning; the
1922 * code's only purpose is to try to prevent false duplicated bug reports by
1923 * grossly estimating a GPU error state.
1924 *
1925 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
1926 * the hang if we could strip the GTT offset information from it.
1927 *
1928 * It's only a small step better than a random number in its current form.
1929 */
1930static u32 generate_ecode(const struct intel_engine_coredump *ee)
1931{
1932 /*
1933 * IPEHR would be an ideal way to detect errors, as it's the gross
1934 * measure of "the command that hung." However, it has some very common
1935 * synchronization commands which almost always appear in cases that are
1936 * strictly a client bug. Use instdone to differentiate some of those.
1937 */
1938 return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
1939}
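To make the "small step better than a random number" concrete: the ecode is just an XOR fold of two 32-bit snapshots. With a hypothetical pair of values (both invented for illustration):

	u32 ipehr    = 0x0b160001;              /* last command parsed before the hang */
	u32 instdone = 0xffd7ffff;              /* per-unit "done" bits */
	u32 ecode    = ipehr ^ instdone;        /* == 0xf4c1fffe */

Two different hangs that latch the same common synchronization command and the same instdone pattern still collide, which is why the comment only claims duplicate-report filtering, not identification.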
1940
1941static const char *error_msg(struct i915_gpu_coredump *error)
1942{
1943 struct intel_engine_coredump *first = NULL((void *)0);
1944 unsigned int hung_classes = 0;
1945 struct intel_gt_coredump *gt;
1946 int len;
1947
1948 for (gt = error->gt; gt; gt = gt->next) {
1949 struct intel_engine_coredump *cs;
1950
1951 for (cs = gt->engine; cs; cs = cs->next) {
1952 if (cs->hung) {
1953 hung_classes |= BIT(cs->engine->uabi_class)(1UL << (cs->engine->uabi_class));
1954 if (!first)
1955 first = cs;
1956 }
1957 }
1958 }
1959
1960 len = scnprintf(error->error_msg, sizeof(error->error_msg),snprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%x:%08x"
, ((&(error->i915)->__runtime)->graphics.ip.ver)
, hung_classes, generate_ecode(first))
1961 "GPU HANG: ecode %d:%x:%08x",snprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%x:%08x"
, ((&(error->i915)->__runtime)->graphics.ip.ver)
, hung_classes, generate_ecode(first))
1962 GRAPHICS_VER(error->i915), hung_classes,snprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%x:%08x"
, ((&(error->i915)->__runtime)->graphics.ip.ver)
, hung_classes, generate_ecode(first))
1963 generate_ecode(first))snprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%x:%08x"
, ((&(error->i915)->__runtime)->graphics.ip.ver)
, hung_classes, generate_ecode(first))
;
1964 if (first && first->context.pid) {
1965 /* Just show the first executing process; more is confusing */
1966 len += scnprintf(error->error_msg + len,snprintf(error->error_msg + len, sizeof(error->error_msg
) - len, ", in %s [%d]", first->context.comm, first->context
.pid)
Value stored to 'len' is never read
1967 sizeof(error->error_msg) - len,snprintf(error->error_msg + len, sizeof(error->error_msg
) - len, ", in %s [%d]", first->context.comm, first->context
.pid)
1968 ", in %s [%d]",snprintf(error->error_msg + len, sizeof(error->error_msg
) - len, ", in %s [%d]", first->context.comm, first->context
.pid)
1969 first->context.comm, first->context.pid)snprintf(error->error_msg + len, sizeof(error->error_msg
) - len, ", in %s [%d]", first->context.comm, first->context
.pid)
;
1970 }
1971
1972 return error->error_msg;
1973}
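The dead store flagged above is this function's final `len +=`: nothing reads `len` after the second scnprintf(), so the added value is never consumed. A minimal sketch of one way to quiet the checker, assuming no further appends to error_msg are intended (illustrative only, not a committed fix):

	if (first && first->context.pid) {
		/* Just show the first executing process; more is confusing */
		/* Drop the dead accumulation: len is unused past this point. */
		scnprintf(error->error_msg + len,
			  sizeof(error->error_msg) - len,
			  ", in %s [%d]",
			  first->context.comm, first->context.pid);
	}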
1974
1975static void capture_gen(struct i915_gpu_coredump *error)
1976{
1977 struct drm_i915_privateinteldrm_softc *i915 = error->i915;
1978
1979 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count)({ typeof(*(&i915->runtime_pm.wakeref_count)) __tmp = *
(volatile typeof(*(&i915->runtime_pm.wakeref_count)) *
)&(*(&i915->runtime_pm.wakeref_count)); membar_datadep_consumer
(); __tmp; })
;
1980 error->suspended = i915->runtime_pm.suspended;
1981
1982 error->iommu = i915_vtd_active(i915);
1983 error->reset_count = i915_reset_count(&i915->gpu_error);
1984 error->suspend_count = i915->suspend_count;
1985
1986 i915_params_copy(&error->params, &i915->params);
1987 memcpy(&error->device_info,__builtin_memcpy((&error->device_info), ((&(i915)->
__info)), (sizeof(error->device_info)))
1988 INTEL_INFO(i915),__builtin_memcpy((&error->device_info), ((&(i915)->
__info)), (sizeof(error->device_info)))
1989 sizeof(error->device_info))__builtin_memcpy((&error->device_info), ((&(i915)->
__info)), (sizeof(error->device_info)))
;
1990 memcpy(&error->runtime_info,__builtin_memcpy((&error->runtime_info), ((&(i915)
->__runtime)), (sizeof(error->runtime_info)))
1991 RUNTIME_INFO(i915),__builtin_memcpy((&error->runtime_info), ((&(i915)
->__runtime)), (sizeof(error->runtime_info)))
1992 sizeof(error->runtime_info))__builtin_memcpy((&error->runtime_info), ((&(i915)
->__runtime)), (sizeof(error->runtime_info)))
;
1993 error->driver_caps = i915->caps;
1994}
1995
1996struct i915_gpu_coredump *
1997i915_gpu_coredump_alloc(struct drm_i915_privateinteldrm_softc *i915, gfp_t gfp)
1998{
1999 struct i915_gpu_coredump *error;
2000
2001 if (!i915->params.error_capture)
2002 return NULL((void *)0);
2003
2004 error = kzalloc(sizeof(*error), gfp);
2005 if (!error)
2006 return NULL((void *)0);
2007
2008 kref_init(&error->ref);
2009 error->i915 = i915;
2010
2011 error->time = ktime_get_real();
2012 error->boottime = ktime_get_boottime();
2013 error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
2014 error->capture = jiffies;
2015
2016 capture_gen(error);
2017
2018 return error;
2019}
2020
2021#define DAY_AS_SECONDS(x)(24 * 60 * 60 * (x)) (24 * 60 * 60 * (x))
2022
2023struct intel_gt_coredump *
2024intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
2025{
2026 struct intel_gt_coredump *gc;
2027
2028 gc = kzalloc(sizeof(*gc), gfp);
2029 if (!gc)
2030 return NULL((void *)0);
2031
2032 gc->_gt = gt;
2033 gc->awake = intel_gt_pm_is_awake(gt);
2034
2035 gt_record_display_regs(gc);
2036 gt_record_global_nonguc_regs(gc);
2037
2038 /*
2039 * GuC dumps global, eng-class and eng-instance registers
2040 * (that can change as part of engine state during execution)
2041 * before an engine is reset due to a hung context.
2042 * GuC captures and reports all three groups of registers
2043 * together as a single set before the engine is reset.
2044 * Thus, if GuC triggered the context reset we retrieve
2045 * the register values as part of gt_record_engines.
2046 */
2047 if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE(1UL << (0))))
2048 gt_record_global_regs(gc);
2049
2050 gt_record_fences(gc);
2051
2052 return gc;
2053}
2054
2055struct i915_vma_compress *
2056i915_vma_capture_prepare(struct intel_gt_coredump *gt)
2057{
2058 struct i915_vma_compress *compress;
2059
2060 compress = kmalloc(sizeof(*compress), ALLOW_FAIL(0x0002 | 0 | 0));
2061 if (!compress)
2062 return NULL((void *)0);
2063
2064 if (!compress_init(compress)) {
2065 kfree(compress);
2066 return NULL((void *)0);
2067 }
2068
2069 return compress;
2070}
2071
2072void i915_vma_capture_finish(struct intel_gt_coredump *gt,
2073 struct i915_vma_compress *compress)
2074{
2075 if (!compress)
2076 return;
2077
2078 compress_fini(compress);
2079 kfree(compress);
2080}
2081
2082static struct i915_gpu_coredump *
2083__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2084{
2085 struct drm_i915_privateinteldrm_softc *i915 = gt->i915;
2086 struct i915_gpu_coredump *error;
2087
2088 /* Check if GPU capture has been disabled */
2089 error = READ_ONCE(i915->gpu_error.first_error)({ typeof(i915->gpu_error.first_error) __tmp = *(volatile typeof
(i915->gpu_error.first_error) *)&(i915->gpu_error.first_error
); membar_datadep_consumer(); __tmp; })
;
2090 if (IS_ERR(error))
2091 return error;
2092
2093 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL(0x0002 | 0 | 0));
2094 if (!error)
2095 return ERR_PTR(-ENOMEM12);
2096
2097 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL(0x0002 | 0 | 0), dump_flags);
2098 if (error->gt) {
2099 struct i915_vma_compress *compress;
2100
2101 compress = i915_vma_capture_prepare(error->gt);
2102 if (!compress) {
2103 kfree(error->gt);
2104 kfree(error);
2105 return ERR_PTR(-ENOMEM12);
2106 }
2107
2108 if (INTEL_INFO(i915)(&(i915)->__info)->has_gt_uc) {
2109 error->gt->uc = gt_record_uc(error->gt, compress);
2110 if (error->gt->uc) {
2111 if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE(1UL << (0)))
2112 error->gt->uc->guc.is_guc_capture = true1;
2113 else
2114 GEM_BUG_ON(error->gt->uc->guc.is_guc_capture)((void)0);
2115 }
2116 }
2117
2118 gt_record_info(error->gt);
2119 gt_record_engines(error->gt, engine_mask, compress, dump_flags);
2120
2121
2122 i915_vma_capture_finish(error->gt, compress);
2123
2124 error->simulated |= error->gt->simulated;
2125 }
2126
2127 error->overlay = intel_overlay_capture_error_state(i915);
2128
2129 return error;
2130}
2131
2132static DEFINE_MUTEX(capture_mutex)struct rwlock capture_mutex = { 0, "capture_mutex" };
2133
2134struct i915_gpu_coredump *
2135i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
2136{
2137 int ret = mutex_lock_interruptible(&capture_mutex);
2138 struct i915_gpu_coredump *dump;
2139
2140 if (ret)
2141 return ERR_PTR(ret);
2142
2143 dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
2144 mutex_unlock(&capture_mutex)rw_exit_write(&capture_mutex);
2145
2146 return dump;
2147}
2148
2149void i915_error_state_store(struct i915_gpu_coredump *error)
2150{
2151 struct drm_i915_privateinteldrm_softc *i915;
2152 static bool_Bool warned;
2153
2154 if (IS_ERR_OR_NULL(error))
2155 return;
2156
2157 i915 = error->i915;
2158 drm_info(&i915->drm, "%s\n", error_msg(error))do { } while(0);
2159
2160 if (error->simulated ||
2161 cmpxchg(&i915->gpu_error.first_error, NULL, error)__sync_val_compare_and_swap(&i915->gpu_error.first_error
, ((void *)0), error)
)
2162 return;
2163
2164 i915_gpu_coredump_get(error);
2165
2166 if (!xchg(&warned, true)__sync_lock_test_and_set(&warned, 1) &&
2167 ktime_get_real_seconds() - DRIVER_TIMESTAMP1604406085 < DAY_AS_SECONDS(180)(24 * 60 * 60 * (180))) {
2168 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n")do { } while(0);
2169 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n")do { } while(0);
2170 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n")do { } while(0);
2171 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n")do { } while(0);
2172 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n")do { } while(0);
2173 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",do { } while(0)
2174 i915->drm.primary->index)do { } while(0);
2175 }
2176}
2177
2178/**
2179 * i915_capture_error_state - capture an error record for later analysis
2180 * @gt: intel_gt which originated the hang
2181 * @engine_mask: hung engines
2182 * @dump_flags: dump flags
2183 *
2184 * Should be called when an error is detected (either a hang or an error
2185 * interrupt) to capture error state from the time of the error. Fills
2186 * out a structure which becomes available in debugfs for user level tools
2187 * to pick up.
2188 */
2189void i915_capture_error_state(struct intel_gt *gt,
2190 intel_engine_mask_t engine_mask, u32 dump_flags)
2191{
2192 struct i915_gpu_coredump *error;
2193
2194 error = i915_gpu_coredump(gt, engine_mask, dump_flags);
2195 if (IS_ERR(error)) {
2196 cmpxchg(&gt->i915->gpu_error.first_error, NULL, error)__sync_val_compare_and_swap(&gt->i915->gpu_error.first_error
, ((void *)0), error)
;
2197 return;
2198 }
2199
2200 i915_error_state_store(error);
2201 i915_gpu_coredump_put(error);
2202}
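For reference, a caller of this entry point passes the mask of hung engines and a dump-flags word; a minimal sketch of such a hang handler (context elided; the mask shown is illustrative, and CORE_DUMP_FLAG_NONE is assumed to be the zero counterpart of CORE_DUMP_FLAG_IS_GUC_CAPTURE):

	/* From a hang handler, before resetting the engines: */
	i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);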
2203
2204struct i915_gpu_coredump *
2205i915_first_error_state(struct drm_i915_privateinteldrm_softc *i915)
2206{
2207 struct i915_gpu_coredump *error;
2208
2209 spin_lock_irq(&i915->gpu_error.lock)mtx_enter(&i915->gpu_error.lock);
2210 error = i915->gpu_error.first_error;
2211 if (!IS_ERR_OR_NULL(error))
2212 i915_gpu_coredump_get(error);
2213 spin_unlock_irq(&i915->gpu_error.lock)mtx_leave(&i915->gpu_error.lock);
2214
2215 return error;
2216}
2217
2218void i915_reset_error_state(struct drm_i915_privateinteldrm_softc *i915)
2219{
2220 struct i915_gpu_coredump *error;
2221
2222 spin_lock_irq(&i915->gpu_error.lock)mtx_enter(&i915->gpu_error.lock);
2223 error = i915->gpu_error.first_error;
2224 if (error != ERR_PTR(-ENODEV19)) /* if disabled, always disabled */
2225 i915->gpu_error.first_error = NULL((void *)0);
2226 spin_unlock_irq(&i915->gpu_error.lock)mtx_leave(&i915->gpu_error.lock);
2227
2228 if (!IS_ERR_OR_NULL(error))
2229 i915_gpu_coredump_put(error);
2230}
2231
2232void i915_disable_error_state(struct drm_i915_privateinteldrm_softc *i915, int err)
2233{
2234 spin_lock_irq(&i915->gpu_error.lock)mtx_enter(&i915->gpu_error.lock);
2235 if (!i915->gpu_error.first_error)
2236 i915->gpu_error.first_error = ERR_PTR(err);
2237 spin_unlock_irq(&i915->gpu_error.lock)mtx_leave(&i915->gpu_error.lock);
2238}
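Read together, the last four functions define a tri-state protocol for gpu_error.first_error under gpu_error.lock: NULL (capture enabled, nothing pending), a valid coredump (pending for userspace), or an ERR_PTR (capture disabled, with -ENODEV deliberately sticky across i915_reset_error_state()). A condensed, illustrative sketch:

	/* Park ERR_PTR(-ENODEV) in first_error to disable capture. */
	i915_disable_error_state(i915, -ENODEV);

	/* Later capture attempts short-circuit on the sentinel ... */
	dump = i915_gpu_coredump(gt, ALL_ENGINES, 0);
	/* ... returning ERR_PTR(-ENODEV), which i915_error_state_store() ignores. */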