/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */
#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
#include "i915_vma_snapshot.h"

#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
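/*
 * Error capture must not stall on memory pressure nor recurse into the
 * driver: ALLOW_FAIL may wake kswapd but never enters direct reclaim and
 * fails quietly, while ATOMIC_MAYFAIL covers the paths that run under
 * spinlocks and therefore cannot sleep at all.
 */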
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}
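/*
 * The textual error state is streamed into a chain of kmalloc'ed buffers;
 * each time a buffer fills up it is recorded as a scatterlist entry
 * (__sg_set_buf() above) and a fresh buffer is allocated, so the complete
 * dump can later be read back piecewise without one huge allocation.
 */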
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur)
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		else
			e->sgl = sgl;

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned int len = strlen(str);

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
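/*
 * Typical usage, as seen throughout the printers below (illustrative only):
 *
 *	err_printf(m, "EIR: 0x%08x\n", gt->eir);
 *	err_puts(m, "\n");
 *
 * Both funnel through __i915_error_grow(); if the buffer cannot be grown,
 * further output is silently dropped rather than failing the caller.
 */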
static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p = alloc_page(gfp);

		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}
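/*
 * CONFIG_DRM_I915_COMPRESS_ERROR selects between the two i915_vma_compress
 * implementations below: a zlib deflate stream fed from the page pool
 * above, or a plain page-copy fallback with the same interface. The pool
 * is topped up (pool_refill()) before entering the atomic capture section
 * so pages are available even when allocation is not.
 */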
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}
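/*
 * The first character of each object dump tells the decoder how the pages
 * that follow were encoded: ":" for a zlib deflate stream, "~" for raw
 * uncompressed pages (see the !CONFIG_DRM_I915_COMPRESS_ERROR variant of
 * err_compression_marker() below).
 */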
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
	int slice;
	int subslice;
	int iter;

	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) {
		for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
			err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.sampler[slice][subslice]);

		for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
			err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.row[slice][subslice]);
	} else {
		for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
			err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.sampler[slice][subslice]);

		for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
			err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.row[slice][subslice]);
	}

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
			err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}
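/*
 * Requests are printed as a single line; the two optional markers after
 * the seqno mirror the dma-fence state: "!" when the fence has already
 * signaled and "+" when signaling has been enabled.
 */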
static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	const u32 period = to_gt(m->i915)->clock_period_ns;

	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime * period,
		   mul_u32_u32(ctx->avg_runtime, period));
}
static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			break;
		vma = vma->next;
	}

	return vma;
}

static struct i915_vma_coredump *
find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, " CCID: 0x%08x\n", ee->ccid);
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, " ESR: 0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	err_printf(m, " hung: %u\n", ee->hung);
	err_printf(m, " engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, " ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}

	error_print_context(m, " Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_vma(struct drm_i915_error_state_buf *m,
			    const struct intel_engine_cs *engine,
			    const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print_static(&error->device_info, &p);
	intel_device_info_print_runtime(&error->runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}

static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_vma(m, NULL, error_uc->guc_log);
}
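/*
 * The sgl chain built by __i915_error_grow() mixes inline entries with
 * SG_CHAIN links to further pages; err_free_sgl() walks each page worth
 * of entries, frees the buffers, then follows the chain pointer until it
 * reaches the last entry.
 */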
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}
static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(&gt->info.sseu, &p);
}
static void err_print_gt(struct drm_i915_error_state_buf *m,
			 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;
	int i;

	err_printf(m, "GT awake: %s\n", yesno(gt->awake));
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
	}

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		error_print_engine(m, ee);
		for (vma = ee->vma; vma; vma = vma->next)
			print_error_vma(m, ee->engine, vma);
	}

	if (gt->uc)
		err_print_uc(m, gt->uc);

	err_print_gt_info(m, gt);
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_DMC(m->i915)) {
		struct intel_dmc *dmc = &m->i915->dmc;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(intel_dmc_has_payload(m->i915) != 0));
		err_printf(m, "DMC fw version: %d.%d\n",
			   DMC_VERSION_MAJOR(dmc->version),
			   DMC_VERSION_MINOR(dmc->version));
	}

	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));

	if (error->gt)
		err_print_gt(m, error->gt);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}

static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}
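/**
 * i915_gpu_coredump_copy_to_buffer - copy a slice of the rendered dump
 * @error: the coredump to read from
 * @buf: destination buffer
 * @off: byte offset into the textual dump
 * @rem: number of bytes to copy
 *
 * Lazily renders the coredump into error->sgl on first use, then copies
 * the requested byte range out of the scatterlist. The last position is
 * cached in error->fit so that sequential reads (the common sysfs access
 * pattern) do not rescan the chain from the start.
 */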
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		buf += len;
		rem -= len;
		count += len;
		off += len;
		pos += len;

		WRITE_ONCE(error->fit, sg);
	} while (!sg_is_last(sg++));

	return count;
}
static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}

static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}

static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.path);
	kfree(uc->huc_fw.path);
	i915_vma_coredump_free(uc->guc_log);

	kfree(uc);
}
static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}

void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}
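/*
 * Reading back the vma contents picks one of three paths: through the
 * reserved GGTT error-capture PTE slot (so even unmappable or WC memory
 * can be read safely), through an iomap of device-local memory, or, for
 * plain system pages, a direct CPU mapping with clflush on either side.
 * Whichever path is taken, every page is pushed through the compressor
 * into dst->page_list.
 */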
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_snapshot *vsnap,
			 struct i915_vma_compress *compress)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vsnap || !vsnap->pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, vsnap->name);
	dst->next = NULL;

	dst->gtt_offset = vsnap->gtt_offset;
	dst->gtt_size = vsnap->gtt_size;
	dst->gtt_page_sizes = vsnap->page_sizes;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vsnap->pages) {
			mutex_lock(&ggtt->error_mutex);
			ggtt->vm.insert_page(&ggtt->vm, dma, slot,
					     I915_CACHE_NONE, 0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) {
		struct intel_memory_region *mem = vsnap->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vsnap->pages) {
			void __iomem *s;

			s = io_mapping_map_wc(&mem->iomap,
					      dma - mem->region.start,
					      PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vsnap->pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap(page);
			ret = compress_page(compress, s, dst, false);
			kunmap(page);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}
static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}

	gt->nfence = i;
}
static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}
static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}

static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}
static bool record_context(struct i915_gem_context_coredump *e,
			   const struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);

	e->total_runtime = rq->context->runtime.total;
	e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}
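/*
 * A capture_vma list defers the actual copying: while the engine is still
 * wedged we only pin the vma resources (taking a lockdep cookie), and the
 * bulk read-back happens later in intel_engine_coredump_add_vma() once a
 * compressor is available.
 */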
struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_snapshot *vsnap;
	char name[16];
	bool lockdep_cookie;
};

static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_snapshot *vsnap,
		     gfp_t gfp)
{
	struct intel_engine_capture_vma *c;

	if (!i915_vma_snapshot_present(vsnap))
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, vsnap->name);
	c->vsnap = vsnap;
	i915_vma_snapshot_get(vsnap);

	c->next = next;
	return c;
}

static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	struct i915_vma_snapshot *vsnap;

	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	GEM_WARN_ON(!i915_vma_is_pinned(vma));
	if (!i915_vma_is_pinned(vma))
		return next;

	vsnap = i915_vma_snapshot_alloc(gfp);
	if (!vsnap)
		return next;

	i915_vma_snapshot_init(vsnap, vma, name);
	next = capture_vma_snapshot(next, vsnap, gfp);

	/* FIXME: Replace on async unbind. */
	i915_vma_snapshot_put(vsnap);

	return next;
}

static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp);

	return capture;
}
static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}

static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret;
	struct i915_vma_snapshot tmp;

	if (!vma)
		return NULL;

	GEM_WARN_ON(!i915_vma_is_pinned(vma));
	i915_vma_snapshot_init_onstack(&tmp, vma, name);
	ret = i915_vma_coredump_create(gt, &tmp, compress);
	i915_vma_snapshot_put_onstack(&tmp);

	return ret;
}

static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}
struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	engine_record_registers(ee);
	engine_record_execlists(ee);

	return ee;
}

struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, rq);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp);
	vma = capture_user(vma, rq, gfp);
	vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
	vma = capture_vma(vma, rq->context->state, "HW context", gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}
void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_snapshot *vsnap = this->vsnap;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt,
						 vsnap, compress));

		i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie);
		i915_vma_snapshot_put(vsnap);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce;
	struct i915_request *rq = NULL;
	unsigned long flags;

	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
	if (!ee)
		return NULL;

	ce = intel_engine_get_hung_context(engine);
	if (ce) {
		intel_engine_clear_hung_context(engine);
		rq = intel_context_find_active_request(ce);
		if (!rq || !i915_request_started(rq))
			goto no_request_capture;
	} else {
		/*
		 * Getting here with GuC enabled means it is a forced error capture
		 * with no actual hang. So, no need to attempt the execlist search.
		 */
		if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
			spin_lock_irqsave(&engine->sched_engine->lock, flags);
			rq = intel_engine_execlist_find_hung_request(engine);
			spin_unlock_irqrestore(&engine->sched_engine->lock,
					       flags);
		}
	}
	if (rq)
		rq = i915_request_get_rcu(rq);

	if (!rq)
		goto no_request_capture;

	capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
	if (!capture) {
		i915_request_put(rq);
		goto no_request_capture;
	}

	intel_engine_coredump_add_vma(ee, capture, compress);
	i915_request_put(rq);

	return ee;

no_request_capture:
	kfree(ee);
	return NULL;
}
static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	/* Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
	error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
	error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						"GuC log buffer", compress);

	return error_uc;
}
/* Capture all registers which don't fit into another category. */
static void gt_record_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ier = intel_uncore_read(uncore, VLV_IER);
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
	}

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		gt->derrmr = intel_uncore_read(uncore, DERRMR);
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}

	/* 4: Everything else */
	if (GRAPHICS_VER(i915) >= 11) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->ier = intel_uncore_read(uncore, DEIER);
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) == 2) {
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	} else if (!IS_VALLEYVIEW(i915)) {
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
	}
	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
}
/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it often holds very
	 * common synchronization commands that appear even when the hang is
	 * strictly a client bug. Use instdone to help differentiate those
	 * cases.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}
static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = intel_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	error->driver_caps = i915->caps;
}
struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_regs(gc);
	gt_record_fences(gc);

	return gc;
}
struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}

void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}
static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress);

		if (INTEL_INFO(i915)->has_gt_uc)
			error->gt->uc = gt_record_uc(error->gt, compress);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}
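/**
 * i915_gpu_coredump - capture the current GPU state into a coredump
 * @gt: the intel_gt that raised the error
 * @engine_mask: mask of engines believed to be hung
 *
 * A global mutex serialises concurrent captures; only the interruptible
 * lock acquisition can fail, in which case an ERR_PTR is returned.
 */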
struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask);
	mutex_unlock(&capture_mutex);

	return dump;
}
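/*
 * Only the first error state is kept (cmpxchg on gpu_error.first_error);
 * later hangs are still logged, but their coredumps are dropped until the
 * saved one is cleared via i915_reset_error_state().
 */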
void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}
struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}
void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}
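/*
 * Illustrative sketch (not called anywhere in this file): how a sysfs or
 * debugfs read handler might drain a coredump in chunks with
 * i915_gpu_coredump_copy_to_buffer(). The function name here is an
 * assumption for illustration only; the real consumers live in
 * i915_sysfs.c and i915_debugfs.c, and i915_gpu_coredump_put() is the
 * kref release helper from i915_gpu_error.h.
 */
static ssize_t __maybe_unused
example_read_coredump(struct drm_i915_private *i915,
		      char *buf, loff_t off, size_t count)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;

	/* Take a reference on the first (and only retained) error state. */
	error = i915_first_error_state(i915);
	if (!error)
		return 0;
	if (IS_ERR(error)) /* capture disabled via i915_disable_error_state() */
		return PTR_ERR(error);

	/* Lazily renders the text into error->sgl, then copies [off, off+count). */
	ret = i915_gpu_coredump_copy_to_buffer(error, buf, off, count);

	i915_gpu_coredump_put(error);
	return ret;
}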