Due to the tracking provided by the scheduler, we know exactly which
submit is failing. Only dump this single submit and the required
auxiliary information. This cuts down the size of the devcoredumps
by including only the relevant information.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
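
[Editor's note: to illustrate the new accounting, here is a minimal,
compilable C sketch. The types fake_bo and fake_submit and the helper
dump_file_size() are invented stand-ins, not driver structures; only
the arithmetic mirrors the patch: five fixed dump objects (registers,
MMU, kernel ring, the hanging cmdbuf and the end marker) plus one
object per BO referenced by the failing submit, instead of one per
active command buffer and per active MMU mapping.]

#include <stddef.h>

#define PAGE_SHIFT 12 /* assumes 4K pages for illustration */

/* Stand-in types for illustration only; not the driver's structs. */
struct fake_bo { size_t size; };

struct fake_submit {
	size_t cmdbuf_size;
	unsigned int nr_bos;
	const struct fake_bo *bos;
};

static size_t dump_file_size(const struct fake_submit *submit,
			     size_t reg_size, size_t mmu_size,
			     size_t ring_size, unsigned int *n_obj,
			     size_t *n_bomap_pages)
{
	size_t file_size;
	unsigned int i;

	/* registers, mmu, ring, hanging cmdbuf and end marker */
	*n_obj = 5;
	*n_bomap_pages = 0;
	file_size = reg_size + mmu_size + ring_size + submit->cmdbuf_size;

	/* add in the buffer objects of the failing submit only */
	for (i = 0; i < submit->nr_bos; i++) {
		file_size += submit->bos[i].size;
		*n_bomap_pages += submit->bos[i].size >> PAGE_SHIFT;
		(*n_obj)++;
	}

	return file_size;
}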
etnaviv_core_dump_header(iter, type, iter->data + size);
}
-void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
+ struct etnaviv_gpu *gpu = submit->gpu;
struct core_dump_iterator iter;
- struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_gem_submit *submit;
- struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ int i;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
- /* We always dump registers, mmu, ring and end marker */
- n_obj = 4;
+ /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ n_obj = 5;
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer.size;
-
- /* Add in the active command buffers */
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- file_size += submit->cmdbuf.size;
- n_obj++;
- }
+ mmu_size + gpu->buffer.size + submit->cmdbuf.size;
/* Add in the active buffer objects */
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
- if (!vram->use)
- continue;
-
- obj = vram->object;
+ for (i = 0; i < submit->nr_bos; i++) {
+ obj = submit->bos[i].obj;
file_size += obj->base.size;
n_bomap_pages += obj->base.size >> PAGE_SHIFT;
n_obj++;
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
- submit->cmdbuf.vaddr, submit->cmdbuf.size,
- etnaviv_cmdbuf_get_va(&submit->cmdbuf));
- }
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf));
/* Reserve space for the bomap */
if (n_bomap_pages) {
bomap_start = bomap = NULL;
}
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_vram_mapping *vram;
struct page **pages;
void *vaddr;
- if (vram->use == 0)
- continue;
-
- obj = vram->object;
+ obj = submit->bos[i].obj;
+ vram = submit->bos[i].mapping;
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
- etnaviv_core_dump(gpu);
+ etnaviv_core_dump(submit);
etnaviv_gpu_recover_hang(gpu);
drm_sched_resubmit_jobs(&gpu->sched);
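
[Editor's note: the hunk above sits in the scheduler's timeout handler.
A sketch of the resulting flow, assuming the handler name
etnaviv_sched_timedout_job and eliding the scheduler stop/start and any
other surroundings not visible in this diff; only the four calls are
taken from the hunk above.]

static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;

	/* raise the job's karma so a repeat offender gets marked guilty */
	drm_sched_increase_karma(sched_job);

	/* dump only the hanging submit, then get the GPU back into the
	 * init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	/* requeue the remaining jobs on the recovered GPU */
	drm_sched_resubmit_jobs(&gpu->sched);
}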