2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
32 #include <drm/drm_debugfs.h>
34 #include "gem/i915_gem_context.h"
35 #include "gt/intel_gt_pm.h"
36 #include "gt/intel_gt_requests.h"
37 #include "gt/intel_reset.h"
38 #include "gt/intel_rc6.h"
39 #include "gt/intel_rps.h"
40 #include "gt/uc/intel_guc_submission.h"
42 #include "i915_debugfs.h"
43 #include "i915_debugfs_params.h"
45 #include "i915_trace.h"
47 #include "intel_sideband.h"
49 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
51 return to_i915(node->minor->dev);
54 static int i915_capabilities(struct seq_file *m, void *data)
56 struct drm_i915_private *i915 = node_to_i915(m->private);
57 struct drm_printer p = drm_seq_file_printer(m);
59 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
61 intel_device_info_print_static(INTEL_INFO(i915), &p);
62 intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
63 intel_driver_caps_print(&i915->caps, &p);
65 kernel_param_lock(THIS_MODULE);
66 i915_params_dump(&i915_modparams, &p);
67 kernel_param_unlock(THIS_MODULE);
72 static char get_tiling_flag(struct drm_i915_gem_object *obj)
74 switch (i915_gem_object_get_tiling(obj)) {
76 case I915_TILING_NONE: return ' ';
77 case I915_TILING_X: return 'X';
78 case I915_TILING_Y: return 'Y';
82 static char get_global_flag(struct drm_i915_gem_object *obj)
84 return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
87 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
89 return obj->mm.mapping ? 'M' : ' ';
93 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
100 case I915_GTT_PAGE_SIZE_4K:
102 case I915_GTT_PAGE_SIZE_64K:
104 case I915_GTT_PAGE_SIZE_2M:
110 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
111 x += snprintf(buf + x, len - x, "2M, ");
112 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
113 x += snprintf(buf + x, len - x, "64K, ");
114 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
115 x += snprintf(buf + x, len - x, "4K, ");
123 i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
125 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
126 struct intel_engine_cs *engine;
127 struct i915_vma *vma;
130 seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
132 get_tiling_flag(obj),
133 get_global_flag(obj),
134 get_pin_mapped_flag(obj),
135 obj->base.size / 1024,
138 i915_cache_level_str(dev_priv, obj->cache_level),
139 obj->mm.dirty ? " dirty" : "",
140 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 seq_printf(m, " (name: %d)", obj->base.name);
144 spin_lock(&obj->vma.lock);
145 list_for_each_entry(vma, &obj->vma.list, obj_link) {
146 if (!drm_mm_node_allocated(&vma->node))
149 spin_unlock(&obj->vma.lock);
151 if (i915_vma_is_pinned(vma))
154 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
155 i915_vma_is_ggtt(vma) ? "g" : "pp",
156 vma->node.start, vma->node.size,
157 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
158 if (i915_vma_is_ggtt(vma)) {
159 switch (vma->ggtt_view.type) {
160 case I915_GGTT_VIEW_NORMAL:
161 seq_puts(m, ", normal");
164 case I915_GGTT_VIEW_PARTIAL:
165 seq_printf(m, ", partial [%08llx+%x]",
166 vma->ggtt_view.partial.offset << PAGE_SHIFT,
167 vma->ggtt_view.partial.size << PAGE_SHIFT);
170 case I915_GGTT_VIEW_ROTATED:
171 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
172 vma->ggtt_view.rotated.plane[0].width,
173 vma->ggtt_view.rotated.plane[0].height,
174 vma->ggtt_view.rotated.plane[0].stride,
175 vma->ggtt_view.rotated.plane[0].offset,
176 vma->ggtt_view.rotated.plane[1].width,
177 vma->ggtt_view.rotated.plane[1].height,
178 vma->ggtt_view.rotated.plane[1].stride,
179 vma->ggtt_view.rotated.plane[1].offset);
182 case I915_GGTT_VIEW_REMAPPED:
183 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
184 vma->ggtt_view.remapped.plane[0].width,
185 vma->ggtt_view.remapped.plane[0].height,
186 vma->ggtt_view.remapped.plane[0].stride,
187 vma->ggtt_view.remapped.plane[0].offset,
188 vma->ggtt_view.remapped.plane[1].width,
189 vma->ggtt_view.remapped.plane[1].height,
190 vma->ggtt_view.remapped.plane[1].stride,
191 vma->ggtt_view.remapped.plane[1].offset);
195 MISSING_CASE(vma->ggtt_view.type);
200 seq_printf(m, " , fence: %d", vma->fence->id);
203 spin_lock(&obj->vma.lock);
205 spin_unlock(&obj->vma.lock);
207 seq_printf(m, " (pinned x %d)", pin_count);
209 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
210 if (i915_gem_object_is_framebuffer(obj))
211 seq_printf(m, " (fb)");
213 engine = i915_gem_object_last_write_engine(obj);
215 seq_printf(m, " (%s)", engine->name);
219 struct i915_address_space *vm;
222 u64 active, inactive;
226 static int per_file_stats(int id, void *ptr, void *data)
228 struct drm_i915_gem_object *obj = ptr;
229 struct file_stats *stats = data;
230 struct i915_vma *vma;
232 if (!kref_get_unless_zero(&obj->base.refcount))
236 stats->total += obj->base.size;
237 if (!atomic_read(&obj->bind_count))
238 stats->unbound += obj->base.size;
240 spin_lock(&obj->vma.lock);
242 for_each_ggtt_vma(vma, obj) {
243 if (!drm_mm_node_allocated(&vma->node))
246 if (i915_vma_is_active(vma))
247 stats->active += vma->node.size;
249 stats->inactive += vma->node.size;
251 if (i915_vma_is_closed(vma))
252 stats->closed += vma->node.size;
255 struct rb_node *p = obj->vma.tree.rb_node;
260 vma = rb_entry(p, typeof(*vma), obj_node);
261 cmp = i915_vma_compare(vma, stats->vm, NULL);
263 if (drm_mm_node_allocated(&vma->node)) {
264 if (i915_vma_is_active(vma))
265 stats->active += vma->node.size;
267 stats->inactive += vma->node.size;
269 if (i915_vma_is_closed(vma))
270 stats->closed += vma->node.size;
280 spin_unlock(&obj->vma.lock);
282 i915_gem_object_put(obj);
286 #define print_file_stats(m, name, stats) do { \
288 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
298 static void print_context_stats(struct seq_file *m,
299 struct drm_i915_private *i915)
301 struct file_stats kstats = {};
302 struct i915_gem_context *ctx, *cn;
304 spin_lock(&i915->gem.contexts.lock);
305 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
306 struct i915_gem_engines_iter it;
307 struct intel_context *ce;
309 if (!kref_get_unless_zero(&ctx->ref))
312 spin_unlock(&i915->gem.contexts.lock);
314 for_each_gem_engine(ce,
315 i915_gem_context_lock_engines(ctx), it) {
316 if (intel_context_pin_if_active(ce)) {
320 ce->state->obj, &kstats);
321 per_file_stats(0, ce->ring->vma->obj, &kstats);
323 intel_context_unpin(ce);
326 i915_gem_context_unlock_engines(ctx);
328 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
329 struct file_stats stats = {
330 .vm = rcu_access_pointer(ctx->vm),
332 struct drm_file *file = ctx->file_priv->file;
333 struct task_struct *task;
337 idr_for_each(&file->object_idr, per_file_stats, &stats);
341 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
342 snprintf(name, sizeof(name), "%s",
343 task ? task->comm : "<unknown>");
346 print_file_stats(m, name, stats);
349 spin_lock(&i915->gem.contexts.lock);
350 list_safe_reset_next(ctx, cn, link);
351 i915_gem_context_put(ctx);
353 spin_unlock(&i915->gem.contexts.lock);
355 print_file_stats(m, "[k]contexts", kstats);
358 static int i915_gem_object_info(struct seq_file *m, void *data)
360 struct drm_i915_private *i915 = node_to_i915(m->private);
361 struct intel_memory_region *mr;
362 enum intel_region_id id;
364 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
365 i915->mm.shrink_count,
366 atomic_read(&i915->mm.free_count),
367 i915->mm.shrink_memory);
368 for_each_memory_region(mr, i915, id)
369 seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
370 mr->name, &mr->total, &mr->avail);
373 print_context_stats(m, i915);
378 static void gen8_display_interrupt_info(struct seq_file *m)
380 struct drm_i915_private *dev_priv = node_to_i915(m->private);
383 for_each_pipe(dev_priv, pipe) {
384 enum intel_display_power_domain power_domain;
385 intel_wakeref_t wakeref;
387 power_domain = POWER_DOMAIN_PIPE(pipe);
388 wakeref = intel_display_power_get_if_enabled(dev_priv,
391 seq_printf(m, "Pipe %c power disabled\n",
395 seq_printf(m, "Pipe %c IMR:\t%08x\n",
397 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
398 seq_printf(m, "Pipe %c IIR:\t%08x\n",
400 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
401 seq_printf(m, "Pipe %c IER:\t%08x\n",
403 I915_READ(GEN8_DE_PIPE_IER(pipe)));
405 intel_display_power_put(dev_priv, power_domain, wakeref);
408 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
409 I915_READ(GEN8_DE_PORT_IMR));
410 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
411 I915_READ(GEN8_DE_PORT_IIR));
412 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
413 I915_READ(GEN8_DE_PORT_IER));
415 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
416 I915_READ(GEN8_DE_MISC_IMR));
417 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
418 I915_READ(GEN8_DE_MISC_IIR));
419 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
420 I915_READ(GEN8_DE_MISC_IER));
422 seq_printf(m, "PCU interrupt mask:\t%08x\n",
423 I915_READ(GEN8_PCU_IMR));
424 seq_printf(m, "PCU interrupt identity:\t%08x\n",
425 I915_READ(GEN8_PCU_IIR));
426 seq_printf(m, "PCU interrupt enable:\t%08x\n",
427 I915_READ(GEN8_PCU_IER));
430 static int i915_interrupt_info(struct seq_file *m, void *data)
432 struct drm_i915_private *dev_priv = node_to_i915(m->private);
433 struct intel_engine_cs *engine;
434 intel_wakeref_t wakeref;
437 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
439 if (IS_CHERRYVIEW(dev_priv)) {
440 intel_wakeref_t pref;
442 seq_printf(m, "Master Interrupt Control:\t%08x\n",
443 I915_READ(GEN8_MASTER_IRQ));
445 seq_printf(m, "Display IER:\t%08x\n",
447 seq_printf(m, "Display IIR:\t%08x\n",
449 seq_printf(m, "Display IIR_RW:\t%08x\n",
450 I915_READ(VLV_IIR_RW));
451 seq_printf(m, "Display IMR:\t%08x\n",
453 for_each_pipe(dev_priv, pipe) {
454 enum intel_display_power_domain power_domain;
456 power_domain = POWER_DOMAIN_PIPE(pipe);
457 pref = intel_display_power_get_if_enabled(dev_priv,
460 seq_printf(m, "Pipe %c power disabled\n",
465 seq_printf(m, "Pipe %c stat:\t%08x\n",
467 I915_READ(PIPESTAT(pipe)));
469 intel_display_power_put(dev_priv, power_domain, pref);
472 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
473 seq_printf(m, "Port hotplug:\t%08x\n",
474 I915_READ(PORT_HOTPLUG_EN));
475 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
476 I915_READ(VLV_DPFLIPSTAT));
477 seq_printf(m, "DPINVGTT:\t%08x\n",
478 I915_READ(DPINVGTT));
479 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
481 for (i = 0; i < 4; i++) {
482 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
483 i, I915_READ(GEN8_GT_IMR(i)));
484 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
485 i, I915_READ(GEN8_GT_IIR(i)));
486 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
487 i, I915_READ(GEN8_GT_IER(i)));
490 seq_printf(m, "PCU interrupt mask:\t%08x\n",
491 I915_READ(GEN8_PCU_IMR));
492 seq_printf(m, "PCU interrupt identity:\t%08x\n",
493 I915_READ(GEN8_PCU_IIR));
494 seq_printf(m, "PCU interrupt enable:\t%08x\n",
495 I915_READ(GEN8_PCU_IER));
496 } else if (INTEL_GEN(dev_priv) >= 11) {
497 seq_printf(m, "Master Interrupt Control: %08x\n",
498 I915_READ(GEN11_GFX_MSTR_IRQ));
500 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
501 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
502 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
503 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
504 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
505 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
506 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
507 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
508 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
509 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
510 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
511 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
513 seq_printf(m, "Display Interrupt Control:\t%08x\n",
514 I915_READ(GEN11_DISPLAY_INT_CTL));
516 gen8_display_interrupt_info(m);
517 } else if (INTEL_GEN(dev_priv) >= 8) {
518 seq_printf(m, "Master Interrupt Control:\t%08x\n",
519 I915_READ(GEN8_MASTER_IRQ));
521 for (i = 0; i < 4; i++) {
522 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
523 i, I915_READ(GEN8_GT_IMR(i)));
524 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
525 i, I915_READ(GEN8_GT_IIR(i)));
526 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
527 i, I915_READ(GEN8_GT_IER(i)));
530 gen8_display_interrupt_info(m);
531 } else if (IS_VALLEYVIEW(dev_priv)) {
532 intel_wakeref_t pref;
534 seq_printf(m, "Display IER:\t%08x\n",
536 seq_printf(m, "Display IIR:\t%08x\n",
538 seq_printf(m, "Display IIR_RW:\t%08x\n",
539 I915_READ(VLV_IIR_RW));
540 seq_printf(m, "Display IMR:\t%08x\n",
542 for_each_pipe(dev_priv, pipe) {
543 enum intel_display_power_domain power_domain;
545 power_domain = POWER_DOMAIN_PIPE(pipe);
546 pref = intel_display_power_get_if_enabled(dev_priv,
549 seq_printf(m, "Pipe %c power disabled\n",
554 seq_printf(m, "Pipe %c stat:\t%08x\n",
556 I915_READ(PIPESTAT(pipe)));
557 intel_display_power_put(dev_priv, power_domain, pref);
560 seq_printf(m, "Master IER:\t%08x\n",
561 I915_READ(VLV_MASTER_IER));
563 seq_printf(m, "Render IER:\t%08x\n",
565 seq_printf(m, "Render IIR:\t%08x\n",
567 seq_printf(m, "Render IMR:\t%08x\n",
570 seq_printf(m, "PM IER:\t\t%08x\n",
571 I915_READ(GEN6_PMIER));
572 seq_printf(m, "PM IIR:\t\t%08x\n",
573 I915_READ(GEN6_PMIIR));
574 seq_printf(m, "PM IMR:\t\t%08x\n",
575 I915_READ(GEN6_PMIMR));
577 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
578 seq_printf(m, "Port hotplug:\t%08x\n",
579 I915_READ(PORT_HOTPLUG_EN));
580 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
581 I915_READ(VLV_DPFLIPSTAT));
582 seq_printf(m, "DPINVGTT:\t%08x\n",
583 I915_READ(DPINVGTT));
584 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
586 } else if (!HAS_PCH_SPLIT(dev_priv)) {
587 seq_printf(m, "Interrupt enable: %08x\n",
588 I915_READ(GEN2_IER));
589 seq_printf(m, "Interrupt identity: %08x\n",
590 I915_READ(GEN2_IIR));
591 seq_printf(m, "Interrupt mask: %08x\n",
592 I915_READ(GEN2_IMR));
593 for_each_pipe(dev_priv, pipe)
594 seq_printf(m, "Pipe %c stat: %08x\n",
596 I915_READ(PIPESTAT(pipe)));
598 seq_printf(m, "North Display Interrupt enable: %08x\n",
600 seq_printf(m, "North Display Interrupt identity: %08x\n",
602 seq_printf(m, "North Display Interrupt mask: %08x\n",
604 seq_printf(m, "South Display Interrupt enable: %08x\n",
606 seq_printf(m, "South Display Interrupt identity: %08x\n",
608 seq_printf(m, "South Display Interrupt mask: %08x\n",
610 seq_printf(m, "Graphics Interrupt enable: %08x\n",
612 seq_printf(m, "Graphics Interrupt identity: %08x\n",
614 seq_printf(m, "Graphics Interrupt mask: %08x\n",
618 if (INTEL_GEN(dev_priv) >= 11) {
619 seq_printf(m, "RCS Intr Mask:\t %08x\n",
620 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
621 seq_printf(m, "BCS Intr Mask:\t %08x\n",
622 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
623 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
624 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
625 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
626 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
627 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
628 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
629 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
630 I915_READ(GEN11_GUC_SG_INTR_MASK));
631 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
632 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
633 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
634 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
635 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
636 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
638 } else if (INTEL_GEN(dev_priv) >= 6) {
639 for_each_uabi_engine(engine, dev_priv) {
641 "Graphics Interrupt mask (%s): %08x\n",
642 engine->name, ENGINE_READ(engine, RING_IMR));
646 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
651 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
653 struct drm_i915_private *i915 = node_to_i915(m->private);
656 seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
659 for (i = 0; i < i915->ggtt.num_fences; i++) {
660 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
661 struct i915_vma *vma = reg->vma;
663 seq_printf(m, "Fence %d, pin count = %d, object = ",
664 i, atomic_read(®->pin_count));
666 seq_puts(m, "unused");
668 i915_debugfs_describe_obj(m, vma->obj);
676 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
677 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
678 size_t count, loff_t *pos)
680 struct i915_gpu_coredump *error;
684 error = file->private_data;
688 /* Bounce buffer required because of kernfs __user API convenience. */
689 buf = kmalloc(count, GFP_KERNEL);
693 ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
697 if (!copy_to_user(ubuf, buf, ret))
707 static int gpu_state_release(struct inode *inode, struct file *file)
709 i915_gpu_coredump_put(file->private_data);
713 static int i915_gpu_info_open(struct inode *inode, struct file *file)
715 struct drm_i915_private *i915 = inode->i_private;
716 struct i915_gpu_coredump *gpu;
717 intel_wakeref_t wakeref;
720 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
721 gpu = i915_gpu_coredump(i915);
725 file->private_data = gpu;
729 static const struct file_operations i915_gpu_info_fops = {
730 .owner = THIS_MODULE,
731 .open = i915_gpu_info_open,
732 .read = gpu_state_read,
733 .llseek = default_llseek,
734 .release = gpu_state_release,
738 i915_error_state_write(struct file *filp,
739 const char __user *ubuf,
743 struct i915_gpu_coredump *error = filp->private_data;
748 DRM_DEBUG_DRIVER("Resetting error state\n");
749 i915_reset_error_state(error->i915);
754 static int i915_error_state_open(struct inode *inode, struct file *file)
756 struct i915_gpu_coredump *error;
758 error = i915_first_error_state(inode->i_private);
760 return PTR_ERR(error);
762 file->private_data = error;
766 static const struct file_operations i915_error_state_fops = {
767 .owner = THIS_MODULE,
768 .open = i915_error_state_open,
769 .read = gpu_state_read,
770 .write = i915_error_state_write,
771 .llseek = default_llseek,
772 .release = gpu_state_release,
776 static int i915_frequency_info(struct seq_file *m, void *unused)
778 struct drm_i915_private *dev_priv = node_to_i915(m->private);
779 struct intel_uncore *uncore = &dev_priv->uncore;
780 struct intel_rps *rps = &dev_priv->gt.rps;
781 intel_wakeref_t wakeref;
784 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
786 if (IS_GEN(dev_priv, 5)) {
787 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
788 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
790 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
791 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
792 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
794 seq_printf(m, "Current P-state: %d\n",
795 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
796 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
797 u32 rpmodectl, freq_sts;
799 rpmodectl = I915_READ(GEN6_RP_CONTROL);
800 seq_printf(m, "Video Turbo Mode: %s\n",
801 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
802 seq_printf(m, "HW control enabled: %s\n",
803 yesno(rpmodectl & GEN6_RP_ENABLE));
804 seq_printf(m, "SW control enabled: %s\n",
805 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
806 GEN6_RP_MEDIA_SW_MODE));
808 vlv_punit_get(dev_priv);
809 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
810 vlv_punit_put(dev_priv);
812 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
813 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
815 seq_printf(m, "actual GPU freq: %d MHz\n",
816 intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
818 seq_printf(m, "current GPU freq: %d MHz\n",
819 intel_gpu_freq(rps, rps->cur_freq));
821 seq_printf(m, "max GPU freq: %d MHz\n",
822 intel_gpu_freq(rps, rps->max_freq));
824 seq_printf(m, "min GPU freq: %d MHz\n",
825 intel_gpu_freq(rps, rps->min_freq));
827 seq_printf(m, "idle GPU freq: %d MHz\n",
828 intel_gpu_freq(rps, rps->idle_freq));
831 "efficient (RPe) frequency: %d MHz\n",
832 intel_gpu_freq(rps, rps->efficient_freq));
833 } else if (INTEL_GEN(dev_priv) >= 6) {
837 u32 rpmodectl, rpinclimit, rpdeclimit;
838 u32 rpstat, cagf, reqf;
839 u32 rpupei, rpcurup, rpprevup;
840 u32 rpdownei, rpcurdown, rpprevdown;
841 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
844 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
845 if (IS_GEN9_LP(dev_priv)) {
846 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
847 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
849 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
850 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
853 /* RPSTAT1 is in the GT power well */
854 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
856 reqf = I915_READ(GEN6_RPNSWREQ);
857 if (INTEL_GEN(dev_priv) >= 9)
860 reqf &= ~GEN6_TURBO_DISABLE;
861 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
866 reqf = intel_gpu_freq(rps, reqf);
868 rpmodectl = I915_READ(GEN6_RP_CONTROL);
869 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
870 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
872 rpstat = I915_READ(GEN6_RPSTAT1);
873 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
874 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
875 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
876 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
877 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
878 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
879 cagf = intel_rps_read_actual_frequency(rps);
881 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
883 if (INTEL_GEN(dev_priv) >= 11) {
884 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
885 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
887 * The equivalent to the PM ISR & IIR cannot be read
888 * without affecting the current state of the system
892 } else if (INTEL_GEN(dev_priv) >= 8) {
893 pm_ier = I915_READ(GEN8_GT_IER(2));
894 pm_imr = I915_READ(GEN8_GT_IMR(2));
895 pm_isr = I915_READ(GEN8_GT_ISR(2));
896 pm_iir = I915_READ(GEN8_GT_IIR(2));
898 pm_ier = I915_READ(GEN6_PMIER);
899 pm_imr = I915_READ(GEN6_PMIMR);
900 pm_isr = I915_READ(GEN6_PMISR);
901 pm_iir = I915_READ(GEN6_PMIIR);
903 pm_mask = I915_READ(GEN6_PMINTRMSK);
905 seq_printf(m, "Video Turbo Mode: %s\n",
906 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
907 seq_printf(m, "HW control enabled: %s\n",
908 yesno(rpmodectl & GEN6_RP_ENABLE));
909 seq_printf(m, "SW control enabled: %s\n",
910 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
911 GEN6_RP_MEDIA_SW_MODE));
913 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
914 pm_ier, pm_imr, pm_mask);
915 if (INTEL_GEN(dev_priv) <= 10)
916 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
918 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
919 rps->pm_intrmsk_mbz);
920 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
921 seq_printf(m, "Render p-state ratio: %d\n",
922 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
923 seq_printf(m, "Render p-state VID: %d\n",
924 gt_perf_status & 0xff);
925 seq_printf(m, "Render p-state limit: %d\n",
926 rp_state_limits & 0xff);
927 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
928 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
929 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
930 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
931 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
932 seq_printf(m, "CAGF: %dMHz\n", cagf);
933 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
934 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
935 seq_printf(m, "RP CUR UP: %d (%dus)\n",
936 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
937 seq_printf(m, "RP PREV UP: %d (%dus)\n",
938 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
939 seq_printf(m, "Up threshold: %d%%\n",
940 rps->power.up_threshold);
942 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
943 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
944 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
945 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
946 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
947 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
948 seq_printf(m, "Down threshold: %d%%\n",
949 rps->power.down_threshold);
951 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
952 rp_state_cap >> 16) & 0xff;
953 max_freq *= (IS_GEN9_BC(dev_priv) ||
954 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
955 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
956 intel_gpu_freq(rps, max_freq));
958 max_freq = (rp_state_cap & 0xff00) >> 8;
959 max_freq *= (IS_GEN9_BC(dev_priv) ||
960 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
961 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
962 intel_gpu_freq(rps, max_freq));
964 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
965 rp_state_cap >> 0) & 0xff;
966 max_freq *= (IS_GEN9_BC(dev_priv) ||
967 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
968 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
969 intel_gpu_freq(rps, max_freq));
970 seq_printf(m, "Max overclocked frequency: %dMHz\n",
971 intel_gpu_freq(rps, rps->max_freq));
973 seq_printf(m, "Current freq: %d MHz\n",
974 intel_gpu_freq(rps, rps->cur_freq));
975 seq_printf(m, "Actual freq: %d MHz\n", cagf);
976 seq_printf(m, "Idle freq: %d MHz\n",
977 intel_gpu_freq(rps, rps->idle_freq));
978 seq_printf(m, "Min freq: %d MHz\n",
979 intel_gpu_freq(rps, rps->min_freq));
980 seq_printf(m, "Boost freq: %d MHz\n",
981 intel_gpu_freq(rps, rps->boost_freq));
982 seq_printf(m, "Max freq: %d MHz\n",
983 intel_gpu_freq(rps, rps->max_freq));
985 "efficient (RPe) frequency: %d MHz\n",
986 intel_gpu_freq(rps, rps->efficient_freq));
988 seq_puts(m, "no P-state info available\n");
991 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
992 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
993 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
995 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
999 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1001 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1002 struct intel_rps *rps = &dev_priv->gt.rps;
1003 unsigned int max_gpu_freq, min_gpu_freq;
1004 intel_wakeref_t wakeref;
1005 int gpu_freq, ia_freq;
1007 if (!HAS_LLC(dev_priv))
1010 min_gpu_freq = rps->min_freq;
1011 max_gpu_freq = rps->max_freq;
1012 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1013 /* Convert GT frequency to 50 HZ units */
1014 min_gpu_freq /= GEN9_FREQ_SCALER;
1015 max_gpu_freq /= GEN9_FREQ_SCALER;
1018 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1020 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1021 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1023 sandybridge_pcode_read(dev_priv,
1024 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1026 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1029 (IS_GEN9_BC(dev_priv) ||
1030 INTEL_GEN(dev_priv) >= 10 ?
1031 GEN9_FREQ_SCALER : 1))),
1032 ((ia_freq >> 0) & 0xff) * 100,
1033 ((ia_freq >> 8) & 0xff) * 100);
1035 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1040 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1042 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1043 ring->space, ring->head, ring->tail, ring->emit);
1046 static int i915_context_status(struct seq_file *m, void *unused)
1048 struct drm_i915_private *i915 = node_to_i915(m->private);
1049 struct i915_gem_context *ctx, *cn;
1051 spin_lock(&i915->gem.contexts.lock);
1052 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
1053 struct i915_gem_engines_iter it;
1054 struct intel_context *ce;
1056 if (!kref_get_unless_zero(&ctx->ref))
1059 spin_unlock(&i915->gem.contexts.lock);
1061 seq_puts(m, "HW context ");
1063 struct task_struct *task;
1065 task = get_pid_task(ctx->pid, PIDTYPE_PID);
1067 seq_printf(m, "(%s [%d]) ",
1068 task->comm, task->pid);
1069 put_task_struct(task);
1071 } else if (IS_ERR(ctx->file_priv)) {
1072 seq_puts(m, "(deleted) ");
1074 seq_puts(m, "(kernel) ");
1077 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1080 for_each_gem_engine(ce,
1081 i915_gem_context_lock_engines(ctx), it) {
1082 if (intel_context_pin_if_active(ce)) {
1083 seq_printf(m, "%s: ", ce->engine->name);
1085 i915_debugfs_describe_obj(m, ce->state->obj);
1086 describe_ctx_ring(m, ce->ring);
1088 intel_context_unpin(ce);
1091 i915_gem_context_unlock_engines(ctx);
1095 spin_lock(&i915->gem.contexts.lock);
1096 list_safe_reset_next(ctx, cn, link);
1097 i915_gem_context_put(ctx);
1099 spin_unlock(&i915->gem.contexts.lock);
1104 static const char *swizzle_string(unsigned swizzle)
1107 case I915_BIT_6_SWIZZLE_NONE:
1109 case I915_BIT_6_SWIZZLE_9:
1111 case I915_BIT_6_SWIZZLE_9_10:
1112 return "bit9/bit10";
1113 case I915_BIT_6_SWIZZLE_9_11:
1114 return "bit9/bit11";
1115 case I915_BIT_6_SWIZZLE_9_10_11:
1116 return "bit9/bit10/bit11";
1117 case I915_BIT_6_SWIZZLE_9_17:
1118 return "bit9/bit17";
1119 case I915_BIT_6_SWIZZLE_9_10_17:
1120 return "bit9/bit10/bit17";
1121 case I915_BIT_6_SWIZZLE_UNKNOWN:
/*
 * i915_swizzle_info - debugfs dump of the detected bit-6 swizzle modes for
 * X/Y tiling plus the raw memory-controller/arbiter registers they are
 * derived from. Holds a runtime-pm wakeref for the register reads.
 * NOTE(review): this extraction is missing braces/blank lines (numbering
 * gaps); code lines below are kept byte-identical.
 */
1128 static int i915_swizzle_info(struct seq_file *m, void *data)
1130 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1131 struct intel_uncore *uncore = &dev_priv->uncore;
1132 intel_wakeref_t wakeref;
/* Device must be awake before touching mmio below. */
1134 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1136 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1137 swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
1138 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1139 swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
/* Gen3/4: DRAM controller registers that feed swizzle detection. */
1141 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1142 seq_printf(m, "DDC = 0x%08x\n",
1143 intel_uncore_read(uncore, DCC));
1144 seq_printf(m, "DDC2 = 0x%08x\n",
1145 intel_uncore_read(uncore, DCC2));
1146 seq_printf(m, "C0DRB3 = 0x%04x\n",
1147 intel_uncore_read16(uncore, C0DRB3));
1148 seq_printf(m, "C1DRB3 = 0x%04x\n",
1149 intel_uncore_read16(uncore, C1DRB3));
/* Gen6+: per-channel DIMM config plus tiling/arbiter control. */
1150 } else if (INTEL_GEN(dev_priv) >= 6) {
1151 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1152 intel_uncore_read(uncore, MAD_DIMM_C0));
1153 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1154 intel_uncore_read(uncore, MAD_DIMM_C1));
1155 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1156 intel_uncore_read(uncore, MAD_DIMM_C2));
1157 seq_printf(m, "TILECTL = 0x%08x\n",
1158 intel_uncore_read(uncore, TILECTL));
1159 if (INTEL_GEN(dev_priv) >= 8)
1160 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1161 intel_uncore_read(uncore, GAMTARBMODE));
1163 seq_printf(m, "ARB_MODE = 0x%08x\n",
1164 intel_uncore_read(uncore, ARB_MODE));
1165 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1166 intel_uncore_read(uncore, DISP_ARB_CTL));
/* Quirk flag set when swizzled pages must stay pinned (L-shaped memory). */
1169 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1170 seq_puts(m, "L-shaped memory detected\n");
1172 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * rps_power_to_str - map an RPS power-mode value to a human-readable label.
 * Out-of-range or unnamed modes hit the guard below and return a fallback
 * string (the return statement was dropped by this extraction).
 */
1177 static const char *rps_power_to_str(unsigned int power)
1179 static const char * const strings[] = {
1180 [LOW_POWER] = "low power",
1181 [BETWEEN] = "mixed",
1182 [HIGH_POWER] = "high power",
1185 if (power >= ARRAY_SIZE(strings) || !strings[power])
1188 return strings[power];
/*
 * i915_rps_boost_info - debugfs summary of GPU frequency (RPS) state:
 * enable/busy flags, requested vs. actual frequency, the hard/soft
 * frequency limits, and - when awake on gen6+ - the autotuning up/down
 * event counters read directly with forcewake held.
 */
1191 static int i915_rps_boost_info(struct seq_file *m, void *data)
1193 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1194 struct intel_rps *rps = &dev_priv->gt.rps;
1196 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1197 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1198 seq_printf(m, "Boosts outstanding? %d\n",
1199 atomic_read(&rps->num_waiters));
1200 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1201 seq_printf(m, "Frequency requested %d, actual %d\n",
1202 intel_gpu_freq(rps, rps->cur_freq),
1203 intel_rps_read_actual_frequency(rps));
1204 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1205 intel_gpu_freq(rps, rps->min_freq),
1206 intel_gpu_freq(rps, rps->min_freq_softlimit),
1207 intel_gpu_freq(rps, rps->max_freq_softlimit),
1208 intel_gpu_freq(rps, rps->max_freq));
1209 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
1210 intel_gpu_freq(rps, rps->idle_freq),
1211 intel_gpu_freq(rps, rps->efficient_freq),
1212 intel_gpu_freq(rps, rps->boost_freq));
1214 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
/* Only sample the autotuning counters while RPS is live and GT awake. */
1216 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1218 u32 rpdown, rpdownei;
/* _FW reads bypass the forcewake bookkeeping, so take it explicitly. */
1220 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1221 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1222 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1223 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1224 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1225 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1227 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1228 rps_power_to_str(rps->power.mode));
/* Guard against divide-by-zero when the interval counters are 0. */
1229 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
1230 rpup && rpupei ? 100 * rpup / rpupei : 0,
1231 rps->power.up_threshold);
1232 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
1233 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1234 rps->power.down_threshold);
1236 seq_puts(m, "\nRPS Autotuning inactive\n");
/*
 * i915_llc - debugfs report of last-level-cache presence and the size of
 * the embedded DRAM/eLLC. Gen9+ renamed eLLC to eDRAM, hence the label
 * switch on gen > 8.
 */
1242 static int i915_llc(struct seq_file *m, void *data)
1244 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1245 const bool edram = INTEL_GEN(dev_priv) > 8;
1247 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1248 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1249 dev_priv->edram_size_mb);
/*
 * i915_huc_load_status_info - dump HuC firmware state plus the live
 * HUC_STATUS2 register (read under a runtime-pm wakeref). Bails early
 * when the platform has no GT microcontrollers.
 */
1254 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1256 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1257 intel_wakeref_t wakeref;
1258 struct drm_printer p;
1260 if (!HAS_GT_UC(dev_priv))
1263 p = drm_seq_file_printer(m);
1264 intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1266 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1267 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
/*
 * i915_guc_load_status_info - dump GuC firmware state and decode the
 * GUC_STATUS register fields (bootrom / uKernel / MIA core), then print
 * the 16 GuC soft-scratch registers. Hardware reads are done under a
 * runtime-pm wakeref.
 */
1272 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1274 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1275 intel_wakeref_t wakeref;
1276 struct drm_printer p;
1278 if (!HAS_GT_UC(dev_priv))
1281 p = drm_seq_file_printer(m);
1282 intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1284 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1285 u32 tmp = I915_READ(GUC_STATUS);
1288 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1289 seq_printf(m, "\tBootrom status = 0x%x\n",
1290 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1291 seq_printf(m, "\tuKernel status = 0x%x\n",
1292 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1293 seq_printf(m, "\tMIA Core status = 0x%x\n",
1294 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1295 seq_puts(m, "\nScratch registers:\n");
1296 for (i = 0; i < 16; i++) {
1297 seq_printf(m, "\t%2d: \t0x%x\n",
1298 i, I915_READ(SOFT_SCRATCH(i)));
/*
 * stringify_guc_log_type - return a short name for a GuC log buffer type.
 * NOTE(review): the return type line and the per-case return strings were
 * dropped by this extraction; cases shown are ISR, DPC and crash dump.
 */
1306 stringify_guc_log_type(enum guc_log_buffer_type type)
1309 case GUC_ISR_LOG_BUFFER:
1311 case GUC_DPC_LOG_BUFFER:
1313 case GUC_CRASH_DUMP_LOG_BUFFER:
/*
 * i915_guc_log_info - print GuC log-relay statistics: overall relay-full
 * count plus per-buffer-type flush and overflow counters. Prints a notice
 * and bails when the relay has not been created.
 */
1322 static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
1324 enum guc_log_buffer_type type;
1326 if (!intel_guc_log_relay_created(log)) {
1327 seq_puts(m, "GuC log relay not created\n");
1331 seq_puts(m, "GuC logging stats:\n");
1333 seq_printf(m, "\tRelay full count: %u\n",
1334 log->relay.full_count);
/* One stats line per log buffer type (ISR, DPC, crash dump). */
1336 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1337 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1338 stringify_guc_log_type(type),
1339 log->stats[type].flush,
1340 log->stats[type].sampled_overflow);
/*
 * i915_guc_info - top-level GuC debugfs entry; currently only forwards to
 * the log statistics. No-op when the GuC is not in use.
 */
1344 static int i915_guc_info(struct seq_file *m, void *data)
1346 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1347 struct intel_uc *uc = &dev_priv->gt.uc;
1349 if (!intel_uc_uses_guc(uc))
1352 i915_guc_log_info(m, &uc->guc.log);
1354 /* Add more as required ... */
/*
 * i915_guc_stage_pool - walk the GuC stage-descriptor pool and dump every
 * active descriptor (ids, priority, doorbell, workqueue) plus the per-engine
 * execlist context (LRC) details. Only meaningful with GuC submission.
 */
1359 static int i915_guc_stage_pool(struct seq_file *m, void *data)
1361 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1362 struct intel_uc *uc = &dev_priv->gt.uc;
1363 struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
1366 if (!intel_uc_uses_guc_submission(uc))
1369 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1370 struct intel_engine_cs *engine;
/* Skip slots not marked active by the GuC. */
1372 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1375 seq_printf(m, "GuC stage descriptor %u:\n", index);
1376 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1377 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1378 seq_printf(m, "\tPriority: %d\n", desc->priority);
1379 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1380 seq_printf(m, "\tEngines used: 0x%x\n",
1381 desc->engines_used);
1382 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1383 desc->db_trigger_phy,
1384 desc->db_trigger_cpu,
1385 desc->db_trigger_uk);
1386 seq_printf(m, "\tProcess descriptor: 0x%x\n",
1387 desc->process_desc);
1388 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1389 desc->wq_addr, desc->wq_size);
/* Per-engine LRC state recorded in this descriptor. */
1392 for_each_uabi_engine(engine, dev_priv) {
1393 u32 guc_engine_id = engine->guc_id;
1394 struct guc_execlist_context *lrc =
1395 &desc->lrc[guc_engine_id];
1397 seq_printf(m, "\t%s LRC:\n", engine->name);
1398 seq_printf(m, "\t\tContext desc: 0x%x\n",
1400 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1401 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1402 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1403 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
/*
 * i915_guc_log_dump - hexdump a GuC log buffer, four u32s per line.
 * The info_ent->data flag selects the load-error log instead of the
 * regular log vma. The backing object is pinned WC for the CPU read and
 * unpinned afterwards.
 */
1411 static int i915_guc_log_dump(struct seq_file *m, void *data)
1413 struct drm_info_node *node = m->private;
1414 struct drm_i915_private *dev_priv = node_to_i915(node);
/* Non-NULL ->data marks the "load_err" debugfs variant. */
1415 bool dump_load_err = !!node->info_ent->data;
1416 struct drm_i915_gem_object *obj = NULL;
1420 if (!HAS_GT_UC(dev_priv))
1424 obj = dev_priv->gt.uc.load_err_log;
1425 else if (dev_priv->gt.uc.guc.log.vma)
1426 obj = dev_priv->gt.uc.guc.log.vma->obj;
1431 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1433 DRM_DEBUG("Failed to pin object\n");
1434 seq_puts(m, "(log data unaccessible)\n");
1435 return PTR_ERR(log);
1438 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1439 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1440 *(log + i), *(log + i + 1),
1441 *(log + i + 2), *(log + i + 3));
1445 i915_gem_object_unpin_map(obj);
/*
 * i915_guc_log_level_get - simple_attr getter: report the current GuC log
 * verbosity level; errors out when the GuC is not in use (dropped line).
 */
1450 static int i915_guc_log_level_get(void *data, u64 *val)
1452 struct drm_i915_private *dev_priv = data;
1453 struct intel_uc *uc = &dev_priv->gt.uc;
1455 if (!intel_uc_uses_guc(uc))
1458 *val = intel_guc_log_get_level(&uc->guc.log);
/*
 * i915_guc_log_level_set - simple_attr setter: forward the requested GuC
 * log level to the GuC log code; rejected when the GuC is not in use.
 */
1463 static int i915_guc_log_level_set(void *data, u64 val)
1465 struct drm_i915_private *dev_priv = data;
1466 struct intel_uc *uc = &dev_priv->gt.uc;
1468 if (!intel_uc_uses_guc(uc))
1471 return intel_guc_log_set_level(&uc->guc.log, val);
/* debugfs attribute wiring the get/set pair above. */
1474 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1475 i915_guc_log_level_get, i915_guc_log_level_set,
/*
 * i915_guc_log_relay_open - debugfs open hook for the GuC log relay;
 * requires a ready GuC, stashes the log pointer in private_data for the
 * write hook, then opens the relay.
 */
1478 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1480 struct drm_i915_private *i915 = inode->i_private;
1481 struct intel_guc *guc = &i915->gt.uc.guc;
1482 struct intel_guc_log *log = &guc->log;
1484 if (!intel_guc_is_ready(guc))
1487 file->private_data = log;
1489 return intel_guc_log_relay_open(log);
/*
 * i915_guc_log_relay_write - debugfs write hook: parse an integer from
 * userspace; 1 starts the relay, anything else flushes it.
 */
1493 i915_guc_log_relay_write(struct file *filp,
1494 const char __user *ubuf,
1498 struct intel_guc_log *log = filp->private_data;
1502 ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1507 * Enable and start the guc log relay on value of 1.
1508 * Flush log relay for any other value.
1511 ret = intel_guc_log_relay_start(log);
1513 intel_guc_log_relay_flush(log);
/*
 * i915_guc_log_relay_release - debugfs release hook: close the GuC log
 * relay opened by i915_guc_log_relay_open().
 */
1518 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1520 struct drm_i915_private *i915 = inode->i_private;
1521 struct intel_guc *guc = &i915->gt.uc.guc;
1523 intel_guc_log_relay_close(&guc->log);
/* File operations for the i915_guc_log_relay debugfs node. */
1527 static const struct file_operations i915_guc_log_relay_fops = {
1528 .owner = THIS_MODULE,
1529 .open = i915_guc_log_relay_open,
1530 .write = i915_guc_log_relay_write,
1531 .release = i915_guc_log_relay_release,
/*
 * i915_runtime_pm_status - debugfs summary of runtime power management:
 * wakeref-based runtime status, GT idle state, IRQ state, PM usage count
 * (or a CONFIG_PM-disabled notice), PCI power state, and - with
 * DEBUG_RUNTIME_PM - the wakeref tracking dump.
 */
1534 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
1536 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1537 struct pci_dev *pdev = dev_priv->drm.pdev;
1539 if (!HAS_RUNTIME_PM(dev_priv))
1540 seq_puts(m, "Runtime power management not supported\n");
1542 seq_printf(m, "Runtime power status: %s\n",
1543 enableddisabled(!dev_priv->power_domains.wakeref));
1545 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
1546 seq_printf(m, "IRQs disabled: %s\n",
1547 yesno(!intel_irqs_enabled(dev_priv)));
/* usage_count line is under #ifdef CONFIG_PM in the full file. */
1549 seq_printf(m, "Usage count: %d\n",
1550 atomic_read(&dev_priv->drm.dev->power.usage_count));
1552 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
1554 seq_printf(m, "PCI device power state: %s [%d]\n",
1555 pci_power_name(pdev->current_state),
1556 pdev->current_state);
1558 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
1559 struct drm_printer p = drm_seq_file_printer(m);
1561 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
/*
 * i915_engine_info - debugfs dump of per-engine state: GT awake/wakeref
 * count, CS timestamp frequency, then the full intel_engine_dump() for
 * every uabi engine. Holds a runtime-pm wakeref across the dump.
 */
1567 static int i915_engine_info(struct seq_file *m, void *unused)
1569 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1570 struct intel_engine_cs *engine;
1571 intel_wakeref_t wakeref;
1572 struct drm_printer p;
1574 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1576 seq_printf(m, "GT awake? %s [%d]\n",
1577 yesno(dev_priv->gt.awake),
1578 atomic_read(&dev_priv->gt.wakeref.count));
1579 seq_printf(m, "CS timestamp frequency: %u kHz\n",
1580 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
1582 p = drm_seq_file_printer(m);
1583 for_each_uabi_engine(engine, dev_priv)
1584 intel_engine_dump(engine, &p, "%s\n", engine->name);
1586 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * i915_rcs_topology - debugfs dump of the slice/subslice/EU topology via
 * the shared device-info printer.
 */
1591 static int i915_rcs_topology(struct seq_file *m, void *unused)
1593 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1594 struct drm_printer p = drm_seq_file_printer(m);
1596 intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
/*
 * i915_shrinker_info - debugfs report of the GEM shrinker's tuning
 * parameters (seeks and batch size).
 */
1601 static int i915_shrinker_info(struct seq_file *m, void *unused)
1603 struct drm_i915_private *i915 = node_to_i915(m->private);
1605 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
1606 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
/*
 * i915_wa_registers - debugfs listing of the context workarounds applied
 * per engine: register offset, value and mask for each entry in the
 * engine's ctx_wa_list.
 */
1611 static int i915_wa_registers(struct seq_file *m, void *unused)
1613 struct drm_i915_private *i915 = node_to_i915(m->private);
1614 struct intel_engine_cs *engine;
1616 for_each_uabi_engine(engine, i915) {
1617 const struct i915_wa_list *wal = &engine->ctx_wa_list;
1618 const struct i915_wa *wa;
1625 seq_printf(m, "%s: Workarounds applied: %u\n",
1626 engine->name, count);
1628 for (wa = wal->list; count--; wa++)
1629 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
1630 i915_mmio_reg_offset(wa->reg),
1633 seq_printf(m, "\n");
/*
 * i915_wedged_get - simple_attr getter: reflect whether the GT is
 * terminally wedged (remaining logic dropped by this extraction).
 */
1640 i915_wedged_get(void *data, u64 *val)
1642 struct drm_i915_private *i915 = data;
1643 int ret = intel_gt_terminally_wedged(&i915->gt);
/*
 * i915_wedged_set - simple_attr setter: manually declare engines hung.
 * Waits for any in-flight reset to finish, then raises a GPU error with
 * the user-supplied engine mask and requests error capture.
 */
1658 i915_wedged_set(void *data, u64 val)
1660 struct drm_i915_private *i915 = data;
1662 /* Flush any previous reset before applying for a new one */
1663 wait_event(i915->gt.reset.queue,
1664 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
1666 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
1667 "Manually set wedged engine mask = %llx", val);
/* debugfs attribute wiring the wedged get/set pair. */
1671 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1672 i915_wedged_get, i915_wedged_set,
/*
 * i915_perf_noa_delay_set - simple_attr setter for the perf NOA
 * programming delay. Rejects values whose CS-timestamp conversion would
 * overflow 32 bits (would stall the CS wait forever).
 */
1676 i915_perf_noa_delay_set(void *data, u64 val)
1678 struct drm_i915_private *i915 = data;
1679 const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
1682 * This would lead to infinite waits as we're doing timestamp
1683 * difference on the CS with only 32bits.
1685 if (val > mul_u32_u32(U32_MAX, clk))
1688 atomic64_set(&i915->perf.noa_programming_delay, val);
/*
 * i915_perf_noa_delay_get - simple_attr getter: report the currently
 * configured NOA programming delay.
 */
1693 i915_perf_noa_delay_get(void *data, u64 *val)
1695 struct drm_i915_private *i915 = data;
1697 *val = atomic64_read(&i915->perf.noa_programming_delay);
/* debugfs attribute wiring the NOA-delay get/set pair. */
1701 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
1702 i915_perf_noa_delay_get,
1703 i915_perf_noa_delay_set,
/*
 * Bitmask flags accepted by the i915_gem_drop_caches debugfs file; each
 * bit requests one category of cache/state to be dropped. DROP_ALL ORs
 * them together (middle terms dropped by this extraction).
 */
1706 #define DROP_UNBOUND BIT(0)
1707 #define DROP_BOUND BIT(1)
1708 #define DROP_RETIRE BIT(2)
1709 #define DROP_ACTIVE BIT(3)
1710 #define DROP_FREED BIT(4)
1711 #define DROP_SHRINK_ALL BIT(5)
1712 #define DROP_IDLE BIT(6)
1713 #define DROP_RESET_ACTIVE BIT(7)
1714 #define DROP_RESET_SEQNO BIT(8)
1715 #define DROP_RCU BIT(9)
1716 #define DROP_ALL (DROP_UNBOUND | \
1723 DROP_RESET_ACTIVE | \
1724 DROP_RESET_SEQNO | \
/*
 * i915_drop_caches_get - simple_attr getter; presumably reports DROP_ALL
 * as the supported mask (body dropped by this extraction) — TODO confirm.
 */
1727 i915_drop_caches_get(void *data, u64 *val)
/*
 * gt_drop_caches - GT-side portion of the drop_caches handler: optionally
 * wedge a stuck GT, retire requests, wait for idle (requests and/or power)
 * and finally unwedge via a full error-handling reset when requested.
 */
1734 gt_drop_caches(struct intel_gt *gt, u64 val)
/* If the engines never go idle, declare the GT wedged before resetting. */
1738 if (val & DROP_RESET_ACTIVE &&
1739 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
1740 intel_gt_set_wedged(gt);
1742 if (val & DROP_RETIRE)
1743 intel_gt_retire_requests(gt);
1745 if (val & (DROP_IDLE | DROP_ACTIVE)) {
1746 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1751 if (val & DROP_IDLE) {
1752 ret = intel_gt_pm_wait_for_idle(gt);
/* Recover from the deliberate wedge above with a full reset. */
1757 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
1758 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
/*
 * i915_drop_caches_set - simple_attr setter for i915_gem_drop_caches:
 * forwards GT-related bits to gt_drop_caches(), then shrinks bound/unbound
 * objects under fs_reclaim annotations and optionally drains freed objects.
 */
1764 i915_drop_caches_set(void *data, u64 val)
1766 struct drm_i915_private *i915 = data;
1769 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
1770 val, val & DROP_ALL);
1772 ret = gt_drop_caches(&i915->gt, val);
/* Annotate that the following may reclaim memory, for lockdep's benefit. */
1776 fs_reclaim_acquire(GFP_KERNEL);
1777 if (val & DROP_BOUND)
1778 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
1780 if (val & DROP_UNBOUND)
1781 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
1783 if (val & DROP_SHRINK_ALL)
1784 i915_gem_shrink_all(i915);
1785 fs_reclaim_release(GFP_KERNEL);
1790 if (val & DROP_FREED)
1791 i915_gem_drain_freed_objects(i915);
/* debugfs attribute wiring the drop-caches get/set pair. */
1796 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
1797 i915_drop_caches_get, i915_drop_caches_set,
/*
 * i915_cache_sharing_get - simple_attr getter: read the MBC snoop-control
 * field (SNPCR) on gen6/7 under a runtime-pm wakeref. Other gens are
 * rejected by the guard below.
 */
1801 i915_cache_sharing_get(void *data, u64 *val)
1803 struct drm_i915_private *dev_priv = data;
1804 intel_wakeref_t wakeref;
1807 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
1810 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1811 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1813 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
/*
 * i915_cache_sharing_set - simple_attr setter: read-modify-write the MBC
 * snoop-control field with the user-supplied policy, gen6/7 only, under a
 * runtime-pm wakeref.
 */
1819 i915_cache_sharing_set(void *data, u64 val)
1821 struct drm_i915_private *dev_priv = data;
1822 intel_wakeref_t wakeref;
1824 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
1830 drm_dbg(&dev_priv->drm,
1831 "Manually setting uncore sharing to %llu\n", val);
1832 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1835 /* Update the cache sharing policy here as well */
1836 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1837 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1838 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
1839 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
/*
 * intel_sseu_copy_subslices - copy one slice's subslice mask bytes
 * (ss_stride wide) from a source sseu_dev_info into to_mask at the same
 * slice offset.
 */
1846 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
1849 int offset = slice * sseu->ss_stride;
1851 memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
/* debugfs attribute wiring the cache-sharing get/set pair above. */
1854 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
1855 i915_cache_sharing_get, i915_cache_sharing_set,
/*
 * cherryview_sseu_device_status - read CHV power-signal registers and fill
 * in the live SSEU state: which subslices are powered and how many EUs
 * each contributes (2 EUs per un-gated PG group).
 */
1858 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
1859 struct sseu_dev_info *sseu)
1862 const int ss_max = SS_MAX;
1863 u32 sig1[SS_MAX], sig2[SS_MAX];
1866 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
1867 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
1868 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
1869 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
1871 for (ss = 0; ss < ss_max; ss++) {
1872 unsigned int eu_cnt;
1874 if (sig1[ss] & CHV_SS_PG_ENABLE)
1875 /* skip disabled subslice */
/* CHV has a single slice; record the subslice as present. */
1878 sseu->slice_mask = BIT(0);
1879 sseu->subslice_mask[0] |= BIT(ss);
1880 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
1881 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
1882 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
1883 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
1884 sseu->eu_total += eu_cnt;
1885 sseu->eu_per_subslice = max_t(unsigned int,
1886 sseu->eu_per_subslice, eu_cnt);
/*
 * gen10_sseu_device_status - read gen10 power-gating ACK registers and
 * derive the live slice/subslice/EU masks and totals. Subslice masks come
 * from static runtime info once a slice is acked; EU counts are decoded
 * from the per-pair EU ACK registers.
 */
1891 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
1892 struct sseu_dev_info *sseu)
1895 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
1896 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
1899 for (s = 0; s < info->sseu.max_slices; s++) {
1901 * FIXME: Valid SS Mask respects the spec and read
1902 * only valid bits for those registers, excluding reserved
1903 * although this seems wrong because it would leave many
1904 * subslices without ACK.
1906 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
1907 GEN10_PGCTL_VALID_SS_MASK(s);
1908 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
1909 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
/* ACK bit groups for subslices A and B within an EU register. */
1912 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
1913 GEN9_PGCTL_SSA_EU19_ACK |
1914 GEN9_PGCTL_SSA_EU210_ACK |
1915 GEN9_PGCTL_SSA_EU311_ACK;
1916 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
1917 GEN9_PGCTL_SSB_EU19_ACK |
1918 GEN9_PGCTL_SSB_EU210_ACK |
1919 GEN9_PGCTL_SSB_EU311_ACK;
1921 for (s = 0; s < info->sseu.max_slices; s++) {
1922 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
1923 /* skip disabled slice */
1926 sseu->slice_mask |= BIT(s);
1927 intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
1929 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
1930 unsigned int eu_cnt;
1932 if (info->sseu.has_subslice_pg &&
1933 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
1934 /* skip disabled subslice */
/* Each ACK bit represents a pair of EUs. */
1937 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
1939 sseu->eu_total += eu_cnt;
1940 sseu->eu_per_subslice = max_t(unsigned int,
1941 sseu->eu_per_subslice,
/*
 * gen9_sseu_device_status - gen9 variant of the live SSEU decode. Same
 * register layout as gen10, but the subslice mask source differs by
 * platform: GEN9_BC copies the static mask per acked slice, while GEN9_LP
 * (Broxton-class) sets mask bits from the subslice ACKs directly.
 */
1948 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
1949 struct sseu_dev_info *sseu)
1952 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
1953 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
1956 for (s = 0; s < info->sseu.max_slices; s++) {
1957 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
1958 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
1959 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
1962 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
1963 GEN9_PGCTL_SSA_EU19_ACK |
1964 GEN9_PGCTL_SSA_EU210_ACK |
1965 GEN9_PGCTL_SSA_EU311_ACK;
1966 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
1967 GEN9_PGCTL_SSB_EU19_ACK |
1968 GEN9_PGCTL_SSB_EU210_ACK |
1969 GEN9_PGCTL_SSB_EU311_ACK;
1971 for (s = 0; s < info->sseu.max_slices; s++) {
1972 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
1973 /* skip disabled slice */
1976 sseu->slice_mask |= BIT(s);
1978 if (IS_GEN9_BC(dev_priv))
1979 intel_sseu_copy_subslices(&info->sseu, s,
1980 sseu->subslice_mask);
1982 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
1983 unsigned int eu_cnt;
1984 u8 ss_idx = s * info->sseu.ss_stride +
1987 if (IS_GEN9_LP(dev_priv)) {
1988 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
1989 /* skip disabled subslice */
1992 sseu->subslice_mask[ss_idx] |=
1993 BIT(ss % BITS_PER_BYTE);
/* Each ACK bit represents a pair of EUs. */
1996 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
1998 sseu->eu_total += eu_cnt;
1999 sseu->eu_per_subslice = max_t(unsigned int,
2000 sseu->eu_per_subslice,
/*
 * bdw_sseu_device_status - Broadwell live SSEU decode: read the slice
 * mask from GEN8_GT_SLICE_INFO, copy static subslice masks for each
 * enabled slice, compute the EU total and subtract the fused-off EUs
 * recorded in subslice_7eu[].
 */
2007 static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
2008 struct sseu_dev_info *sseu)
2010 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2011 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
2014 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
2016 if (sseu->slice_mask) {
2017 sseu->eu_per_subslice = info->sseu.eu_per_subslice;
2018 for (s = 0; s < fls(sseu->slice_mask); s++)
2019 intel_sseu_copy_subslices(&info->sseu, s,
2020 sseu->subslice_mask);
2021 sseu->eu_total = sseu->eu_per_subslice *
2022 intel_sseu_subslice_total(sseu);
2024 /* subtract fused off EU(s) from enabled slice(s) */
2025 for (s = 0; s < fls(sseu->slice_mask); s++) {
2026 u8 subslice_7eu = info->sseu.subslice_7eu[s];
2028 sseu->eu_total -= hweight8(subslice_7eu);
/*
 * i915_print_sseu_info - print one sseu_dev_info block, labelled either
 * "Available" (static capabilities) or "Enabled" (live status). The
 * power-gating capability lines are only printed for the static case.
 */
2033 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
2034 const struct sseu_dev_info *sseu)
2036 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2037 const char *type = is_available_info ? "Available" : "Enabled";
2040 seq_printf(m, " %s Slice Mask: %04x\n", type,
2042 seq_printf(m, " %s Slice Total: %u\n", type,
2043 hweight8(sseu->slice_mask));
2044 seq_printf(m, " %s Subslice Total: %u\n", type,
2045 intel_sseu_subslice_total(sseu));
2046 for (s = 0; s < fls(sseu->slice_mask); s++) {
2047 seq_printf(m, " %s Slice%i subslices: %u\n", type,
2048 s, intel_sseu_subslices_per_slice(sseu, s));
2050 seq_printf(m, " %s EU Total: %u\n", type,
2052 seq_printf(m, " %s EU Per Subslice: %u\n", type,
2053 sseu->eu_per_subslice);
/* Capability flags below only make sense for the static info dump. */
2055 if (!is_available_info)
2058 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
2059 if (HAS_POOLED_EU(dev_priv))
2060 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
2062 seq_printf(m, " Has Slice Power Gating: %s\n",
2063 yesno(sseu->has_slice_pg));
2064 seq_printf(m, " Has Subslice Power Gating: %s\n",
2065 yesno(sseu->has_subslice_pg));
2066 seq_printf(m, " Has EU Power Gating: %s\n",
2067 yesno(sseu->has_eu_pg));
/*
 * i915_sseu_status - debugfs entry combining static SSEU capabilities with
 * the live device status, selected per platform (CHV/BDW/gen9/gen10+) and
 * read under a runtime-pm wakeref. Gen < 8 is not supported.
 */
2070 static int i915_sseu_status(struct seq_file *m, void *unused)
2072 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2073 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2074 struct sseu_dev_info sseu;
2075 intel_wakeref_t wakeref;
2077 if (INTEL_GEN(dev_priv) < 8)
2080 seq_puts(m, "SSEU Device Info\n");
2081 i915_print_sseu_info(m, true, &info->sseu);
2083 seq_puts(m, "SSEU Device Status\n");
/* Start from a zeroed struct; only the max dimensions carry over. */
2084 memset(&sseu, 0, sizeof(sseu));
2085 intel_sseu_set_info(&sseu, info->sseu.max_slices,
2086 info->sseu.max_subslices,
2087 info->sseu.max_eus_per_subslice);
2089 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2090 if (IS_CHERRYVIEW(dev_priv))
2091 cherryview_sseu_device_status(dev_priv, &sseu);
2092 else if (IS_BROADWELL(dev_priv))
2093 bdw_sseu_device_status(dev_priv, &sseu);
2094 else if (IS_GEN(dev_priv, 9))
2095 gen9_sseu_device_status(dev_priv, &sseu);
2096 else if (INTEL_GEN(dev_priv) >= 10)
2097 gen10_sseu_device_status(dev_priv, &sseu);
2100 i915_print_sseu_info(m, false, &sseu);
2105 static int i915_forcewake_open(struct inode *inode, struct file *file)
2107 struct drm_i915_private *i915 = inode->i_private;
2108 struct intel_gt *gt = &i915->gt;
2110 atomic_inc(>->user_wakeref);
2111 intel_gt_pm_get(gt);
2112 if (INTEL_GEN(i915) >= 6)
2113 intel_uncore_forcewake_user_get(gt->uncore);
2118 static int i915_forcewake_release(struct inode *inode, struct file *file)
2120 struct drm_i915_private *i915 = inode->i_private;
2121 struct intel_gt *gt = &i915->gt;
2123 if (INTEL_GEN(i915) >= 6)
2124 intel_uncore_forcewake_user_put(&i915->uncore);
2125 intel_gt_pm_put(gt);
2126 atomic_dec(>->user_wakeref);
/* File operations for the i915_forcewake_user debugfs node. */
2131 static const struct file_operations i915_forcewake_fops = {
2132 .owner = THIS_MODULE,
2133 .open = i915_forcewake_open,
2134 .release = i915_forcewake_release,
/*
 * Table of read-only debugfs entries registered via
 * drm_debugfs_create_files(); the optional fourth field is passed through
 * as info_ent->data (used by the load_err log dump variant).
 */
2137 static const struct drm_info_list i915_debugfs_list[] = {
2138 {"i915_capabilities", i915_capabilities, 0},
2139 {"i915_gem_objects", i915_gem_object_info, 0},
2140 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2141 {"i915_gem_interrupt", i915_interrupt_info, 0},
2142 {"i915_guc_info", i915_guc_info, 0},
2143 {"i915_guc_load_status", i915_guc_load_status_info, 0},
2144 {"i915_guc_log_dump", i915_guc_log_dump, 0},
2145 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
2146 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
2147 {"i915_huc_load_status", i915_huc_load_status_info, 0},
2148 {"i915_frequency_info", i915_frequency_info, 0},
2149 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2150 {"i915_context_status", i915_context_status, 0},
2151 {"i915_swizzle_info", i915_swizzle_info, 0},
2152 {"i915_llc", i915_llc, 0},
2153 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
2154 {"i915_engine_info", i915_engine_info, 0},
2155 {"i915_rcs_topology", i915_rcs_topology, 0},
2156 {"i915_shrinker_info", i915_shrinker_info, 0},
2157 {"i915_wa_registers", i915_wa_registers, 0},
2158 {"i915_sseu_status", i915_sseu_status, 0},
2159 {"i915_rps_boost_info", i915_rps_boost_info, 0},
2161 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
/*
 * Writable debugfs entries with custom file_operations; error-state nodes
 * are compiled in only with CONFIG_DRM_I915_CAPTURE_ERROR.
 */
2163 static const struct i915_debugfs_files {
2165 const struct file_operations *fops;
2166 } i915_debugfs_files[] = {
2167 {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
2168 {"i915_wedged", &i915_wedged_fops},
2169 {"i915_cache_sharing", &i915_cache_sharing_fops},
2170 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2171 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
2172 {"i915_error_state", &i915_error_state_fops},
2173 {"i915_gpu_info", &i915_gpu_info_fops},
2175 {"i915_guc_log_level", &i915_guc_log_level_fops},
2176 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
2179 int i915_debugfs_register(struct drm_i915_private *dev_priv)
2181 struct drm_minor *minor = dev_priv->drm.primary;
2184 i915_debugfs_params(dev_priv);
2186 debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
2187 to_i915(minor->dev), &i915_forcewake_fops);
2188 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2189 debugfs_create_file(i915_debugfs_files[i].name,
2191 minor->debugfs_root,
2192 to_i915(minor->dev),
2193 i915_debugfs_files[i].fops);
2196 return drm_debugfs_create_files(i915_debugfs_list,
2197 I915_DEBUGFS_ENTRIES,
2198 minor->debugfs_root, minor);