/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_trace.h"
#include "intel_csr.h"
#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
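/* debugfs: summarise device generation, platform, PCH type, capability flags
 * and the module parameters the driver was loaded with.
 */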
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		return buf;
	}
}
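/*
 * Print a one-line summary of a GEM object: flags, size, domains, cache
 * level, each bound VMA (including GGTT view details) and the last engine
 * that wrote to it.
 */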
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 active, inactive;
	u64 closed;
};
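/* Accumulate one object's size and binding state into a struct file_stats. */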
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
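/*
 * Walk every context, count the objects pinned for its engines into a
 * kernel bucket, and print a per-client summary for user-owned contexts.
 */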
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = ctx->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	int ret;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	print_context_stats(m, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return 0;
}
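/* Dump the GEN8+ display engine interrupt registers for each powered pipe. */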
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
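/* debugfs: dump the interrupt registers relevant to the current platform. */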
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:   %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:     %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:   %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:     %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:        %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:      %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:          %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR_OR_NULL(gpu))
		return PTR_ERR(gpu) ?: -ENODEV;

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif
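/* debugfs: report the current and allowed GPU frequencies (RPS) per platform. */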
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret;
}
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
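/* debugfs: hangcheck state plus per-engine ACTHD and INSTDONE snapshots. */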
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
	if (test_bit(I915_WEDGED, &gt->reset.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	if (timer_pending(&gt->hangcheck.work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&gt->hangcheck.work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(gt->awake));

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		for_each_engine(engine, i915, id) {
			struct intel_instdone instdone;

			seq_printf(m, "%s: %d ms ago\n",
				   engine->name,
				   jiffies_to_msecs(jiffies -
						    engine->hangcheck.action_timestamp));

			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
				   (long long)engine->hangcheck.acthd,
				   intel_engine_get_active_head(engine));

			intel_engine_get_instdone(engine, &instdone);

			seq_puts(m, "\tinstdone read =\n");
			i915_instdone_info(i915, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");
			i915_instdone_info(i915, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
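/*
 * Render C-state (DRPC) reporting: the Ironlake, ValleyView/CherryView and
 * GEN6+ variants below decode the platform-specific RC6 control registers.
 */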
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
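/* debugfs: framebuffer compression (FBC) state and compression activity. */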
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
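/*
 * debugfs: print the GPU-to-ring/CPU frequency table read back from pcode;
 * only meaningful on parts with an LLC shared with the CPU.
 */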
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;
		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
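/* debugfs: current RPS state, outstanding boosts and the autotuning window. */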
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
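/*
 * The helpers below expose GuC logging statistics, the submission client
 * and stage-descriptor pool, raw log dumps, the log verbosity control and
 * the log relay interface.
 */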
static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct intel_guc_client *client = guc->execbuf_client;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority,
		   client->stage_id,
		   client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);
	/* Add more as required ... */

	return 0;
}
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);

	return 0;
}

static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;
	struct intel_guc_log *log = &guc->log;

	if (!intel_guc_is_running(guc))
		return -ENODEV;

	file->private_data = log;

	return intel_guc_log_relay_open(log);
}

static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}

static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;

	intel_guc_log_relay_close(&guc->log);

	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
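/*
 * eDP PSR (panel self refresh) reporting: sink state read over DPCD and
 * source (transcoder) state decoded from the PSR status registers.
 */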
2073 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2076 static const char * const sink_status[] = {
2078 "transition to active, capture and display",
2079 "active, display from RFB",
2080 "active, capture and display on sink device timings",
2081 "transition to inactive, capture and display, timing re-sync",
2084 "sink internal error",
2086 struct drm_connector *connector = m->private;
2087 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2088 struct intel_dp *intel_dp =
2089 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2092 if (!CAN_PSR(dev_priv)) {
2093 seq_puts(m, "PSR Unsupported\n");
2097 if (connector->status != connector_status_connected)
2100 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2103 const char *str = "unknown";
2105 val &= DP_PSR_SINK_STATE_MASK;
2106 if (val < ARRAY_SIZE(sink_status))
2107 str = sink_status[val];
2108 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2115 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2118 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2120 u32 val, status_val;
2121 const char *status = "unknown";
2123 if (dev_priv->psr.psr2_enabled) {
2124 static const char * const live_status[] = {
2137 val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
2138 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2139 EDP_PSR2_STATUS_STATE_SHIFT;
2140 if (status_val < ARRAY_SIZE(live_status))
2141 status = live_status[status_val];
2143 static const char * const live_status[] = {
2153 val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
2154 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2155 EDP_PSR_STATUS_STATE_SHIFT;
2156 if (status_val < ARRAY_SIZE(live_status))
2157 status = live_status[status_val];
2160 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2163 static int i915_edp_psr_status(struct seq_file *m, void *data)
2165 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2166 struct i915_psr *psr = &dev_priv->psr;
2167 intel_wakeref_t wakeref;
2172 if (!HAS_PSR(dev_priv))
2175 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2177 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2180 if (!psr->sink_support)
2183 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2184 mutex_lock(&psr->lock);
2187 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2189 status = "disabled";
2190 seq_printf(m, "PSR mode: %s\n", status);
2195 if (psr->psr2_enabled) {
2196 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
2197 enabled = val & EDP_PSR2_ENABLE;
2199 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
2200 enabled = val & EDP_PSR_ENABLE;
2202 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2203 enableddisabled(enabled), val);
2204 psr_source_status(dev_priv, m);
2205 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2206 psr->busy_frontbuffer_bits);
2209 * SKL+ Perf counter is reset to 0 every time DC state is entered
2211 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2212 val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
2213 val &= EDP_PSR_PERF_CNT_MASK;
2214 seq_printf(m, "Performance counter: %u\n", val);
2217 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2218 seq_printf(m, "Last attempted entry at: %lld\n",
2219 psr->last_entry_attempt);
2220 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2223 if (psr->psr2_enabled) {
2224 u32 su_frames_val[3];
2228 * Read all 3 registers beforehand to minimize crossing a
2229 * frame boundary between register reads
2231 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2232 val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
2234 su_frames_val[frame / 3] = val;
2237 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2239 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2242 su_blocks = su_frames_val[frame / 3] &
2243 PSR2_SU_STATUS_MASK(frame);
2244 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2245 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2250 mutex_unlock(&psr->lock);
2251 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
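/*
 * i915_edp_psr_debug: runtime control of the PSR debug flags; writes
 * are passed to intel_psr_debug_set() while holding a runtime-PM
 * wakeref, reads return the current debug mask.
 */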
2257 i915_edp_psr_debug_set(void *data, u64 val)
2259 struct drm_i915_private *dev_priv = data;
2260 intel_wakeref_t wakeref;
2263 if (!CAN_PSR(dev_priv))
2266 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2268 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2270 ret = intel_psr_debug_set(dev_priv, val);
2272 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2278 i915_edp_psr_debug_get(void *data, u64 *val)
2280 struct drm_i915_private *dev_priv = data;
2282 if (!CAN_PSR(dev_priv))
2285 *val = READ_ONCE(dev_priv->psr.debug);
2289 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2290 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2293 static int i915_energy_uJ(struct seq_file *m, void *data)
2295 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2296 unsigned long long power;
2297 intel_wakeref_t wakeref;
2300 if (INTEL_GEN(dev_priv) < 6)
2303 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2306 units = (power & 0x1f00) >> 8;
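/*
 * Bits 12:8 of MSR_RAPL_POWER_UNIT give the energy status unit as
 * 1/2^units Joule, so the (1000000 * power) >> units below converts
 * the raw counter to microjoules.
 */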
2307 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2308 power = I915_READ(MCH_SECP_NRG_STTS);
2310 power = (1000000 * power) >> units; /* convert to uJ */
2311 seq_printf(m, "%llu", power);
2316 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2318 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2319 struct pci_dev *pdev = dev_priv->drm.pdev;
2321 if (!HAS_RUNTIME_PM(dev_priv))
2322 seq_puts(m, "Runtime power management not supported\n");
2324 seq_printf(m, "Runtime power status: %s\n",
2325 enableddisabled(!dev_priv->power_domains.wakeref));
2327 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2328 seq_printf(m, "IRQs disabled: %s\n",
2329 yesno(!intel_irqs_enabled(dev_priv)));
2331 seq_printf(m, "Usage count: %d\n",
2332 atomic_read(&dev_priv->drm.dev->power.usage_count));
2334 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2336 seq_printf(m, "PCI device power state: %s [%d]\n",
2337 pci_power_name(pdev->current_state),
2338 pdev->current_state);
2340 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2341 struct drm_printer p = drm_seq_file_printer(m);
2343 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2349 static int i915_power_domain_info(struct seq_file *m, void *unused)
2351 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2352 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2355 mutex_lock(&power_domains->lock);
2357 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2358 for (i = 0; i < power_domains->power_well_count; i++) {
2359 struct i915_power_well *power_well;
2360 enum intel_display_power_domain power_domain;
2362 power_well = &power_domains->power_wells[i];
2363 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2366 for_each_power_domain(power_domain, power_well->desc->domains)
2367 seq_printf(m, " %-23s %d\n",
2368 intel_display_power_domain_str(power_domain),
2369 power_domains->domain_use_count[power_domain]);
2372 mutex_unlock(&power_domains->lock);
2377 static int i915_dmc_info(struct seq_file *m, void *unused)
2379 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2380 intel_wakeref_t wakeref;
2381 struct intel_csr *csr;
2382 i915_reg_t dc5_reg, dc6_reg = {};
2384 if (!HAS_CSR(dev_priv))
2387 csr = &dev_priv->csr;
2389 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2391 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2392 seq_printf(m, "path: %s\n", csr->fw_path);
2394 if (!csr->dmc_payload)
2397 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2398 CSR_VERSION_MINOR(csr->version));
2400 if (INTEL_GEN(dev_priv) >= 12) {
2401 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2402 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2404 dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2405 SKL_CSR_DC3_DC5_COUNT;
2406 if (!IS_GEN9_LP(dev_priv))
2407 dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2410 seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2412 seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2415 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2416 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2417 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2419 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2424 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2425 struct drm_display_mode *mode)
2429 for (i = 0; i < tabs; i++)
2432 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2435 static void intel_encoder_info(struct seq_file *m,
2436 struct intel_crtc *intel_crtc,
2437 struct intel_encoder *intel_encoder)
2439 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2440 struct drm_device *dev = &dev_priv->drm;
2441 struct drm_crtc *crtc = &intel_crtc->base;
2442 struct intel_connector *intel_connector;
2443 struct drm_encoder *encoder;
2445 encoder = &intel_encoder->base;
2446 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2447 encoder->base.id, encoder->name);
2448 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2449 struct drm_connector *connector = &intel_connector->base;
2450 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2453 drm_get_connector_status_name(connector->status));
2454 if (connector->status == connector_status_connected) {
2455 struct drm_display_mode *mode = &crtc->mode;
2456 seq_printf(m, ", mode:\n");
2457 intel_seq_print_mode(m, 2, mode);
2464 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2466 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2467 struct drm_device *dev = &dev_priv->drm;
2468 struct drm_crtc *crtc = &intel_crtc->base;
2469 struct intel_encoder *intel_encoder;
2470 struct drm_plane_state *plane_state = crtc->primary->state;
2471 struct drm_framebuffer *fb = plane_state->fb;
2474 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2475 fb->base.id, plane_state->src_x >> 16,
2476 plane_state->src_y >> 16, fb->width, fb->height);
2478 seq_puts(m, "\tprimary plane disabled\n");
2479 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2480 intel_encoder_info(m, intel_crtc, intel_encoder);
2483 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2485 struct drm_display_mode *mode = panel->fixed_mode;
2487 seq_printf(m, "\tfixed mode:\n");
2488 intel_seq_print_mode(m, 2, mode);
2491 static void intel_hdcp_info(struct seq_file *m,
2492 struct intel_connector *intel_connector)
2494 bool hdcp_cap, hdcp2_cap;
2496 hdcp_cap = intel_hdcp_capable(intel_connector);
2497 hdcp2_cap = intel_hdcp2_capable(intel_connector);
2500 seq_puts(m, "HDCP1.4 ");
2502 seq_puts(m, "HDCP2.2 ");
2504 if (!hdcp_cap && !hdcp2_cap)
2505 seq_puts(m, "None");
2510 static void intel_dp_info(struct seq_file *m,
2511 struct intel_connector *intel_connector)
2513 struct intel_encoder *intel_encoder = intel_connector->encoder;
2514 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2516 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2517 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2518 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2519 intel_panel_info(m, &intel_connector->panel);
2521 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2523 if (intel_connector->hdcp.shim) {
2524 seq_puts(m, "\tHDCP version: ");
2525 intel_hdcp_info(m, intel_connector);
2529 static void intel_dp_mst_info(struct seq_file *m,
2530 struct intel_connector *intel_connector)
2532 struct intel_encoder *intel_encoder = intel_connector->encoder;
2533 struct intel_dp_mst_encoder *intel_mst =
2534 enc_to_mst(&intel_encoder->base);
2535 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2536 struct intel_dp *intel_dp = &intel_dig_port->dp;
2537 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2538 intel_connector->port);
2540 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2543 static void intel_hdmi_info(struct seq_file *m,
2544 struct intel_connector *intel_connector)
2546 struct intel_encoder *intel_encoder = intel_connector->encoder;
2547 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2549 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2550 if (intel_connector->hdcp.shim) {
2551 seq_puts(m, "\tHDCP version: ");
2552 intel_hdcp_info(m, intel_connector);
2556 static void intel_lvds_info(struct seq_file *m,
2557 struct intel_connector *intel_connector)
2559 intel_panel_info(m, &intel_connector->panel);
2562 static void intel_connector_info(struct seq_file *m,
2563 struct drm_connector *connector)
2565 struct intel_connector *intel_connector = to_intel_connector(connector);
2566 struct intel_encoder *intel_encoder = intel_connector->encoder;
2567 struct drm_display_mode *mode;
2569 seq_printf(m, "connector %d: type %s, status: %s\n",
2570 connector->base.id, connector->name,
2571 drm_get_connector_status_name(connector->status));
2573 if (connector->status == connector_status_disconnected)
2576 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2577 connector->display_info.width_mm,
2578 connector->display_info.height_mm);
2579 seq_printf(m, "\tsubpixel order: %s\n",
2580 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2581 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2586 switch (connector->connector_type) {
2587 case DRM_MODE_CONNECTOR_DisplayPort:
2588 case DRM_MODE_CONNECTOR_eDP:
2589 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2590 intel_dp_mst_info(m, intel_connector);
2592 intel_dp_info(m, intel_connector);
2594 case DRM_MODE_CONNECTOR_LVDS:
2595 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2596 intel_lvds_info(m, intel_connector);
2598 case DRM_MODE_CONNECTOR_HDMIA:
2599 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2600 intel_encoder->type == INTEL_OUTPUT_DDI)
2601 intel_hdmi_info(m, intel_connector);
2607 seq_printf(m, "\tmodes:\n");
2608 list_for_each_entry(mode, &connector->modes, head)
2609 intel_seq_print_mode(m, 2, mode);
2612 static const char *plane_type(enum drm_plane_type type)
2615 case DRM_PLANE_TYPE_OVERLAY:
2617 case DRM_PLANE_TYPE_PRIMARY:
2619 case DRM_PLANE_TYPE_CURSOR:
2622 * Deliberately omitting default: to generate compiler warnings
2623 * when a new drm_plane_type gets added.
2630 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2633 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2634 * but print them all so that any misuse of the values is visible
2636 snprintf(buf, bufsize,
2637 "%s%s%s%s%s%s(0x%08x)",
2638 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2639 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2640 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2641 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2642 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2643 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2647 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2649 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2650 struct drm_device *dev = &dev_priv->drm;
2651 struct intel_plane *intel_plane;
2653 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2654 struct drm_plane_state *state;
2655 struct drm_plane *plane = &intel_plane->base;
2656 struct drm_format_name_buf format_name;
2659 if (!plane->state) {
2660 seq_puts(m, "plane->state is NULL!\n");
2664 state = plane->state;
2667 drm_get_format_name(state->fb->format->format,
2670 sprintf(format_name.str, "N/A");
2673 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
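/*
 * The src_* coordinates are 16.16 fixed point; (frac * 15625) >> 10
 * equals frac * 10^6 / 2^16, i.e. the fractional part scaled to
 * millionths for the %d.%04u output below.
 */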
2675 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2677 plane_type(intel_plane->base.type),
2678 state->crtc_x, state->crtc_y,
2679 state->crtc_w, state->crtc_h,
2680 (state->src_x >> 16),
2681 ((state->src_x & 0xffff) * 15625) >> 10,
2682 (state->src_y >> 16),
2683 ((state->src_y & 0xffff) * 15625) >> 10,
2684 (state->src_w >> 16),
2685 ((state->src_w & 0xffff) * 15625) >> 10,
2686 (state->src_h >> 16),
2687 ((state->src_h & 0xffff) * 15625) >> 10,
2693 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2695 struct intel_crtc_state *pipe_config;
2696 int num_scalers = intel_crtc->num_scalers;
2699 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2701 /* Not all platforms have a scaler */
2703 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2705 pipe_config->scaler_state.scaler_users,
2706 pipe_config->scaler_state.scaler_id);
2708 for (i = 0; i < num_scalers; i++) {
2709 struct intel_scaler *sc =
2710 &pipe_config->scaler_state.scalers[i];
2712 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2713 i, yesno(sc->in_use), sc->mode);
2717 seq_puts(m, "\tNo scalers available on this platform\n");
2721 static int i915_display_info(struct seq_file *m, void *unused)
2723 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2724 struct drm_device *dev = &dev_priv->drm;
2725 struct intel_crtc *crtc;
2726 struct drm_connector *connector;
2727 struct drm_connector_list_iter conn_iter;
2728 intel_wakeref_t wakeref;
2730 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2732 seq_printf(m, "CRTC info\n");
2733 seq_printf(m, "---------\n");
2734 for_each_intel_crtc(dev, crtc) {
2735 struct intel_crtc_state *pipe_config;
2737 drm_modeset_lock(&crtc->base.mutex, NULL);
2738 pipe_config = to_intel_crtc_state(crtc->base.state);
2740 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2741 crtc->base.base.id, pipe_name(crtc->pipe),
2742 yesno(pipe_config->base.active),
2743 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2744 yesno(pipe_config->dither), pipe_config->pipe_bpp);
2746 if (pipe_config->base.active) {
2747 struct intel_plane *cursor =
2748 to_intel_plane(crtc->base.cursor);
2750 intel_crtc_info(m, crtc);
2752 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2753 yesno(cursor->base.state->visible),
2754 cursor->base.state->crtc_x,
2755 cursor->base.state->crtc_y,
2756 cursor->base.state->crtc_w,
2757 cursor->base.state->crtc_h,
2758 cursor->cursor.base);
2759 intel_scaler_info(m, crtc);
2760 intel_plane_info(m, crtc);
2763 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2764 yesno(!crtc->cpu_fifo_underrun_disabled),
2765 yesno(!crtc->pch_fifo_underrun_disabled));
2766 drm_modeset_unlock(&crtc->base.mutex);
2769 seq_printf(m, "\n");
2770 seq_printf(m, "Connector info\n");
2771 seq_printf(m, "--------------\n");
2772 mutex_lock(&dev->mode_config.mutex);
2773 drm_connector_list_iter_begin(dev, &conn_iter);
2774 drm_for_each_connector_iter(connector, &conn_iter)
2775 intel_connector_info(m, connector);
2776 drm_connector_list_iter_end(&conn_iter);
2777 mutex_unlock(&dev->mode_config.mutex);
2779 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2784 static int i915_engine_info(struct seq_file *m, void *unused)
2786 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2787 struct intel_engine_cs *engine;
2788 intel_wakeref_t wakeref;
2789 struct drm_printer p;
2791 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2793 seq_printf(m, "GT awake? %s [%d]\n",
2794 yesno(dev_priv->gt.awake),
2795 atomic_read(&dev_priv->gt.wakeref.count));
2796 seq_printf(m, "CS timestamp frequency: %u kHz\n",
2797 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2799 p = drm_seq_file_printer(m);
2800 for_each_uabi_engine(engine, dev_priv)
2801 intel_engine_dump(engine, &p, "%s\n", engine->name);
2803 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2808 static int i915_rcs_topology(struct seq_file *m, void *unused)
2810 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2811 struct drm_printer p = drm_seq_file_printer(m);
2813 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2818 static int i915_shrinker_info(struct seq_file *m, void *unused)
2820 struct drm_i915_private *i915 = node_to_i915(m->private);
2822 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2823 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2828 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2830 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2831 struct drm_device *dev = &dev_priv->drm;
2834 drm_modeset_lock_all(dev);
2835 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2836 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2838 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2840 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2841 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2842 seq_printf(m, " tracked hardware state:\n");
2843 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
2844 seq_printf(m, " dpll_md: 0x%08x\n",
2845 pll->state.hw_state.dpll_md);
2846 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2847 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2848 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
2849 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2850 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2851 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2852 pll->state.hw_state.mg_refclkin_ctl);
2853 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2854 pll->state.hw_state.mg_clktop2_coreclkctl1);
2855 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2856 pll->state.hw_state.mg_clktop2_hsclkctl);
2857 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2858 pll->state.hw_state.mg_pll_div0);
2859 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2860 pll->state.hw_state.mg_pll_div1);
2861 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2862 pll->state.hw_state.mg_pll_lf);
2863 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2864 pll->state.hw_state.mg_pll_frac_lock);
2865 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2866 pll->state.hw_state.mg_pll_ssc);
2867 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2868 pll->state.hw_state.mg_pll_bias);
2869 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2870 pll->state.hw_state.mg_pll_tdc_coldst_bias);
2872 drm_modeset_unlock_all(dev);
2877 static int i915_wa_registers(struct seq_file *m, void *unused)
2879 struct drm_i915_private *i915 = node_to_i915(m->private);
2880 struct intel_engine_cs *engine;
2882 for_each_uabi_engine(engine, i915) {
2883 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2884 const struct i915_wa *wa;
2891 seq_printf(m, "%s: Workarounds applied: %u\n",
2892 engine->name, count);
2894 for (wa = wal->list; count--; wa++)
2895 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2896 i915_mmio_reg_offset(wa->reg),
2899 seq_printf(m, "\n");
2905 static int i915_ipc_status_show(struct seq_file *m, void *data)
2907 struct drm_i915_private *dev_priv = m->private;
2909 seq_printf(m, "Isochronous Priority Control: %s\n",
2910 yesno(dev_priv->ipc_enabled));
2914 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2916 struct drm_i915_private *dev_priv = inode->i_private;
2918 if (!HAS_IPC(dev_priv))
2921 return single_open(file, i915_ipc_status_show, dev_priv);
2924 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2925 size_t len, loff_t *offp)
2927 struct seq_file *m = file->private_data;
2928 struct drm_i915_private *dev_priv = m->private;
2929 intel_wakeref_t wakeref;
2933 ret = kstrtobool_from_user(ubuf, len, &enable);
2937 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2938 if (!dev_priv->ipc_enabled && enable)
2939 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2940 dev_priv->wm.distrust_bios_wm = true;
2941 dev_priv->ipc_enabled = enable;
2942 intel_enable_ipc(dev_priv);
2948 static const struct file_operations i915_ipc_status_fops = {
2949 .owner = THIS_MODULE,
2950 .open = i915_ipc_status_open,
2952 .llseek = seq_lseek,
2953 .release = single_release,
2954 .write = i915_ipc_status_write
2957 static int i915_ddb_info(struct seq_file *m, void *unused)
2959 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2960 struct drm_device *dev = &dev_priv->drm;
2961 struct skl_ddb_entry *entry;
2962 struct intel_crtc *crtc;
2964 if (INTEL_GEN(dev_priv) < 9)
2967 drm_modeset_lock_all(dev);
2969 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2971 for_each_intel_crtc(&dev_priv->drm, crtc) {
2972 struct intel_crtc_state *crtc_state =
2973 to_intel_crtc_state(crtc->base.state);
2974 enum pipe pipe = crtc->pipe;
2975 enum plane_id plane_id;
2977 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2979 for_each_plane_id_on_crtc(crtc, plane_id) {
2980 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2981 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
2982 entry->start, entry->end,
2983 skl_ddb_entry_size(entry));
2986 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2987 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
2988 entry->end, skl_ddb_entry_size(entry));
2991 drm_modeset_unlock_all(dev);
2996 static void drrs_status_per_crtc(struct seq_file *m,
2997 struct drm_device *dev,
2998 struct intel_crtc *intel_crtc)
3000 struct drm_i915_private *dev_priv = to_i915(dev);
3001 struct i915_drrs *drrs = &dev_priv->drrs;
3003 struct drm_connector *connector;
3004 struct drm_connector_list_iter conn_iter;
3006 drm_connector_list_iter_begin(dev, &conn_iter);
3007 drm_for_each_connector_iter(connector, &conn_iter) {
3008 if (connector->state->crtc != &intel_crtc->base)
3011 seq_printf(m, "%s:\n", connector->name);
3013 drm_connector_list_iter_end(&conn_iter);
3015 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3016 seq_puts(m, "\tVBT: DRRS_type: Static");
3017 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3018 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3019 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3020 seq_puts(m, "\tVBT: DRRS_type: None");
3022 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3024 seq_puts(m, "\n\n");
3026 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3027 struct intel_panel *panel;
3029 mutex_lock(&drrs->mutex);
3030 /* DRRS Supported */
3031 seq_puts(m, "\tDRRS Supported: Yes\n");
3033 /* disable_drrs() will make drrs->dp NULL */
3035 seq_puts(m, "Idleness DRRS: Disabled\n");
3036 if (dev_priv->psr.enabled)
3038 "\tAs PSR is enabled, DRRS is not enabled\n");
3039 mutex_unlock(&drrs->mutex);
3043 panel = &drrs->dp->attached_connector->panel;
3044 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3045 drrs->busy_frontbuffer_bits);
3047 seq_puts(m, "\n\t\t");
3048 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3049 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3050 vrefresh = panel->fixed_mode->vrefresh;
3051 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3052 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3053 vrefresh = panel->downclock_mode->vrefresh;
3055 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3056 drrs->refresh_rate_type);
3057 mutex_unlock(&drrs->mutex);
3060 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3062 seq_puts(m, "\n\t\t");
3063 mutex_unlock(&drrs->mutex);
3065 /* DRRS not supported. Print the VBT parameter */
3066 seq_puts(m, "\tDRRS Supported: No");
3071 static int i915_drrs_status(struct seq_file *m, void *unused)
3073 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3074 struct drm_device *dev = &dev_priv->drm;
3075 struct intel_crtc *intel_crtc;
3076 int active_crtc_cnt = 0;
3078 drm_modeset_lock_all(dev);
3079 for_each_intel_crtc(dev, intel_crtc) {
3080 if (intel_crtc->base.state->active) {
3082 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3084 drrs_status_per_crtc(m, dev, intel_crtc);
3087 drm_modeset_unlock_all(dev);
3089 if (!active_crtc_cnt)
3090 seq_puts(m, "No active crtc found\n");
3095 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3097 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3098 struct drm_device *dev = &dev_priv->drm;
3099 struct intel_encoder *intel_encoder;
3100 struct intel_digital_port *intel_dig_port;
3101 struct drm_connector *connector;
3102 struct drm_connector_list_iter conn_iter;
3104 drm_connector_list_iter_begin(dev, &conn_iter);
3105 drm_for_each_connector_iter(connector, &conn_iter) {
3106 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3109 intel_encoder = intel_attached_encoder(connector);
3110 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3113 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3114 if (!intel_dig_port->dp.can_mst)
3117 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3118 intel_dig_port->base.base.base.id,
3119 intel_dig_port->base.base.name);
3120 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3122 drm_connector_list_iter_end(&conn_iter);
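/*
 * DisplayPort compliance test nodes: i915_dp_test_active flags a
 * pending compliance test as active when "1" is written (anything
 * else clears it), while the _data and _type nodes report the
 * parameters and type of the requested test.
 */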
3127 static ssize_t i915_displayport_test_active_write(struct file *file,
3128 const char __user *ubuf,
3129 size_t len, loff_t *offp)
3133 struct drm_device *dev;
3134 struct drm_connector *connector;
3135 struct drm_connector_list_iter conn_iter;
3136 struct intel_dp *intel_dp;
3139 dev = ((struct seq_file *)file->private_data)->private;
3144 input_buffer = memdup_user_nul(ubuf, len);
3145 if (IS_ERR(input_buffer))
3146 return PTR_ERR(input_buffer);
3148 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3150 drm_connector_list_iter_begin(dev, &conn_iter);
3151 drm_for_each_connector_iter(connector, &conn_iter) {
3152 struct intel_encoder *encoder;
3154 if (connector->connector_type !=
3155 DRM_MODE_CONNECTOR_DisplayPort)
3158 encoder = to_intel_encoder(connector->encoder);
3159 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3162 if (encoder && connector->status == connector_status_connected) {
3163 intel_dp = enc_to_intel_dp(&encoder->base);
3164 status = kstrtoint(input_buffer, 10, &val);
3167 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3168 /* To prevent erroneous activation of the compliance
3169 * testing code, only accept an actual value of 1 here
3172 intel_dp->compliance.test_active = 1;
3174 intel_dp->compliance.test_active = 0;
3177 drm_connector_list_iter_end(&conn_iter);
3178 kfree(input_buffer);
3186 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3188 struct drm_i915_private *dev_priv = m->private;
3189 struct drm_device *dev = &dev_priv->drm;
3190 struct drm_connector *connector;
3191 struct drm_connector_list_iter conn_iter;
3192 struct intel_dp *intel_dp;
3194 drm_connector_list_iter_begin(dev, &conn_iter);
3195 drm_for_each_connector_iter(connector, &conn_iter) {
3196 struct intel_encoder *encoder;
3198 if (connector->connector_type !=
3199 DRM_MODE_CONNECTOR_DisplayPort)
3202 encoder = to_intel_encoder(connector->encoder);
3203 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3206 if (encoder && connector->status == connector_status_connected) {
3207 intel_dp = enc_to_intel_dp(&encoder->base);
3208 if (intel_dp->compliance.test_active)
3215 drm_connector_list_iter_end(&conn_iter);
3220 static int i915_displayport_test_active_open(struct inode *inode,
3223 return single_open(file, i915_displayport_test_active_show,
3227 static const struct file_operations i915_displayport_test_active_fops = {
3228 .owner = THIS_MODULE,
3229 .open = i915_displayport_test_active_open,
3231 .llseek = seq_lseek,
3232 .release = single_release,
3233 .write = i915_displayport_test_active_write
3236 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3238 struct drm_i915_private *dev_priv = m->private;
3239 struct drm_device *dev = &dev_priv->drm;
3240 struct drm_connector *connector;
3241 struct drm_connector_list_iter conn_iter;
3242 struct intel_dp *intel_dp;
3244 drm_connector_list_iter_begin(dev, &conn_iter);
3245 drm_for_each_connector_iter(connector, &conn_iter) {
3246 struct intel_encoder *encoder;
3248 if (connector->connector_type !=
3249 DRM_MODE_CONNECTOR_DisplayPort)
3252 encoder = to_intel_encoder(connector->encoder);
3253 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3256 if (encoder && connector->status == connector_status_connected) {
3257 intel_dp = enc_to_intel_dp(&encoder->base);
3258 if (intel_dp->compliance.test_type ==
3259 DP_TEST_LINK_EDID_READ)
3260 seq_printf(m, "%lx",
3261 intel_dp->compliance.test_data.edid);
3262 else if (intel_dp->compliance.test_type ==
3263 DP_TEST_LINK_VIDEO_PATTERN) {
3264 seq_printf(m, "hdisplay: %d\n",
3265 intel_dp->compliance.test_data.hdisplay);
3266 seq_printf(m, "vdisplay: %d\n",
3267 intel_dp->compliance.test_data.vdisplay);
3268 seq_printf(m, "bpc: %u\n",
3269 intel_dp->compliance.test_data.bpc);
3274 drm_connector_list_iter_end(&conn_iter);
3278 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3280 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3282 struct drm_i915_private *dev_priv = m->private;
3283 struct drm_device *dev = &dev_priv->drm;
3284 struct drm_connector *connector;
3285 struct drm_connector_list_iter conn_iter;
3286 struct intel_dp *intel_dp;
3288 drm_connector_list_iter_begin(dev, &conn_iter);
3289 drm_for_each_connector_iter(connector, &conn_iter) {
3290 struct intel_encoder *encoder;
3292 if (connector->connector_type !=
3293 DRM_MODE_CONNECTOR_DisplayPort)
3296 encoder = to_intel_encoder(connector->encoder);
3297 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3300 if (encoder && connector->status == connector_status_connected) {
3301 intel_dp = enc_to_intel_dp(&encoder->base);
3302 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3306 drm_connector_list_iter_end(&conn_iter);
3310 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3312 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3314 struct drm_i915_private *dev_priv = m->private;
3315 struct drm_device *dev = &dev_priv->drm;
3319 if (IS_CHERRYVIEW(dev_priv))
3321 else if (IS_VALLEYVIEW(dev_priv))
3323 else if (IS_G4X(dev_priv))
3326 num_levels = ilk_wm_max_level(dev_priv) + 1;
3328 drm_modeset_lock_all(dev);
3330 for (level = 0; level < num_levels; level++) {
3331 unsigned int latency = wm[level];
3334 * - WM1+ latency values in 0.5us units
3335 * - latencies are in us on gen9/vlv/chv
3337 if (INTEL_GEN(dev_priv) >= 9 ||
3338 IS_VALLEYVIEW(dev_priv) ||
3339 IS_CHERRYVIEW(dev_priv) ||
3345 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3346 level, wm[level], latency / 10, latency % 10);
3349 drm_modeset_unlock_all(dev);
3352 static int pri_wm_latency_show(struct seq_file *m, void *data)
3354 struct drm_i915_private *dev_priv = m->private;
3355 const u16 *latencies;
3357 if (INTEL_GEN(dev_priv) >= 9)
3358 latencies = dev_priv->wm.skl_latency;
3360 latencies = dev_priv->wm.pri_latency;
3362 wm_latency_show(m, latencies);
3367 static int spr_wm_latency_show(struct seq_file *m, void *data)
3369 struct drm_i915_private *dev_priv = m->private;
3370 const u16 *latencies;
3372 if (INTEL_GEN(dev_priv) >= 9)
3373 latencies = dev_priv->wm.skl_latency;
3375 latencies = dev_priv->wm.spr_latency;
3377 wm_latency_show(m, latencies);
3382 static int cur_wm_latency_show(struct seq_file *m, void *data)
3384 struct drm_i915_private *dev_priv = m->private;
3385 const u16 *latencies;
3387 if (INTEL_GEN(dev_priv) >= 9)
3388 latencies = dev_priv->wm.skl_latency;
3390 latencies = dev_priv->wm.cur_latency;
3392 wm_latency_show(m, latencies);
3397 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3399 struct drm_i915_private *dev_priv = inode->i_private;
3401 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3404 return single_open(file, pri_wm_latency_show, dev_priv);
3407 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3409 struct drm_i915_private *dev_priv = inode->i_private;
3411 if (HAS_GMCH(dev_priv))
3414 return single_open(file, spr_wm_latency_show, dev_priv);
3417 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3419 struct drm_i915_private *dev_priv = inode->i_private;
3421 if (HAS_GMCH(dev_priv))
3424 return single_open(file, cur_wm_latency_show, dev_priv);
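/*
 * wm_latency_write: common write handler for the pri/spr/cur watermark
 * latency files; it parses up to eight space-separated latency values
 * and requires exactly one value per watermark level on the platform.
 */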
3427 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3428 size_t len, loff_t *offp, u16 wm[8])
3430 struct seq_file *m = file->private_data;
3431 struct drm_i915_private *dev_priv = m->private;
3432 struct drm_device *dev = &dev_priv->drm;
3439 if (IS_CHERRYVIEW(dev_priv))
3441 else if (IS_VALLEYVIEW(dev_priv))
3443 else if (IS_G4X(dev_priv))
3446 num_levels = ilk_wm_max_level(dev_priv) + 1;
3448 if (len >= sizeof(tmp))
3451 if (copy_from_user(tmp, ubuf, len))
3456 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3457 &new[0], &new[1], &new[2], &new[3],
3458 &new[4], &new[5], &new[6], &new[7]);
3459 if (ret != num_levels)
3462 drm_modeset_lock_all(dev);
3464 for (level = 0; level < num_levels; level++)
3465 wm[level] = new[level];
3467 drm_modeset_unlock_all(dev);
3473 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3474 size_t len, loff_t *offp)
3476 struct seq_file *m = file->private_data;
3477 struct drm_i915_private *dev_priv = m->private;
3480 if (INTEL_GEN(dev_priv) >= 9)
3481 latencies = dev_priv->wm.skl_latency;
3483 latencies = dev_priv->wm.pri_latency;
3485 return wm_latency_write(file, ubuf, len, offp, latencies);
3488 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3489 size_t len, loff_t *offp)
3491 struct seq_file *m = file->private_data;
3492 struct drm_i915_private *dev_priv = m->private;
3495 if (INTEL_GEN(dev_priv) >= 9)
3496 latencies = dev_priv->wm.skl_latency;
3498 latencies = dev_priv->wm.spr_latency;
3500 return wm_latency_write(file, ubuf, len, offp, latencies);
3503 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3504 size_t len, loff_t *offp)
3506 struct seq_file *m = file->private_data;
3507 struct drm_i915_private *dev_priv = m->private;
3510 if (INTEL_GEN(dev_priv) >= 9)
3511 latencies = dev_priv->wm.skl_latency;
3513 latencies = dev_priv->wm.cur_latency;
3515 return wm_latency_write(file, ubuf, len, offp, latencies);
3518 static const struct file_operations i915_pri_wm_latency_fops = {
3519 .owner = THIS_MODULE,
3520 .open = pri_wm_latency_open,
3522 .llseek = seq_lseek,
3523 .release = single_release,
3524 .write = pri_wm_latency_write
3527 static const struct file_operations i915_spr_wm_latency_fops = {
3528 .owner = THIS_MODULE,
3529 .open = spr_wm_latency_open,
3531 .llseek = seq_lseek,
3532 .release = single_release,
3533 .write = spr_wm_latency_write
3536 static const struct file_operations i915_cur_wm_latency_fops = {
3537 .owner = THIS_MODULE,
3538 .open = cur_wm_latency_open,
3540 .llseek = seq_lseek,
3541 .release = single_release,
3542 .write = cur_wm_latency_write
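/*
 * i915_wedged: reading reports whether the GT is terminally wedged;
 * writing an engine mask waits for any pending reset to finish and
 * then forces an error capture/reset via intel_gt_handle_error().
 */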
3546 i915_wedged_get(void *data, u64 *val)
3548 struct drm_i915_private *i915 = data;
3549 int ret = intel_gt_terminally_wedged(&i915->gt);
3564 i915_wedged_set(void *data, u64 val)
3566 struct drm_i915_private *i915 = data;
3568 /* Flush any previous reset before applying for a new one */
3569 wait_event(i915->gt.reset.queue,
3570 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3572 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3573 "Manually set wedged engine mask = %llx", val);
3577 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3578 i915_wedged_get, i915_wedged_set,
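/*
 * i915_gem_drop_caches bits: each DROP_* flag written to the debugfs
 * file (typically under /sys/kernel/debug/dri/<minor>/) selects one
 * step of the cache-dropping sequence below: shrink bound/unbound
 * objects, retire requests, wait for idle, reset active engines and
 * flush freed objects.
 */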
3581 #define DROP_UNBOUND BIT(0)
3582 #define DROP_BOUND BIT(1)
3583 #define DROP_RETIRE BIT(2)
3584 #define DROP_ACTIVE BIT(3)
3585 #define DROP_FREED BIT(4)
3586 #define DROP_SHRINK_ALL BIT(5)
3587 #define DROP_IDLE BIT(6)
3588 #define DROP_RESET_ACTIVE BIT(7)
3589 #define DROP_RESET_SEQNO BIT(8)
3590 #define DROP_ALL (DROP_UNBOUND | \
3597 DROP_RESET_ACTIVE | \
3600 i915_drop_caches_get(void *data, u64 *val)
3608 i915_drop_caches_set(void *data, u64 val)
3610 struct drm_i915_private *i915 = data;
3612 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3613 val, val & DROP_ALL);
3615 if (val & DROP_RESET_ACTIVE &&
3616 wait_for(intel_engines_are_idle(&i915->gt),
3617 I915_IDLE_ENGINES_TIMEOUT))
3618 intel_gt_set_wedged(&i915->gt);
3620 /* No need to check and wait for gpu resets; only libdrm auto-restarts
3621 * ioctls on -EAGAIN. */
3622 if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3625 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3630 * To finish the flush of the idle_worker, we must complete
3631 * the switch-to-kernel-context, which requires a double
3632 * pass through wait_for_idle: first queues the switch,
3633 * second waits for the switch.
3635 if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3636 ret = i915_gem_wait_for_idle(i915,
3637 I915_WAIT_INTERRUPTIBLE |
3639 MAX_SCHEDULE_TIMEOUT);
3641 if (ret == 0 && val & DROP_IDLE)
3642 ret = i915_gem_wait_for_idle(i915,
3643 I915_WAIT_INTERRUPTIBLE |
3645 MAX_SCHEDULE_TIMEOUT);
3647 if (val & DROP_RETIRE)
3648 i915_retire_requests(i915);
3650 mutex_unlock(&i915->drm.struct_mutex);
3652 if (ret == 0 && val & DROP_IDLE)
3653 ret = intel_gt_pm_wait_for_idle(&i915->gt);
3656 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
3657 intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
3659 fs_reclaim_acquire(GFP_KERNEL);
3660 if (val & DROP_BOUND)
3661 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3663 if (val & DROP_UNBOUND)
3664 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3666 if (val & DROP_SHRINK_ALL)
3667 i915_gem_shrink_all(i915);
3668 fs_reclaim_release(GFP_KERNEL);
3670 if (val & DROP_IDLE) {
3671 flush_delayed_work(&i915->gem.retire_work);
3672 flush_work(&i915->gem.idle_work);
3675 if (val & DROP_FREED)
3676 i915_gem_drain_freed_objects(i915);
3681 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3682 i915_drop_caches_get, i915_drop_caches_set,
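/*
 * i915_cache_sharing: expose the MBC snoop/cache sharing policy via
 * the GEN6_MBCUNIT_SNPCR register; only meaningful on gen6/gen7.
 */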
3686 i915_cache_sharing_get(void *data, u64 *val)
3688 struct drm_i915_private *dev_priv = data;
3689 intel_wakeref_t wakeref;
3692 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3695 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3696 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3698 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3704 i915_cache_sharing_set(void *data, u64 val)
3706 struct drm_i915_private *dev_priv = data;
3707 intel_wakeref_t wakeref;
3709 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3715 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3716 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3719 /* Update the cache sharing policy here as well */
3720 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3721 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3722 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3723 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3730 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
3733 int offset = slice * sseu->ss_stride;
3735 memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
3738 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3739 i915_cache_sharing_get, i915_cache_sharing_set,
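/*
 * The *_sseu_device_status() helpers below read per-platform hardware
 * status registers (power-gating ACKs, slice info) to determine which
 * slices, subslices and EUs are currently powered up, filling a
 * sseu_dev_info snapshot that i915_sseu_status prints alongside the
 * static device info.
 */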
3742 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3743 struct sseu_dev_info *sseu)
3746 const int ss_max = SS_MAX;
3747 u32 sig1[SS_MAX], sig2[SS_MAX];
3750 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3751 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3752 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3753 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3755 for (ss = 0; ss < ss_max; ss++) {
3756 unsigned int eu_cnt;
3758 if (sig1[ss] & CHV_SS_PG_ENABLE)
3759 /* skip disabled subslice */
3762 sseu->slice_mask = BIT(0);
3763 sseu->subslice_mask[0] |= BIT(ss);
3764 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3765 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3766 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3767 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3768 sseu->eu_total += eu_cnt;
3769 sseu->eu_per_subslice = max_t(unsigned int,
3770 sseu->eu_per_subslice, eu_cnt);
3775 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3776 struct sseu_dev_info *sseu)
3779 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3780 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3783 for (s = 0; s < info->sseu.max_slices; s++) {
3785 * FIXME: Valid SS Mask respects the spec and reads
3786 * only valid bits for those registers, excluding reserved,
3787 * although this seems wrong because it would leave many
3788 * subslices without an ACK.
3790 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3791 GEN10_PGCTL_VALID_SS_MASK(s);
3792 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3793 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3796 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3797 GEN9_PGCTL_SSA_EU19_ACK |
3798 GEN9_PGCTL_SSA_EU210_ACK |
3799 GEN9_PGCTL_SSA_EU311_ACK;
3800 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3801 GEN9_PGCTL_SSB_EU19_ACK |
3802 GEN9_PGCTL_SSB_EU210_ACK |
3803 GEN9_PGCTL_SSB_EU311_ACK;
3805 for (s = 0; s < info->sseu.max_slices; s++) {
3806 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3807 /* skip disabled slice */
3810 sseu->slice_mask |= BIT(s);
3811 intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
3813 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3814 unsigned int eu_cnt;
3816 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3817 /* skip disabled subslice */
3820 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3822 sseu->eu_total += eu_cnt;
3823 sseu->eu_per_subslice = max_t(unsigned int,
3824 sseu->eu_per_subslice,
3831 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3832 struct sseu_dev_info *sseu)
3835 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3836 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3839 for (s = 0; s < info->sseu.max_slices; s++) {
3840 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3841 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3842 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3845 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3846 GEN9_PGCTL_SSA_EU19_ACK |
3847 GEN9_PGCTL_SSA_EU210_ACK |
3848 GEN9_PGCTL_SSA_EU311_ACK;
3849 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3850 GEN9_PGCTL_SSB_EU19_ACK |
3851 GEN9_PGCTL_SSB_EU210_ACK |
3852 GEN9_PGCTL_SSB_EU311_ACK;
3854 for (s = 0; s < info->sseu.max_slices; s++) {
3855 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3856 /* skip disabled slice */
3859 sseu->slice_mask |= BIT(s);
3861 if (IS_GEN9_BC(dev_priv))
3862 intel_sseu_copy_subslices(&info->sseu, s,
3863 sseu->subslice_mask);
3865 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3866 unsigned int eu_cnt;
3867 u8 ss_idx = s * info->sseu.ss_stride +
3870 if (IS_GEN9_LP(dev_priv)) {
3871 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3872 /* skip disabled subslice */
3875 sseu->subslice_mask[ss_idx] |=
3876 BIT(ss % BITS_PER_BYTE);
3879 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3881 sseu->eu_total += eu_cnt;
3882 sseu->eu_per_subslice = max_t(unsigned int,
3883 sseu->eu_per_subslice,
3890 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3891 struct sseu_dev_info *sseu)
3893 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3894 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3897 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3899 if (sseu->slice_mask) {
3900 sseu->eu_per_subslice = info->sseu.eu_per_subslice;
3901 for (s = 0; s < fls(sseu->slice_mask); s++)
3902 intel_sseu_copy_subslices(&info->sseu, s,
3903 sseu->subslice_mask);
3904 sseu->eu_total = sseu->eu_per_subslice *
3905 intel_sseu_subslice_total(sseu);
3907 /* subtract fused off EU(s) from enabled slice(s) */
3908 for (s = 0; s < fls(sseu->slice_mask); s++) {
3909 u8 subslice_7eu = info->sseu.subslice_7eu[s];
3911 sseu->eu_total -= hweight8(subslice_7eu);
3916 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3917 const struct sseu_dev_info *sseu)
3919 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3920 const char *type = is_available_info ? "Available" : "Enabled";
3923 seq_printf(m, " %s Slice Mask: %04x\n", type,
3925 seq_printf(m, " %s Slice Total: %u\n", type,
3926 hweight8(sseu->slice_mask));
3927 seq_printf(m, " %s Subslice Total: %u\n", type,
3928 intel_sseu_subslice_total(sseu));
3929 for (s = 0; s < fls(sseu->slice_mask); s++) {
3930 seq_printf(m, " %s Slice%i subslices: %u\n", type,
3931 s, intel_sseu_subslices_per_slice(sseu, s));
3933 seq_printf(m, " %s EU Total: %u\n", type,
3935 seq_printf(m, " %s EU Per Subslice: %u\n", type,
3936 sseu->eu_per_subslice);
3938 if (!is_available_info)
3941 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3942 if (HAS_POOLED_EU(dev_priv))
3943 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
3945 seq_printf(m, " Has Slice Power Gating: %s\n",
3946 yesno(sseu->has_slice_pg));
3947 seq_printf(m, " Has Subslice Power Gating: %s\n",
3948 yesno(sseu->has_subslice_pg));
3949 seq_printf(m, " Has EU Power Gating: %s\n",
3950 yesno(sseu->has_eu_pg));
3953 static int i915_sseu_status(struct seq_file *m, void *unused)
3955 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3956 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3957 struct sseu_dev_info sseu;
3958 intel_wakeref_t wakeref;
3960 if (INTEL_GEN(dev_priv) < 8)
3963 seq_puts(m, "SSEU Device Info\n");
3964 i915_print_sseu_info(m, true, &info->sseu);
3966 seq_puts(m, "SSEU Device Status\n");
3967 memset(&sseu, 0, sizeof(sseu));
3968 intel_sseu_set_info(&sseu, info->sseu.max_slices,
3969 info->sseu.max_subslices,
3970 info->sseu.max_eus_per_subslice);
3972 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3973 if (IS_CHERRYVIEW(dev_priv))
3974 cherryview_sseu_device_status(dev_priv, &sseu);
3975 else if (IS_BROADWELL(dev_priv))
3976 broadwell_sseu_device_status(dev_priv, &sseu);
3977 else if (IS_GEN(dev_priv, 9))
3978 gen9_sseu_device_status(dev_priv, &sseu);
3979 else if (INTEL_GEN(dev_priv) >= 10)
3980 gen10_sseu_device_status(dev_priv, &sseu);
3983 i915_print_sseu_info(m, false, &sseu);
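/*
 * i915_forcewake_user: while this file is held open, a user forcewake
 * reference and a runtime-PM wakeref are held so the GT registers stay
 * awake for external tools; both are dropped on release.
 */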
3988 static int i915_forcewake_open(struct inode *inode, struct file *file)
3990 struct drm_i915_private *i915 = inode->i_private;
3992 if (INTEL_GEN(i915) < 6)
3995 file->private_data =
3996 (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
3997 intel_uncore_forcewake_user_get(&i915->uncore);
4002 static int i915_forcewake_release(struct inode *inode, struct file *file)
4004 struct drm_i915_private *i915 = inode->i_private;
4006 if (INTEL_GEN(i915) < 6)
4009 intel_uncore_forcewake_user_put(&i915->uncore);
4010 intel_runtime_pm_put(&i915->runtime_pm,
4011 (intel_wakeref_t)(uintptr_t)file->private_data);
4016 static const struct file_operations i915_forcewake_fops = {
4017 .owner = THIS_MODULE,
4018 .open = i915_forcewake_open,
4019 .release = i915_forcewake_release,
4022 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4024 struct drm_i915_private *dev_priv = m->private;
4025 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4027 /* Synchronize with everything first in case there's been an HPD
4028 * storm that we haven't finished handling in the kernel yet
4030 intel_synchronize_irq(dev_priv);
4031 flush_work(&dev_priv->hotplug.dig_port_work);
4032 flush_delayed_work(&dev_priv->hotplug.hotplug_work);
4034 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4035 seq_printf(m, "Detected: %s\n",
4036 yesno(delayed_work_pending(&hotplug->reenable_work)));
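/*
 * i915_hpd_storm_ctl accepts either "reset" (restore the default HPD
 * storm threshold) or a decimal threshold, with 0 disabling storm
 * detection; the per-pin HPD stats are cleared on every write.
 */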
4041 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4042 const char __user *ubuf, size_t len,
4045 struct seq_file *m = file->private_data;
4046 struct drm_i915_private *dev_priv = m->private;
4047 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4048 unsigned int new_threshold;
4053 if (len >= sizeof(tmp))
4056 if (copy_from_user(tmp, ubuf, len))
4061 /* Strip newline, if any */
4062 newline = strchr(tmp, '\n');
4066 if (strcmp(tmp, "reset") == 0)
4067 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4068 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4071 if (new_threshold > 0)
4072 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4075 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4077 spin_lock_irq(&dev_priv->irq_lock);
4078 hotplug->hpd_storm_threshold = new_threshold;
4079 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4081 hotplug->stats[i].count = 0;
4082 spin_unlock_irq(&dev_priv->irq_lock);
4084 /* Re-enable hpd immediately if we were in an irq storm */
4085 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4090 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4092 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4095 static const struct file_operations i915_hpd_storm_ctl_fops = {
4096 .owner = THIS_MODULE,
4097 .open = i915_hpd_storm_ctl_open,
4099 .llseek = seq_lseek,
4100 .release = single_release,
4101 .write = i915_hpd_storm_ctl_write
4104 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4106 struct drm_i915_private *dev_priv = m->private;
4108 seq_printf(m, "Enabled: %s\n",
4109 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4115 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4117 return single_open(file, i915_hpd_short_storm_ctl_show,
4121 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4122 const char __user *ubuf,
4123 size_t len, loff_t *offp)
4125 struct seq_file *m = file->private_data;
4126 struct drm_i915_private *dev_priv = m->private;
4127 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4133 if (len >= sizeof(tmp))
4136 if (copy_from_user(tmp, ubuf, len))
4141 /* Strip newline, if any */
4142 newline = strchr(tmp, '\n');
4146 /* Reset to the "default" state for this system */
4147 if (strcmp(tmp, "reset") == 0)
4148 new_state = !HAS_DP_MST(dev_priv);
4149 else if (kstrtobool(tmp, &new_state) != 0)
4152 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4153 new_state ? "En" : "Dis");
4155 spin_lock_irq(&dev_priv->irq_lock);
4156 hotplug->hpd_short_storm_enabled = new_state;
4157 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4159 hotplug->stats[i].count = 0;
4160 spin_unlock_irq(&dev_priv->irq_lock);
4162 /* Re-enable hpd immediately if we were in an irq storm */
4163 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4168 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4169 .owner = THIS_MODULE,
4170 .open = i915_hpd_short_storm_ctl_open,
4172 .llseek = seq_lseek,
4173 .release = single_release,
4174 .write = i915_hpd_short_storm_ctl_write,
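/*
 * i915_drrs_ctl: manually enable (non-zero) or disable (zero) eDP DRRS
 * on every active CRTC that supports it; gen7+ only.
 */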
4177 static int i915_drrs_ctl_set(void *data, u64 val)
4179 struct drm_i915_private *dev_priv = data;
4180 struct drm_device *dev = &dev_priv->drm;
4181 struct intel_crtc *crtc;
4183 if (INTEL_GEN(dev_priv) < 7)
4186 for_each_intel_crtc(dev, crtc) {
4187 struct drm_connector_list_iter conn_iter;
4188 struct intel_crtc_state *crtc_state;
4189 struct drm_connector *connector;
4190 struct drm_crtc_commit *commit;
4193 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4197 crtc_state = to_intel_crtc_state(crtc->base.state);
4199 if (!crtc_state->base.active ||
4200 !crtc_state->has_drrs)
4203 commit = crtc_state->base.commit;
4205 ret = wait_for_completion_interruptible(&commit->hw_done);
4210 drm_connector_list_iter_begin(dev, &conn_iter);
4211 drm_for_each_connector_iter(connector, &conn_iter) {
4212 struct intel_encoder *encoder;
4213 struct intel_dp *intel_dp;
4215 if (!(crtc_state->base.connector_mask &
4216 drm_connector_mask(connector)))
4219 encoder = intel_attached_encoder(connector);
4220 if (encoder->type != INTEL_OUTPUT_EDP)
4223 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4224 val ? "en" : "dis", val);
4226 intel_dp = enc_to_intel_dp(&encoder->base);
4228 intel_edp_drrs_enable(intel_dp,
4231 intel_edp_drrs_disable(intel_dp,
4234 drm_connector_list_iter_end(&conn_iter);
4237 drm_modeset_unlock(&crtc->base.mutex);
4245 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
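/*
 * i915_fifo_underrun_reset: writing a truthy value re-arms FIFO
 * underrun reporting on every pipe (after waiting for any pending
 * commit to complete) and also resets the FBC underrun counter.
 */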
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}
		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));
			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}
		drm_modeset_unlock(&intel_crtc->base.mutex);
		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
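/*
 * Example usage, as a sketch only: any value kstrtobool_from_user()
 * accepts as true re-arms FIFO underrun reporting on all active pipes and
 * resets the FBC underrun status, e.g.:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */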
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
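/*
 * After registration the read-only entries from i915_debugfs_list and the
 * writable files from i915_debugfs_files live under the device's DRM
 * debugfs directory; illustrative paths (the minor number is system
 * dependent):
 *
 *   /sys/kernel/debug/dri/0/i915_capabilities
 *   /sys/kernel/debug/dri/0/i915_wedged
 *   /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */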
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
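/*
 * How a dpcd_block maps to a dump range, following the size computation in
 * i915_dpcd_show() below: { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }
 * reads end - offset + 1 bytes starting at DP_PSR_SUPPORT, whereas
 * { .offset = DP_SET_POWER } falls back to the default size of one byte.
 */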
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}
static int i915_dsc_fec_support_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show, inode->i_private);
}
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}
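/*
 * The per-connector files added above end up in the connector's debugfs
 * directory; illustrative layout for an eDP panel (directory names depend
 * on the actual connector and minor number):
 *
 *   /sys/kernel/debug/dri/0/eDP-1/i915_dpcd
 *   /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings
 *   /sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status
 *   /sys/kernel/debug/dri/0/eDP-1/i915_dsc_fec_support
 */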