/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_reset.h"

#include "i915_debugfs.h"
#include "intel_csr.h"
#include "intel_drv.h"
#include "intel_guc_submission.h"
#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
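
/*
 * i915_capabilities: read-only debugfs node summarising the device --
 * generation, platform, PCH type, device info flags, runtime info,
 * driver caps and the current module parameters.
 */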
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
78 static char get_active_flag(struct drm_i915_gem_object *obj)
80 return i915_gem_object_is_active(obj) ? '*' : ' ';
83 static char get_pin_flag(struct drm_i915_gem_object *obj)
85 return obj->pin_global ? 'p' : ' ';
88 static char get_tiling_flag(struct drm_i915_gem_object *obj)
90 switch (i915_gem_object_get_tiling(obj)) {
92 case I915_TILING_NONE: return ' ';
93 case I915_TILING_X: return 'X';
94 case I915_TILING_Y: return 'Y';
98 static char get_global_flag(struct drm_i915_gem_object *obj)
100 return obj->userfault_count ? 'g' : ' ';
103 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
105 return obj->mm.mapping ? 'M' : ' ';
109 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
113 switch (page_sizes) {
116 case I915_GTT_PAGE_SIZE_4K:
118 case I915_GTT_PAGE_SIZE_64K:
120 case I915_GTT_PAGE_SIZE_2M:
126 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
127 x += snprintf(buf + x, len - x, "2M, ");
128 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
129 x += snprintf(buf + x, len - x, "64K, ");
130 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
131 x += snprintf(buf + x, len - x, "4K, ");
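
/*
 * describe_obj: print a one-line summary of a GEM object (status flags,
 * size, cache level, madvise state) followed by one entry per bound VMA,
 * including GGTT view details and any fence register in use.
 */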
139 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
141 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
142 struct intel_engine_cs *engine;
143 struct i915_vma *vma;
144 unsigned int frontbuffer_bits;
147 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
149 get_active_flag(obj),
151 get_tiling_flag(obj),
152 get_global_flag(obj),
153 get_pin_mapped_flag(obj),
154 obj->base.size / 1024,
157 i915_cache_level_str(dev_priv, obj->cache_level),
158 obj->mm.dirty ? " dirty" : "",
159 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
161 seq_printf(m, " (name: %d)", obj->base.name);
163 spin_lock(&obj->vma.lock);
164 list_for_each_entry(vma, &obj->vma.list, obj_link) {
165 if (!drm_mm_node_allocated(&vma->node))
168 spin_unlock(&obj->vma.lock);
170 if (i915_vma_is_pinned(vma))
173 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
174 i915_vma_is_ggtt(vma) ? "g" : "pp",
175 vma->node.start, vma->node.size,
176 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
177 if (i915_vma_is_ggtt(vma)) {
178 switch (vma->ggtt_view.type) {
179 case I915_GGTT_VIEW_NORMAL:
180 seq_puts(m, ", normal");
183 case I915_GGTT_VIEW_PARTIAL:
184 seq_printf(m, ", partial [%08llx+%x]",
185 vma->ggtt_view.partial.offset << PAGE_SHIFT,
186 vma->ggtt_view.partial.size << PAGE_SHIFT);
189 case I915_GGTT_VIEW_ROTATED:
190 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
191 vma->ggtt_view.rotated.plane[0].width,
192 vma->ggtt_view.rotated.plane[0].height,
193 vma->ggtt_view.rotated.plane[0].stride,
194 vma->ggtt_view.rotated.plane[0].offset,
195 vma->ggtt_view.rotated.plane[1].width,
196 vma->ggtt_view.rotated.plane[1].height,
197 vma->ggtt_view.rotated.plane[1].stride,
198 vma->ggtt_view.rotated.plane[1].offset);
201 case I915_GGTT_VIEW_REMAPPED:
202 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
203 vma->ggtt_view.remapped.plane[0].width,
204 vma->ggtt_view.remapped.plane[0].height,
205 vma->ggtt_view.remapped.plane[0].stride,
206 vma->ggtt_view.remapped.plane[0].offset,
207 vma->ggtt_view.remapped.plane[1].width,
208 vma->ggtt_view.remapped.plane[1].height,
209 vma->ggtt_view.remapped.plane[1].stride,
210 vma->ggtt_view.remapped.plane[1].offset);
214 MISSING_CASE(vma->ggtt_view.type);
219 seq_printf(m, " , fence: %d%s",
221 i915_active_request_isset(&vma->last_fence) ? "*" : "");
224 spin_lock(&obj->vma.lock);
226 spin_unlock(&obj->vma.lock);
228 seq_printf(m, " (pinned x %d)", pin_count);
230 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
232 seq_printf(m, " (global)");
234 engine = i915_gem_object_last_write_engine(obj);
236 seq_printf(m, " (%s)", engine->name);
238 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
239 if (frontbuffer_bits)
240 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
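
/*
 * per_file_stats() is an idr/list callback that accumulates per-client
 * object statistics (total, active, inactive, global, shared, unbound and
 * closed sizes) into a struct file_stats, skipping VMAs that belong to a
 * different address space than the one being reported.
 */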
244 struct i915_address_space *vm;
248 u64 active, inactive;
252 static int per_file_stats(int id, void *ptr, void *data)
254 struct drm_i915_gem_object *obj = ptr;
255 struct file_stats *stats = data;
256 struct i915_vma *vma;
258 lockdep_assert_held(&obj->base.dev->struct_mutex);
261 stats->total += obj->base.size;
262 if (!atomic_read(&obj->bind_count))
263 stats->unbound += obj->base.size;
264 if (obj->base.name || obj->base.dma_buf)
265 stats->shared += obj->base.size;
267 list_for_each_entry(vma, &obj->vma.list, obj_link) {
268 if (!drm_mm_node_allocated(&vma->node))
271 if (i915_vma_is_ggtt(vma)) {
272 stats->global += vma->node.size;
274 if (vma->vm != stats->vm)
278 if (i915_vma_is_active(vma))
279 stats->active += vma->node.size;
281 stats->inactive += vma->node.size;
283 if (i915_vma_is_closed(vma))
284 stats->closed += vma->node.size;
290 #define print_file_stats(m, name, stats) do { \
292 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
304 static void print_batch_pool_stats(struct seq_file *m,
305 struct drm_i915_private *dev_priv)
307 struct drm_i915_gem_object *obj;
308 struct intel_engine_cs *engine;
309 struct file_stats stats = {};
310 enum intel_engine_id id;
313 for_each_engine(engine, dev_priv, id) {
314 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
315 list_for_each_entry(obj,
316 &engine->batch_pool.cache_list[j],
318 per_file_stats(0, obj, &stats);
322 print_file_stats(m, "[k]batch pool", stats);
325 static void print_context_stats(struct seq_file *m,
326 struct drm_i915_private *i915)
328 struct file_stats kstats = {};
329 struct i915_gem_context *ctx;
331 list_for_each_entry(ctx, &i915->contexts.list, link) {
332 struct i915_gem_engines_iter it;
333 struct intel_context *ce;
335 for_each_gem_engine(ce,
336 i915_gem_context_lock_engines(ctx), it) {
338 per_file_stats(0, ce->state->obj, &kstats);
340 per_file_stats(0, ce->ring->vma->obj, &kstats);
342 i915_gem_context_unlock_engines(ctx);
344 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
345 struct file_stats stats = { .vm = ctx->vm, };
346 struct drm_file *file = ctx->file_priv->file;
347 struct task_struct *task;
350 spin_lock(&file->table_lock);
351 idr_for_each(&file->object_idr, per_file_stats, &stats);
352 spin_unlock(&file->table_lock);
355 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
356 snprintf(name, sizeof(name), "%s",
357 task ? task->comm : "<unknown>");
360 print_file_stats(m, name, stats);
364 print_file_stats(m, "[k]contexts", kstats);
367 static int i915_gem_object_info(struct seq_file *m, void *data)
369 struct drm_i915_private *i915 = node_to_i915(m->private);
372 seq_printf(m, "%u shrinkable objects, %llu bytes\n",
373 i915->mm.shrink_count,
374 i915->mm.shrink_memory);
378 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
382 print_batch_pool_stats(m, i915);
383 print_context_stats(m, i915);
384 mutex_unlock(&i915->drm.struct_mutex);
389 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
391 struct drm_i915_private *dev_priv = node_to_i915(m->private);
392 struct drm_device *dev = &dev_priv->drm;
393 struct drm_i915_gem_object *obj;
394 struct intel_engine_cs *engine;
395 enum intel_engine_id id;
399 ret = mutex_lock_interruptible(&dev->struct_mutex);
403 for_each_engine(engine, dev_priv, id) {
404 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
408 list_for_each_entry(obj,
409 &engine->batch_pool.cache_list[j],
412 seq_printf(m, "%s cache[%d]: %d objects\n",
413 engine->name, j, count);
415 list_for_each_entry(obj,
416 &engine->batch_pool.cache_list[j],
419 describe_obj(m, obj);
427 seq_printf(m, "total: %d\n", total);
429 mutex_unlock(&dev->struct_mutex);
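
/*
 * gen8_display_interrupt_info: dump the gen8+ display-engine interrupt
 * registers (per-pipe IMR/IIR/IER, port, misc and PCU), taking the pipe
 * power domain reference so powered-down pipes are skipped safely.
 */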
434 static void gen8_display_interrupt_info(struct seq_file *m)
436 struct drm_i915_private *dev_priv = node_to_i915(m->private);
439 for_each_pipe(dev_priv, pipe) {
440 enum intel_display_power_domain power_domain;
441 intel_wakeref_t wakeref;
443 power_domain = POWER_DOMAIN_PIPE(pipe);
444 wakeref = intel_display_power_get_if_enabled(dev_priv,
447 seq_printf(m, "Pipe %c power disabled\n",
451 seq_printf(m, "Pipe %c IMR:\t%08x\n",
453 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
454 seq_printf(m, "Pipe %c IIR:\t%08x\n",
456 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
457 seq_printf(m, "Pipe %c IER:\t%08x\n",
459 I915_READ(GEN8_DE_PIPE_IER(pipe)));
461 intel_display_power_put(dev_priv, power_domain, wakeref);
464 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
465 I915_READ(GEN8_DE_PORT_IMR));
466 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
467 I915_READ(GEN8_DE_PORT_IIR));
468 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
469 I915_READ(GEN8_DE_PORT_IER));
471 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
472 I915_READ(GEN8_DE_MISC_IMR));
473 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
474 I915_READ(GEN8_DE_MISC_IIR));
475 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
476 I915_READ(GEN8_DE_MISC_IER));
478 seq_printf(m, "PCU interrupt mask:\t%08x\n",
479 I915_READ(GEN8_PCU_IMR));
480 seq_printf(m, "PCU interrupt identity:\t%08x\n",
481 I915_READ(GEN8_PCU_IIR));
482 seq_printf(m, "PCU interrupt enable:\t%08x\n",
483 I915_READ(GEN8_PCU_IER));
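
/*
 * i915_interrupt_info: dump the interrupt registers for the current
 * platform; the register layout differs between CHV, gen11+, gen8+, VLV,
 * pre-PCH-split and PCH-split (Ironlake-style) hardware, hence the
 * per-platform branches below.
 */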
486 static int i915_interrupt_info(struct seq_file *m, void *data)
488 struct drm_i915_private *dev_priv = node_to_i915(m->private);
489 struct intel_engine_cs *engine;
490 enum intel_engine_id id;
491 intel_wakeref_t wakeref;
494 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
496 if (IS_CHERRYVIEW(dev_priv)) {
497 intel_wakeref_t pref;
499 seq_printf(m, "Master Interrupt Control:\t%08x\n",
500 I915_READ(GEN8_MASTER_IRQ));
502 seq_printf(m, "Display IER:\t%08x\n",
504 seq_printf(m, "Display IIR:\t%08x\n",
506 seq_printf(m, "Display IIR_RW:\t%08x\n",
507 I915_READ(VLV_IIR_RW));
508 seq_printf(m, "Display IMR:\t%08x\n",
510 for_each_pipe(dev_priv, pipe) {
511 enum intel_display_power_domain power_domain;
513 power_domain = POWER_DOMAIN_PIPE(pipe);
514 pref = intel_display_power_get_if_enabled(dev_priv,
517 seq_printf(m, "Pipe %c power disabled\n",
522 seq_printf(m, "Pipe %c stat:\t%08x\n",
524 I915_READ(PIPESTAT(pipe)));
526 intel_display_power_put(dev_priv, power_domain, pref);
529 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
530 seq_printf(m, "Port hotplug:\t%08x\n",
531 I915_READ(PORT_HOTPLUG_EN));
532 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
533 I915_READ(VLV_DPFLIPSTAT));
534 seq_printf(m, "DPINVGTT:\t%08x\n",
535 I915_READ(DPINVGTT));
536 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
538 for (i = 0; i < 4; i++) {
539 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
540 i, I915_READ(GEN8_GT_IMR(i)));
541 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
542 i, I915_READ(GEN8_GT_IIR(i)));
543 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
544 i, I915_READ(GEN8_GT_IER(i)));
547 seq_printf(m, "PCU interrupt mask:\t%08x\n",
548 I915_READ(GEN8_PCU_IMR));
549 seq_printf(m, "PCU interrupt identity:\t%08x\n",
550 I915_READ(GEN8_PCU_IIR));
551 seq_printf(m, "PCU interrupt enable:\t%08x\n",
552 I915_READ(GEN8_PCU_IER));
553 } else if (INTEL_GEN(dev_priv) >= 11) {
554 seq_printf(m, "Master Interrupt Control: %08x\n",
555 I915_READ(GEN11_GFX_MSTR_IRQ));
557 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
558 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
559 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
560 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
561 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
562 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
563 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
564 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
565 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
566 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
567 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
568 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
570 seq_printf(m, "Display Interrupt Control:\t%08x\n",
571 I915_READ(GEN11_DISPLAY_INT_CTL));
573 gen8_display_interrupt_info(m);
574 } else if (INTEL_GEN(dev_priv) >= 8) {
575 seq_printf(m, "Master Interrupt Control:\t%08x\n",
576 I915_READ(GEN8_MASTER_IRQ));
578 for (i = 0; i < 4; i++) {
579 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
580 i, I915_READ(GEN8_GT_IMR(i)));
581 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
582 i, I915_READ(GEN8_GT_IIR(i)));
583 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
584 i, I915_READ(GEN8_GT_IER(i)));
587 gen8_display_interrupt_info(m);
588 } else if (IS_VALLEYVIEW(dev_priv)) {
589 seq_printf(m, "Display IER:\t%08x\n",
591 seq_printf(m, "Display IIR:\t%08x\n",
593 seq_printf(m, "Display IIR_RW:\t%08x\n",
594 I915_READ(VLV_IIR_RW));
595 seq_printf(m, "Display IMR:\t%08x\n",
597 for_each_pipe(dev_priv, pipe) {
598 enum intel_display_power_domain power_domain;
599 intel_wakeref_t pref;
601 power_domain = POWER_DOMAIN_PIPE(pipe);
602 pref = intel_display_power_get_if_enabled(dev_priv,
605 seq_printf(m, "Pipe %c power disabled\n",
610 seq_printf(m, "Pipe %c stat:\t%08x\n",
612 I915_READ(PIPESTAT(pipe)));
613 intel_display_power_put(dev_priv, power_domain, pref);
616 seq_printf(m, "Master IER:\t%08x\n",
617 I915_READ(VLV_MASTER_IER));
619 seq_printf(m, "Render IER:\t%08x\n",
621 seq_printf(m, "Render IIR:\t%08x\n",
623 seq_printf(m, "Render IMR:\t%08x\n",
626 seq_printf(m, "PM IER:\t\t%08x\n",
627 I915_READ(GEN6_PMIER));
628 seq_printf(m, "PM IIR:\t\t%08x\n",
629 I915_READ(GEN6_PMIIR));
630 seq_printf(m, "PM IMR:\t\t%08x\n",
631 I915_READ(GEN6_PMIMR));
633 seq_printf(m, "Port hotplug:\t%08x\n",
634 I915_READ(PORT_HOTPLUG_EN));
635 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
636 I915_READ(VLV_DPFLIPSTAT));
637 seq_printf(m, "DPINVGTT:\t%08x\n",
638 I915_READ(DPINVGTT));
640 } else if (!HAS_PCH_SPLIT(dev_priv)) {
641 seq_printf(m, "Interrupt enable: %08x\n",
642 I915_READ(GEN2_IER));
643 seq_printf(m, "Interrupt identity: %08x\n",
644 I915_READ(GEN2_IIR));
645 seq_printf(m, "Interrupt mask: %08x\n",
646 I915_READ(GEN2_IMR));
647 for_each_pipe(dev_priv, pipe)
648 seq_printf(m, "Pipe %c stat: %08x\n",
650 I915_READ(PIPESTAT(pipe)));
652 seq_printf(m, "North Display Interrupt enable: %08x\n",
654 seq_printf(m, "North Display Interrupt identity: %08x\n",
656 seq_printf(m, "North Display Interrupt mask: %08x\n",
658 seq_printf(m, "South Display Interrupt enable: %08x\n",
660 seq_printf(m, "South Display Interrupt identity: %08x\n",
662 seq_printf(m, "South Display Interrupt mask: %08x\n",
664 seq_printf(m, "Graphics Interrupt enable: %08x\n",
666 seq_printf(m, "Graphics Interrupt identity: %08x\n",
668 seq_printf(m, "Graphics Interrupt mask: %08x\n",
672 if (INTEL_GEN(dev_priv) >= 11) {
673 seq_printf(m, "RCS Intr Mask:\t %08x\n",
674 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
675 seq_printf(m, "BCS Intr Mask:\t %08x\n",
676 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
677 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
678 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
679 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
680 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
681 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
682 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
683 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
684 I915_READ(GEN11_GUC_SG_INTR_MASK));
685 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
686 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
687 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
688 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
689 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
690 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
692 } else if (INTEL_GEN(dev_priv) >= 6) {
693 for_each_engine(engine, dev_priv, id) {
695 "Graphics Interrupt mask (%s): %08x\n",
696 engine->name, ENGINE_READ(engine, RING_IMR));
700 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
705 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
707 struct drm_i915_private *i915 = node_to_i915(m->private);
710 seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
713 for (i = 0; i < i915->ggtt.num_fences; i++) {
714 struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
716 seq_printf(m, "Fence %d, pin count = %d, object = ",
717 i, i915->ggtt.fence_regs[i].pin_count);
719 seq_puts(m, "unused");
721 describe_obj(m, vma->obj);
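
/*
 * GPU error state debugfs interfaces, compiled only with
 * CONFIG_DRM_I915_CAPTURE_ERROR: i915_gpu_info captures a fresh snapshot
 * on open, while i915_error_state exposes the last recorded error and
 * clears it when written to.
 */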
729 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
730 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
731 size_t count, loff_t *pos)
733 struct i915_gpu_state *error;
737 error = file->private_data;
741 /* Bounce buffer required because of kernfs __user API convenience. */
742 buf = kmalloc(count, GFP_KERNEL);
746 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
750 if (!copy_to_user(ubuf, buf, ret))
760 static int gpu_state_release(struct inode *inode, struct file *file)
762 i915_gpu_state_put(file->private_data);
766 static int i915_gpu_info_open(struct inode *inode, struct file *file)
768 struct drm_i915_private *i915 = inode->i_private;
769 struct i915_gpu_state *gpu;
770 intel_wakeref_t wakeref;
773 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
774 gpu = i915_capture_gpu_state(i915);
778 file->private_data = gpu;
782 static const struct file_operations i915_gpu_info_fops = {
783 .owner = THIS_MODULE,
784 .open = i915_gpu_info_open,
785 .read = gpu_state_read,
786 .llseek = default_llseek,
787 .release = gpu_state_release,
791 i915_error_state_write(struct file *filp,
792 const char __user *ubuf,
796 struct i915_gpu_state *error = filp->private_data;
801 DRM_DEBUG_DRIVER("Resetting error state\n");
802 i915_reset_error_state(error->i915);
807 static int i915_error_state_open(struct inode *inode, struct file *file)
809 struct i915_gpu_state *error;
811 error = i915_first_error_state(inode->i_private);
813 return PTR_ERR(error);
815 file->private_data = error;
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
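
/*
 * i915_frequency_info: report GPU frequency/RPS state. ILK uses the
 * MEMSWCTL/MEMSTAT registers, VLV/CHV read the punit, and gen6+ parses
 * the RPS control, threshold and RP_STATE_CAP registers under forcewake.
 */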
829 static int i915_frequency_info(struct seq_file *m, void *unused)
831 struct drm_i915_private *dev_priv = node_to_i915(m->private);
832 struct intel_uncore *uncore = &dev_priv->uncore;
833 struct intel_rps *rps = &dev_priv->gt_pm.rps;
834 intel_wakeref_t wakeref;
837 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
839 if (IS_GEN(dev_priv, 5)) {
840 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
841 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
843 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
844 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
845 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
847 seq_printf(m, "Current P-state: %d\n",
848 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
849 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
850 u32 rpmodectl, freq_sts;
852 rpmodectl = I915_READ(GEN6_RP_CONTROL);
853 seq_printf(m, "Video Turbo Mode: %s\n",
854 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
855 seq_printf(m, "HW control enabled: %s\n",
856 yesno(rpmodectl & GEN6_RP_ENABLE));
857 seq_printf(m, "SW control enabled: %s\n",
858 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
859 GEN6_RP_MEDIA_SW_MODE));
861 vlv_punit_get(dev_priv);
862 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
863 vlv_punit_put(dev_priv);
865 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
866 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
868 seq_printf(m, "actual GPU freq: %d MHz\n",
869 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
871 seq_printf(m, "current GPU freq: %d MHz\n",
872 intel_gpu_freq(dev_priv, rps->cur_freq));
874 seq_printf(m, "max GPU freq: %d MHz\n",
875 intel_gpu_freq(dev_priv, rps->max_freq));
877 seq_printf(m, "min GPU freq: %d MHz\n",
878 intel_gpu_freq(dev_priv, rps->min_freq));
880 seq_printf(m, "idle GPU freq: %d MHz\n",
881 intel_gpu_freq(dev_priv, rps->idle_freq));
884 "efficient (RPe) frequency: %d MHz\n",
885 intel_gpu_freq(dev_priv, rps->efficient_freq));
886 } else if (INTEL_GEN(dev_priv) >= 6) {
890 u32 rpmodectl, rpinclimit, rpdeclimit;
891 u32 rpstat, cagf, reqf;
892 u32 rpupei, rpcurup, rpprevup;
893 u32 rpdownei, rpcurdown, rpprevdown;
894 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
897 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
898 if (IS_GEN9_LP(dev_priv)) {
899 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
900 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
902 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
903 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
906 /* RPSTAT1 is in the GT power well */
907 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
909 reqf = I915_READ(GEN6_RPNSWREQ);
910 if (INTEL_GEN(dev_priv) >= 9)
913 reqf &= ~GEN6_TURBO_DISABLE;
914 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
919 reqf = intel_gpu_freq(dev_priv, reqf);
921 rpmodectl = I915_READ(GEN6_RP_CONTROL);
922 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
923 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
925 rpstat = I915_READ(GEN6_RPSTAT1);
926 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
927 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
928 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
929 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
930 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
931 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
932 cagf = intel_gpu_freq(dev_priv,
933 intel_get_cagf(dev_priv, rpstat));
935 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
937 if (INTEL_GEN(dev_priv) >= 11) {
938 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
939 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
941 * The equivalent to the PM ISR & IIR cannot be read
942 * without affecting the current state of the system
946 } else if (INTEL_GEN(dev_priv) >= 8) {
947 pm_ier = I915_READ(GEN8_GT_IER(2));
948 pm_imr = I915_READ(GEN8_GT_IMR(2));
949 pm_isr = I915_READ(GEN8_GT_ISR(2));
950 pm_iir = I915_READ(GEN8_GT_IIR(2));
952 pm_ier = I915_READ(GEN6_PMIER);
953 pm_imr = I915_READ(GEN6_PMIMR);
954 pm_isr = I915_READ(GEN6_PMISR);
955 pm_iir = I915_READ(GEN6_PMIIR);
957 pm_mask = I915_READ(GEN6_PMINTRMSK);
959 seq_printf(m, "Video Turbo Mode: %s\n",
960 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
961 seq_printf(m, "HW control enabled: %s\n",
962 yesno(rpmodectl & GEN6_RP_ENABLE));
963 seq_printf(m, "SW control enabled: %s\n",
964 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
965 GEN6_RP_MEDIA_SW_MODE));
967 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
968 pm_ier, pm_imr, pm_mask);
969 if (INTEL_GEN(dev_priv) <= 10)
970 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
972 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
973 rps->pm_intrmsk_mbz);
974 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
975 seq_printf(m, "Render p-state ratio: %d\n",
976 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
977 seq_printf(m, "Render p-state VID: %d\n",
978 gt_perf_status & 0xff);
979 seq_printf(m, "Render p-state limit: %d\n",
980 rp_state_limits & 0xff);
981 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
982 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
983 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
984 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
985 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
986 seq_printf(m, "CAGF: %dMHz\n", cagf);
987 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
988 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
989 seq_printf(m, "RP CUR UP: %d (%dus)\n",
990 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
991 seq_printf(m, "RP PREV UP: %d (%dus)\n",
992 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
993 seq_printf(m, "Up threshold: %d%%\n",
994 rps->power.up_threshold);
996 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
997 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
998 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
999 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1000 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1001 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1002 seq_printf(m, "Down threshold: %d%%\n",
1003 rps->power.down_threshold);
1005 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1006 rp_state_cap >> 16) & 0xff;
1007 max_freq *= (IS_GEN9_BC(dev_priv) ||
1008 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1009 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1010 intel_gpu_freq(dev_priv, max_freq));
1012 max_freq = (rp_state_cap & 0xff00) >> 8;
1013 max_freq *= (IS_GEN9_BC(dev_priv) ||
1014 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1015 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1016 intel_gpu_freq(dev_priv, max_freq));
1018 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1019 rp_state_cap >> 0) & 0xff;
1020 max_freq *= (IS_GEN9_BC(dev_priv) ||
1021 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1022 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1023 intel_gpu_freq(dev_priv, max_freq));
1024 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1025 intel_gpu_freq(dev_priv, rps->max_freq));
1027 seq_printf(m, "Current freq: %d MHz\n",
1028 intel_gpu_freq(dev_priv, rps->cur_freq));
1029 seq_printf(m, "Actual freq: %d MHz\n", cagf);
1030 seq_printf(m, "Idle freq: %d MHz\n",
1031 intel_gpu_freq(dev_priv, rps->idle_freq));
1032 seq_printf(m, "Min freq: %d MHz\n",
1033 intel_gpu_freq(dev_priv, rps->min_freq));
1034 seq_printf(m, "Boost freq: %d MHz\n",
1035 intel_gpu_freq(dev_priv, rps->boost_freq));
1036 seq_printf(m, "Max freq: %d MHz\n",
1037 intel_gpu_freq(dev_priv, rps->max_freq));
1039 "efficient (RPe) frequency: %d MHz\n",
1040 intel_gpu_freq(dev_priv, rps->efficient_freq));
1042 seq_puts(m, "no P-state info available\n");
1045 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1046 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1047 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1049 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
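
/*
 * i915_instdone_info: pretty-print an intel_instdone snapshot, including
 * the per-slice/subslice sampler and row registers on gen7+.
 */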
1053 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1055 struct intel_instdone *instdone)
1060 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1061 instdone->instdone);
1063 if (INTEL_GEN(dev_priv) <= 3)
1066 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1067 instdone->slice_common);
1069 if (INTEL_GEN(dev_priv) <= 6)
1072 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1073 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1074 slice, subslice, instdone->sampler[slice][subslice]);
1076 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1077 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1078 slice, subslice, instdone->row[slice][subslice]);
1081 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1083 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1084 struct intel_engine_cs *engine;
1085 u64 acthd[I915_NUM_ENGINES];
1086 struct intel_instdone instdone;
1087 intel_wakeref_t wakeref;
1088 enum intel_engine_id id;
1090 seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
1091 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1092 seq_puts(m, "\tWedged\n");
1093 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1094 seq_puts(m, "\tDevice (global) reset in progress\n");
1096 if (!i915_modparams.enable_hangcheck) {
1097 seq_puts(m, "Hangcheck disabled\n");
1101 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1102 for_each_engine(engine, dev_priv, id)
1103 acthd[id] = intel_engine_get_active_head(engine);
1105 intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
1108 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1109 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1110 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1112 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1113 seq_puts(m, "Hangcheck active, work pending\n");
1115 seq_puts(m, "Hangcheck inactive\n");
1117 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1119 for_each_engine(engine, dev_priv, id) {
1120 seq_printf(m, "%s: %d ms ago\n",
1122 jiffies_to_msecs(jiffies -
1123 engine->hangcheck.action_timestamp));
1125 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1126 (long long)engine->hangcheck.acthd,
1127 (long long)acthd[id]);
1129 if (engine->id == RCS0) {
1130 seq_puts(m, "\tinstdone read =\n");
1132 i915_instdone_info(dev_priv, m, &instdone);
1134 seq_puts(m, "\tinstdone accu =\n");
1136 i915_instdone_info(dev_priv, m,
1137 &engine->hangcheck.instdone);
1144 static int i915_reset_info(struct seq_file *m, void *unused)
1146 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1147 struct i915_gpu_error *error = &dev_priv->gpu_error;
1148 struct intel_engine_cs *engine;
1149 enum intel_engine_id id;
1151 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1153 for_each_engine(engine, dev_priv, id) {
1154 seq_printf(m, "%s = %u\n", engine->name,
1155 i915_reset_engine_count(error, engine));
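
/*
 * ironlake_drpc_info: decode the ILK render standby (RSx) control and
 * status registers for the i915_drpc_info node.
 */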
1161 static int ironlake_drpc_info(struct seq_file *m)
1163 struct drm_i915_private *i915 = node_to_i915(m->private);
1164 struct intel_uncore *uncore = &i915->uncore;
1165 u32 rgvmodectl, rstdbyctl;
1168 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1169 rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1170 crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1172 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1173 seq_printf(m, "Boost freq: %d\n",
1174 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1175 MEMMODE_BOOST_FREQ_SHIFT);
1176 seq_printf(m, "HW control enabled: %s\n",
1177 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1178 seq_printf(m, "SW control enabled: %s\n",
1179 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1180 seq_printf(m, "Gated voltage change: %s\n",
1181 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1182 seq_printf(m, "Starting frequency: P%d\n",
1183 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1184 seq_printf(m, "Max P-state: P%d\n",
1185 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1186 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1187 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1188 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1189 seq_printf(m, "Render standby enabled: %s\n",
1190 yesno(!(rstdbyctl & RCX_SW_EXIT)));
1191 seq_puts(m, "Current RS state: ");
1192 switch (rstdbyctl & RSX_STATUS_MASK) {
1194 seq_puts(m, "on\n");
1196 case RSX_STATUS_RC1:
1197 seq_puts(m, "RC1\n");
1199 case RSX_STATUS_RC1E:
1200 seq_puts(m, "RC1E\n");
1202 case RSX_STATUS_RS1:
1203 seq_puts(m, "RS1\n");
1205 case RSX_STATUS_RS2:
1206 seq_puts(m, "RS2 (RC6)\n");
1208 case RSX_STATUS_RS3:
1209 seq_puts(m, "RC3 (RC6+)\n");
1212 seq_puts(m, "unknown\n");
1219 static int i915_forcewake_domains(struct seq_file *m, void *data)
1221 struct drm_i915_private *i915 = node_to_i915(m->private);
1222 struct intel_uncore *uncore = &i915->uncore;
1223 struct intel_uncore_forcewake_domain *fw_domain;
1226 seq_printf(m, "user.bypass_count = %u\n",
1227 uncore->user_forcewake.count);
1229 for_each_fw_domain(fw_domain, uncore, tmp)
1230 seq_printf(m, "%s.wake_count = %u\n",
1231 intel_uncore_forcewake_domain_to_str(fw_domain->id),
1232 READ_ONCE(fw_domain->wake_count));
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
1241 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1243 seq_printf(m, "%s %u (%llu us)\n",
1244 title, I915_READ(reg),
1245 intel_rc6_residency_us(dev_priv, reg));
1248 static int vlv_drpc_info(struct seq_file *m)
1250 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1251 u32 rcctl1, pw_status;
1253 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1254 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1256 seq_printf(m, "RC6 Enabled: %s\n",
1257 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1258 GEN6_RC_CTL_EI_MODE(1))));
1259 seq_printf(m, "Render Power Well: %s\n",
1260 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1261 seq_printf(m, "Media Power Well: %s\n",
1262 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1264 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1265 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1267 return i915_forcewake_domains(m, NULL);
1270 static int gen6_drpc_info(struct seq_file *m)
1272 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1273 u32 gt_core_status, rcctl1, rc6vids = 0;
1274 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1276 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1277 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1279 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1280 if (INTEL_GEN(dev_priv) >= 9) {
1281 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1282 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1285 if (INTEL_GEN(dev_priv) <= 7)
1286 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1289 seq_printf(m, "RC1e Enabled: %s\n",
1290 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1291 seq_printf(m, "RC6 Enabled: %s\n",
1292 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1293 if (INTEL_GEN(dev_priv) >= 9) {
1294 seq_printf(m, "Render Well Gating Enabled: %s\n",
1295 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1296 seq_printf(m, "Media Well Gating Enabled: %s\n",
1297 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1299 seq_printf(m, "Deep RC6 Enabled: %s\n",
1300 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1301 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1302 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1303 seq_puts(m, "Current RC state: ");
1304 switch (gt_core_status & GEN6_RCn_MASK) {
1306 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1307 seq_puts(m, "Core Power Down\n");
1309 seq_puts(m, "on\n");
1312 seq_puts(m, "RC3\n");
1315 seq_puts(m, "RC6\n");
1318 seq_puts(m, "RC7\n");
1321 seq_puts(m, "Unknown\n");
1325 seq_printf(m, "Core Power Down: %s\n",
1326 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1327 if (INTEL_GEN(dev_priv) >= 9) {
1328 seq_printf(m, "Render Power Well: %s\n",
1329 (gen9_powergate_status &
1330 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1331 seq_printf(m, "Media Power Well: %s\n",
1332 (gen9_powergate_status &
1333 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1336 /* Not exactly sure what this is */
1337 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1338 GEN6_GT_GFX_RC6_LOCKED);
1339 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1340 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1341 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1343 if (INTEL_GEN(dev_priv) <= 7) {
1344 seq_printf(m, "RC6 voltage: %dmV\n",
1345 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1346 seq_printf(m, "RC6+ voltage: %dmV\n",
1347 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1348 seq_printf(m, "RC6++ voltage: %dmV\n",
1349 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1352 return i915_forcewake_domains(m, NULL);
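
/*
 * i915_drpc_info: top-level RC6/DRPC node; dispatches to the VLV/CHV,
 * gen6+ or Ironlake variant under a runtime PM reference.
 */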
1355 static int i915_drpc_info(struct seq_file *m, void *unused)
1357 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1358 intel_wakeref_t wakeref;
1361 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1362 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1363 err = vlv_drpc_info(m);
1364 else if (INTEL_GEN(dev_priv) >= 6)
1365 err = gen6_drpc_info(m);
1367 err = ironlake_drpc_info(m);
1373 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1375 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1377 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1378 dev_priv->fb_tracking.busy_bits);
1380 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1381 dev_priv->fb_tracking.flip_bits);
1386 static int i915_fbc_status(struct seq_file *m, void *unused)
1388 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1389 struct intel_fbc *fbc = &dev_priv->fbc;
1390 intel_wakeref_t wakeref;
1392 if (!HAS_FBC(dev_priv))
1395 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1396 mutex_lock(&fbc->lock);
1398 if (intel_fbc_is_active(dev_priv))
1399 seq_puts(m, "FBC enabled\n");
1401 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1403 if (intel_fbc_is_active(dev_priv)) {
1406 if (INTEL_GEN(dev_priv) >= 8)
1407 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1408 else if (INTEL_GEN(dev_priv) >= 7)
1409 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1410 else if (INTEL_GEN(dev_priv) >= 5)
1411 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1412 else if (IS_G4X(dev_priv))
1413 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1415 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1416 FBC_STAT_COMPRESSED);
1418 seq_printf(m, "Compressing: %s\n", yesno(mask));
1421 mutex_unlock(&fbc->lock);
1422 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1427 static int i915_fbc_false_color_get(void *data, u64 *val)
1429 struct drm_i915_private *dev_priv = data;
1431 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1434 *val = dev_priv->fbc.false_color;
1439 static int i915_fbc_false_color_set(void *data, u64 val)
1441 struct drm_i915_private *dev_priv = data;
1444 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1447 mutex_lock(&dev_priv->fbc.lock);
1449 reg = I915_READ(ILK_DPFC_CONTROL);
1450 dev_priv->fbc.false_color = val;
1452 I915_WRITE(ILK_DPFC_CONTROL, val ?
1453 (reg | FBC_CTL_FALSE_COLOR) :
1454 (reg & ~FBC_CTL_FALSE_COLOR));
1456 mutex_unlock(&dev_priv->fbc.lock);
1460 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1461 i915_fbc_false_color_get, i915_fbc_false_color_set,
1464 static int i915_ips_status(struct seq_file *m, void *unused)
1466 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1467 intel_wakeref_t wakeref;
1469 if (!HAS_IPS(dev_priv))
1472 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1474 seq_printf(m, "Enabled by kernel parameter: %s\n",
1475 yesno(i915_modparams.enable_ips));
1477 if (INTEL_GEN(dev_priv) >= 8) {
1478 seq_puts(m, "Currently: unknown\n");
1480 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1481 seq_puts(m, "Currently: enabled\n");
1483 seq_puts(m, "Currently: disabled\n");
1486 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1491 static int i915_sr_status(struct seq_file *m, void *unused)
1493 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1494 intel_wakeref_t wakeref;
1495 bool sr_enabled = false;
1497 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1499 if (INTEL_GEN(dev_priv) >= 9)
1500 /* no global SR status; inspect per-plane WM */;
1501 else if (HAS_PCH_SPLIT(dev_priv))
1502 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1503 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1504 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1505 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1506 else if (IS_I915GM(dev_priv))
1507 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1508 else if (IS_PINEVIEW(dev_priv))
1509 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1510 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1511 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1513 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1515 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1520 static int i915_emon_status(struct seq_file *m, void *unused)
1522 struct drm_i915_private *i915 = node_to_i915(m->private);
1523 intel_wakeref_t wakeref;
1525 if (!IS_GEN(i915, 5))
1528 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1529 unsigned long temp, chipset, gfx;
1531 temp = i915_mch_val(i915);
1532 chipset = i915_chipset_val(i915);
1533 gfx = i915_gfx_val(i915);
1535 seq_printf(m, "GMCH temp: %ld\n", temp);
1536 seq_printf(m, "Chipset power: %ld\n", chipset);
1537 seq_printf(m, "GFX power: %ld\n", gfx);
1538 seq_printf(m, "Total power: %ld\n", chipset + gfx);
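
/*
 * i915_ring_freq_table: print the GPU-to-effective-CPU/ring frequency
 * mapping read back from the pcode; only meaningful on parts with an LLC.
 */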
1544 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1546 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1547 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1548 unsigned int max_gpu_freq, min_gpu_freq;
1549 intel_wakeref_t wakeref;
1550 int gpu_freq, ia_freq;
1552 if (!HAS_LLC(dev_priv))
1555 min_gpu_freq = rps->min_freq;
1556 max_gpu_freq = rps->max_freq;
1557 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1558 /* Convert GT frequency to 50 HZ units */
1559 min_gpu_freq /= GEN9_FREQ_SCALER;
1560 max_gpu_freq /= GEN9_FREQ_SCALER;
1563 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1565 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1566 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1568 sandybridge_pcode_read(dev_priv,
1569 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1571 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1572 intel_gpu_freq(dev_priv, (gpu_freq *
1573 (IS_GEN9_BC(dev_priv) ||
1574 INTEL_GEN(dev_priv) >= 10 ?
1575 GEN9_FREQ_SCALER : 1))),
1576 ((ia_freq >> 0) & 0xff) * 100,
1577 ((ia_freq >> 8) & 0xff) * 100);
1579 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1584 static int i915_opregion(struct seq_file *m, void *unused)
1586 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1587 struct drm_device *dev = &dev_priv->drm;
1588 struct intel_opregion *opregion = &dev_priv->opregion;
1591 ret = mutex_lock_interruptible(&dev->struct_mutex);
1595 if (opregion->header)
1596 seq_write(m, opregion->header, OPREGION_SIZE);
1598 mutex_unlock(&dev->struct_mutex);
1604 static int i915_vbt(struct seq_file *m, void *unused)
1606 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1609 seq_write(m, opregion->vbt, opregion->vbt_size);
1614 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1616 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1617 struct drm_device *dev = &dev_priv->drm;
1618 struct intel_framebuffer *fbdev_fb = NULL;
1619 struct drm_framebuffer *drm_fb;
1622 ret = mutex_lock_interruptible(&dev->struct_mutex);
1626 #ifdef CONFIG_DRM_FBDEV_EMULATION
1627 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1628 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1630 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1631 fbdev_fb->base.width,
1632 fbdev_fb->base.height,
1633 fbdev_fb->base.format->depth,
1634 fbdev_fb->base.format->cpp[0] * 8,
1635 fbdev_fb->base.modifier,
1636 drm_framebuffer_read_refcount(&fbdev_fb->base));
1637 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1642 mutex_lock(&dev->mode_config.fb_lock);
1643 drm_for_each_fb(drm_fb, dev) {
1644 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1648 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1651 fb->base.format->depth,
1652 fb->base.format->cpp[0] * 8,
1654 drm_framebuffer_read_refcount(&fb->base));
1655 describe_obj(m, intel_fb_obj(&fb->base));
1658 mutex_unlock(&dev->mode_config.fb_lock);
1659 mutex_unlock(&dev->struct_mutex);
1664 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1666 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1667 ring->space, ring->head, ring->tail, ring->emit);
1670 static int i915_context_status(struct seq_file *m, void *unused)
1672 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1673 struct drm_device *dev = &dev_priv->drm;
1674 struct i915_gem_context *ctx;
1677 ret = mutex_lock_interruptible(&dev->struct_mutex);
1681 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1682 struct i915_gem_engines_iter it;
1683 struct intel_context *ce;
1685 seq_puts(m, "HW context ");
1686 if (!list_empty(&ctx->hw_id_link))
1687 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1688 atomic_read(&ctx->hw_id_pin_count));
1690 struct task_struct *task;
1692 task = get_pid_task(ctx->pid, PIDTYPE_PID);
1694 seq_printf(m, "(%s [%d]) ",
1695 task->comm, task->pid);
1696 put_task_struct(task);
1698 } else if (IS_ERR(ctx->file_priv)) {
1699 seq_puts(m, "(deleted) ");
1701 seq_puts(m, "(kernel) ");
1704 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1707 for_each_gem_engine(ce,
1708 i915_gem_context_lock_engines(ctx), it) {
1709 seq_printf(m, "%s: ", ce->engine->name);
1711 describe_obj(m, ce->state->obj);
1713 describe_ctx_ring(m, ce->ring);
1716 i915_gem_context_unlock_engines(ctx);
1721 mutex_unlock(&dev->struct_mutex);
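
/* Translate a bit-6 swizzle mode into a human-readable string. */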
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
1750 static int i915_swizzle_info(struct seq_file *m, void *data)
1752 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1753 struct intel_uncore *uncore = &dev_priv->uncore;
1754 intel_wakeref_t wakeref;
1756 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1758 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1759 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1760 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1761 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1763 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1764 seq_printf(m, "DDC = 0x%08x\n",
1765 intel_uncore_read(uncore, DCC));
1766 seq_printf(m, "DDC2 = 0x%08x\n",
1767 intel_uncore_read(uncore, DCC2));
1768 seq_printf(m, "C0DRB3 = 0x%04x\n",
1769 intel_uncore_read16(uncore, C0DRB3));
1770 seq_printf(m, "C1DRB3 = 0x%04x\n",
1771 intel_uncore_read16(uncore, C1DRB3));
1772 } else if (INTEL_GEN(dev_priv) >= 6) {
1773 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1774 intel_uncore_read(uncore, MAD_DIMM_C0));
1775 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1776 intel_uncore_read(uncore, MAD_DIMM_C1));
1777 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1778 intel_uncore_read(uncore, MAD_DIMM_C2));
1779 seq_printf(m, "TILECTL = 0x%08x\n",
1780 intel_uncore_read(uncore, TILECTL));
1781 if (INTEL_GEN(dev_priv) >= 8)
1782 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1783 intel_uncore_read(uncore, GAMTARBMODE));
1785 seq_printf(m, "ARB_MODE = 0x%08x\n",
1786 intel_uncore_read(uncore, ARB_MODE));
1787 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1788 intel_uncore_read(uncore, DISP_ARB_CTL));
1791 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1792 seq_puts(m, "L-shaped memory detected\n");
1794 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
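
/*
 * i915_rps_boost_info: summarise RPS state -- requested vs. actual
 * frequency, soft/hard limits, outstanding boosts and, while the GT is
 * awake, the up/down autotuning counters.
 */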
1813 static int i915_rps_boost_info(struct seq_file *m, void *data)
1815 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1816 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1817 u32 act_freq = rps->cur_freq;
1818 intel_wakeref_t wakeref;
1820 with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1821 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1822 vlv_punit_get(dev_priv);
1823 act_freq = vlv_punit_read(dev_priv,
1824 PUNIT_REG_GPU_FREQ_STS);
1825 vlv_punit_put(dev_priv);
1826 act_freq = (act_freq >> 8) & 0xff;
1828 act_freq = intel_get_cagf(dev_priv,
1829 I915_READ(GEN6_RPSTAT1));
1833 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1834 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1835 seq_printf(m, "Boosts outstanding? %d\n",
1836 atomic_read(&rps->num_waiters));
1837 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1838 seq_printf(m, "Frequency requested %d, actual %d\n",
1839 intel_gpu_freq(dev_priv, rps->cur_freq),
1840 intel_gpu_freq(dev_priv, act_freq));
1841 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1842 intel_gpu_freq(dev_priv, rps->min_freq),
1843 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
1844 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
1845 intel_gpu_freq(dev_priv, rps->max_freq));
1846 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
1847 intel_gpu_freq(dev_priv, rps->idle_freq),
1848 intel_gpu_freq(dev_priv, rps->efficient_freq),
1849 intel_gpu_freq(dev_priv, rps->boost_freq));
1851 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1853 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1855 u32 rpdown, rpdownei;
1857 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1858 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1859 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1860 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1861 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1862 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1864 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1865 rps_power_to_str(rps->power.mode));
1866 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
1867 rpup && rpupei ? 100 * rpup / rpupei : 0,
1868 rps->power.up_threshold);
1869 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
1870 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1871 rps->power.down_threshold);
1873 seq_puts(m, "\nRPS Autotuning inactive\n");
1879 static int i915_llc(struct seq_file *m, void *data)
1881 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1882 const bool edram = INTEL_GEN(dev_priv) > 8;
1884 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1885 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1886 dev_priv->edram_size_mb);
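
/*
 * HuC/GuC firmware status nodes: dump the cached intel_uc_fw state and,
 * under a runtime PM reference, the hardware status registers
 * (HUC_STATUS2, GUC_STATUS and the GuC scratch registers).
 */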
1891 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1893 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1894 intel_wakeref_t wakeref;
1895 struct drm_printer p;
1897 if (!HAS_HUC(dev_priv))
1900 p = drm_seq_file_printer(m);
1901 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
1903 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1904 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1909 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1911 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1912 intel_wakeref_t wakeref;
1913 struct drm_printer p;
1915 if (!HAS_GUC(dev_priv))
1918 p = drm_seq_file_printer(m);
1919 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
1921 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1922 u32 tmp = I915_READ(GUC_STATUS);
1925 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1926 seq_printf(m, "\tBootrom status = 0x%x\n",
1927 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1928 seq_printf(m, "\tuKernel status = 0x%x\n",
1929 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1930 seq_printf(m, "\tMIA Core status = 0x%x\n",
1931 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1932 seq_puts(m, "\nScratch registers:\n");
1933 for (i = 0; i < 16; i++) {
1934 seq_printf(m, "\t%2d: \t0x%x\n",
1935 i, I915_READ(SOFT_SCRATCH(i)));
1943 stringify_guc_log_type(enum guc_log_buffer_type type)
1946 case GUC_ISR_LOG_BUFFER:
1948 case GUC_DPC_LOG_BUFFER:
1950 case GUC_CRASH_DUMP_LOG_BUFFER:
1959 static void i915_guc_log_info(struct seq_file *m,
1960 struct drm_i915_private *dev_priv)
1962 struct intel_guc_log *log = &dev_priv->guc.log;
1963 enum guc_log_buffer_type type;
1965 if (!intel_guc_log_relay_enabled(log)) {
1966 seq_puts(m, "GuC log relay disabled\n");
1970 seq_puts(m, "GuC logging stats:\n");
1972 seq_printf(m, "\tRelay full count: %u\n",
1973 log->relay.full_count);
1975 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1976 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1977 stringify_guc_log_type(type),
1978 log->stats[type].flush,
1979 log->stats[type].sampled_overflow);
1983 static void i915_guc_client_info(struct seq_file *m,
1984 struct drm_i915_private *dev_priv,
1985 struct intel_guc_client *client)
1987 struct intel_engine_cs *engine;
1988 enum intel_engine_id id;
1991 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1992 client->priority, client->stage_id, client->proc_desc_offset);
1993 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1994 client->doorbell_id, client->doorbell_offset);
1996 for_each_engine(engine, dev_priv, id) {
1997 u64 submissions = client->submissions[id];
1999 seq_printf(m, "\tSubmissions: %llu %s\n",
2000 submissions, engine->name);
2002 seq_printf(m, "\tTotal: %llu\n", tot);
2005 static int i915_guc_info(struct seq_file *m, void *data)
2007 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2008 const struct intel_guc *guc = &dev_priv->guc;
2010 if (!USES_GUC(dev_priv))
2013 i915_guc_log_info(m, dev_priv);
2015 if (!USES_GUC_SUBMISSION(dev_priv))
2018 GEM_BUG_ON(!guc->execbuf_client);
2020 seq_printf(m, "\nDoorbell map:\n");
2021 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2022 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2024 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2025 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2026 if (guc->preempt_client) {
2027 seq_printf(m, "\nGuC preempt client @ %p:\n",
2028 guc->preempt_client);
2029 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2032 /* Add more as required ... */
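
/*
 * i915_guc_stage_pool: walk the GuC stage descriptor pool and print the
 * active descriptors, their doorbell setup and the per-engine execlist
 * context state for the execbuf client.
 */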
2037 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2039 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2040 const struct intel_guc *guc = &dev_priv->guc;
2041 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2042 struct intel_guc_client *client = guc->execbuf_client;
2043 intel_engine_mask_t tmp;
2046 if (!USES_GUC_SUBMISSION(dev_priv))
2049 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2050 struct intel_engine_cs *engine;
2052 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2055 seq_printf(m, "GuC stage descriptor %u:\n", index);
2056 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2057 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2058 seq_printf(m, "\tPriority: %d\n", desc->priority);
2059 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2060 seq_printf(m, "\tEngines used: 0x%x\n",
2061 desc->engines_used);
2062 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2063 desc->db_trigger_phy,
2064 desc->db_trigger_cpu,
2065 desc->db_trigger_uk);
2066 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2067 desc->process_desc);
2068 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2069 desc->wq_addr, desc->wq_size);
2072 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2073 u32 guc_engine_id = engine->guc_id;
2074 struct guc_execlist_context *lrc =
2075 &desc->lrc[guc_engine_id];
2077 seq_printf(m, "\t%s LRC:\n", engine->name);
2078 seq_printf(m, "\t\tContext desc: 0x%x\n",
2080 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2081 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2082 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2083 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2091 static int i915_guc_log_dump(struct seq_file *m, void *data)
2093 struct drm_info_node *node = m->private;
2094 struct drm_i915_private *dev_priv = node_to_i915(node);
2095 bool dump_load_err = !!node->info_ent->data;
2096 struct drm_i915_gem_object *obj = NULL;
2100 if (!HAS_GUC(dev_priv))
2104 obj = dev_priv->guc.load_err_log;
2105 else if (dev_priv->guc.log.vma)
2106 obj = dev_priv->guc.log.vma->obj;
2111 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2113 DRM_DEBUG("Failed to pin object\n");
2114 seq_puts(m, "(log data inaccessible)\n");
2115 return PTR_ERR(log);
2118 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2119 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2120 *(log + i), *(log + i + 1),
2121 *(log + i + 2), *(log + i + 3));
2125 i915_gem_object_unpin_map(obj);
2130 static int i915_guc_log_level_get(void *data, u64 *val)
2132 struct drm_i915_private *dev_priv = data;
2134 if (!USES_GUC(dev_priv))
2137 *val = intel_guc_log_get_level(&dev_priv->guc.log);
2142 static int i915_guc_log_level_set(void *data, u64 val)
2144 struct drm_i915_private *dev_priv = data;
2146 if (!USES_GUC(dev_priv))
2149 return intel_guc_log_set_level(&dev_priv->guc.log, val);
2152 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2153 i915_guc_log_level_get, i915_guc_log_level_set,
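/*
 * Minimal sketch of how this attribute is used, assuming the debugfs node
 * keeps a name derived from these fops (e.g. "i915_guc_log_level"):
 *   echo 2 > i915_guc_log_level   # calls intel_guc_log_set_level(log, 2)
 *   cat i915_guc_log_level        # reports intel_guc_log_get_level(log)
 * Both paths bail out early when the GuC is not in use.
 */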
2156 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2158 struct drm_i915_private *dev_priv = inode->i_private;
2160 if (!USES_GUC(dev_priv))
2163 file->private_data = &dev_priv->guc.log;
2165 return intel_guc_log_relay_open(&dev_priv->guc.log);
2169 i915_guc_log_relay_write(struct file *filp,
2170 const char __user *ubuf,
2174 struct intel_guc_log *log = filp->private_data;
2176 intel_guc_log_relay_flush(log);
2181 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2183 struct drm_i915_private *dev_priv = inode->i_private;
2185 intel_guc_log_relay_close(&dev_priv->guc.log);
2190 static const struct file_operations i915_guc_log_relay_fops = {
2191 .owner = THIS_MODULE,
2192 .open = i915_guc_log_relay_open,
2193 .write = i915_guc_log_relay_write,
2194 .release = i915_guc_log_relay_release,
2197 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2200 static const char * const sink_status[] = {
2202 "transition to active, capture and display",
2203 "active, display from RFB",
2204 "active, capture and display on sink device timings",
2205 "transition to inactive, capture and display, timing re-sync",
2208 "sink internal error",
2210 struct drm_connector *connector = m->private;
2211 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2212 struct intel_dp *intel_dp =
2213 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2216 if (!CAN_PSR(dev_priv)) {
2217 seq_puts(m, "PSR Unsupported\n");
2221 if (connector->status != connector_status_connected)
2224 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2227 const char *str = "unknown";
2229 val &= DP_PSR_SINK_STATE_MASK;
2230 if (val < ARRAY_SIZE(sink_status))
2231 str = sink_status[val];
2232 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2239 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2242 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2244 u32 val, status_val;
2245 const char *status = "unknown";
2247 if (dev_priv->psr.psr2_enabled) {
2248 static const char * const live_status[] = {
2261 val = I915_READ(EDP_PSR2_STATUS);
2262 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2263 EDP_PSR2_STATUS_STATE_SHIFT;
2264 if (status_val < ARRAY_SIZE(live_status))
2265 status = live_status[status_val];
2267 static const char * const live_status[] = {
2277 val = I915_READ(EDP_PSR_STATUS);
2278 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2279 EDP_PSR_STATUS_STATE_SHIFT;
2280 if (status_val < ARRAY_SIZE(live_status))
2281 status = live_status[status_val];
2284 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2287 static int i915_edp_psr_status(struct seq_file *m, void *data)
2289 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2290 struct i915_psr *psr = &dev_priv->psr;
2291 intel_wakeref_t wakeref;
2296 if (!HAS_PSR(dev_priv))
2299 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2301 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2304 if (!psr->sink_support)
2307 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2308 mutex_lock(&psr->lock);
2311 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2313 status = "disabled";
2314 seq_printf(m, "PSR mode: %s\n", status);
2319 if (psr->psr2_enabled) {
2320 val = I915_READ(EDP_PSR2_CTL);
2321 enabled = val & EDP_PSR2_ENABLE;
2323 val = I915_READ(EDP_PSR_CTL);
2324 enabled = val & EDP_PSR_ENABLE;
2326 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2327 enableddisabled(enabled), val);
2328 psr_source_status(dev_priv, m);
2329 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2330 psr->busy_frontbuffer_bits);
2333 * SKL+ Perf counter is reset to 0 every time DC state is entered
2335 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2336 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2337 seq_printf(m, "Performance counter: %u\n", val);
2340 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2341 seq_printf(m, "Last attempted entry at: %lld\n",
2342 psr->last_entry_attempt);
2343 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2346 if (psr->psr2_enabled) {
2347 u32 su_frames_val[3];
2351 * Reading all 3 registers beforehand to minimize crossing a
2352 * frame boundary between register reads
2354 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2355 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2357 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2359 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2362 su_blocks = su_frames_val[frame / 3] &
2363 PSR2_SU_STATUS_MASK(frame);
2364 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2365 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2370 mutex_unlock(&psr->lock);
2371 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2377 i915_edp_psr_debug_set(void *data, u64 val)
2379 struct drm_i915_private *dev_priv = data;
2380 intel_wakeref_t wakeref;
2383 if (!CAN_PSR(dev_priv))
2386 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2388 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2390 ret = intel_psr_debug_set(dev_priv, val);
2392 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2398 i915_edp_psr_debug_get(void *data, u64 *val)
2400 struct drm_i915_private *dev_priv = data;
2402 if (!CAN_PSR(dev_priv))
2405 *val = READ_ONCE(dev_priv->psr.debug);
2409 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2410 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
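/*
 * Writing a value to the node backed by i915_edp_psr_debug_fops passes it
 * straight to intel_psr_debug_set() under a runtime-PM wakeref; reading
 * returns the current psr.debug mask. Both paths are only honoured when
 * CAN_PSR() holds for the device.
 */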
2413 static int i915_energy_uJ(struct seq_file *m, void *data)
2415 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2416 unsigned long long power;
2417 intel_wakeref_t wakeref;
2420 if (INTEL_GEN(dev_priv) < 6)
2423 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2426 units = (power & 0x1f00) >> 8;
2427 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2428 power = I915_READ(MCH_SECP_NRG_STTS);
2430 power = (1000000 * power) >> units; /* convert to uJ */
2431 seq_printf(m, "%llu", power);
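/*
 * Unit conversion: per the RAPL definition, bits 12:8 of
 * MSR_RAPL_POWER_UNIT encode the energy status unit as 1/2^units Joules,
 * so the raw MCH_SECP_NRG_STTS counter times 10^6, shifted right by
 * 'units', yields the microjoule value printed above.
 */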
2436 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2438 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2439 struct pci_dev *pdev = dev_priv->drm.pdev;
2441 if (!HAS_RUNTIME_PM(dev_priv))
2442 seq_puts(m, "Runtime power management not supported\n");
2444 seq_printf(m, "Runtime power status: %s\n",
2445 enableddisabled(!dev_priv->power_domains.wakeref));
2447 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2448 seq_printf(m, "IRQs disabled: %s\n",
2449 yesno(!intel_irqs_enabled(dev_priv)));
2451 seq_printf(m, "Usage count: %d\n",
2452 atomic_read(&dev_priv->drm.dev->power.usage_count));
2454 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2456 seq_printf(m, "PCI device power state: %s [%d]\n",
2457 pci_power_name(pdev->current_state),
2458 pdev->current_state);
2460 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2461 struct drm_printer p = drm_seq_file_printer(m);
2463 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2469 static int i915_power_domain_info(struct seq_file *m, void *unused)
2471 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2472 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2475 mutex_lock(&power_domains->lock);
2477 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2478 for (i = 0; i < power_domains->power_well_count; i++) {
2479 struct i915_power_well *power_well;
2480 enum intel_display_power_domain power_domain;
2482 power_well = &power_domains->power_wells[i];
2483 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2486 for_each_power_domain(power_domain, power_well->desc->domains)
2487 seq_printf(m, " %-23s %d\n",
2488 intel_display_power_domain_str(power_domain),
2489 power_domains->domain_use_count[power_domain]);
2492 mutex_unlock(&power_domains->lock);
2497 static int i915_dmc_info(struct seq_file *m, void *unused)
2499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2500 intel_wakeref_t wakeref;
2501 struct intel_csr *csr;
2503 if (!HAS_CSR(dev_priv))
2506 csr = &dev_priv->csr;
2508 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2510 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2511 seq_printf(m, "path: %s\n", csr->fw_path);
2513 if (!csr->dmc_payload)
2516 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2517 CSR_VERSION_MINOR(csr->version));
2519 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2522 seq_printf(m, "DC3 -> DC5 count: %d\n",
2523 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2524 SKL_CSR_DC3_DC5_COUNT));
2525 if (!IS_GEN9_LP(dev_priv))
2526 seq_printf(m, "DC5 -> DC6 count: %d\n",
2527 I915_READ(SKL_CSR_DC5_DC6_COUNT));
2530 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2531 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2532 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2534 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2539 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2540 struct drm_display_mode *mode)
2544 for (i = 0; i < tabs; i++)
2547 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2550 static void intel_encoder_info(struct seq_file *m,
2551 struct intel_crtc *intel_crtc,
2552 struct intel_encoder *intel_encoder)
2554 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2555 struct drm_device *dev = &dev_priv->drm;
2556 struct drm_crtc *crtc = &intel_crtc->base;
2557 struct intel_connector *intel_connector;
2558 struct drm_encoder *encoder;
2560 encoder = &intel_encoder->base;
2561 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2562 encoder->base.id, encoder->name);
2563 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2564 struct drm_connector *connector = &intel_connector->base;
2565 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2568 drm_get_connector_status_name(connector->status));
2569 if (connector->status == connector_status_connected) {
2570 struct drm_display_mode *mode = &crtc->mode;
2571 seq_printf(m, ", mode:\n");
2572 intel_seq_print_mode(m, 2, mode);
2579 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2581 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2582 struct drm_device *dev = &dev_priv->drm;
2583 struct drm_crtc *crtc = &intel_crtc->base;
2584 struct intel_encoder *intel_encoder;
2585 struct drm_plane_state *plane_state = crtc->primary->state;
2586 struct drm_framebuffer *fb = plane_state->fb;
2589 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2590 fb->base.id, plane_state->src_x >> 16,
2591 plane_state->src_y >> 16, fb->width, fb->height);
2593 seq_puts(m, "\tprimary plane disabled\n");
2594 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2595 intel_encoder_info(m, intel_crtc, intel_encoder);
2598 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2600 struct drm_display_mode *mode = panel->fixed_mode;
2602 seq_printf(m, "\tfixed mode:\n");
2603 intel_seq_print_mode(m, 2, mode);
2606 static void intel_dp_info(struct seq_file *m,
2607 struct intel_connector *intel_connector)
2609 struct intel_encoder *intel_encoder = intel_connector->encoder;
2610 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2612 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2613 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2614 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2615 intel_panel_info(m, &intel_connector->panel);
2617 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2621 static void intel_dp_mst_info(struct seq_file *m,
2622 struct intel_connector *intel_connector)
2624 struct intel_encoder *intel_encoder = intel_connector->encoder;
2625 struct intel_dp_mst_encoder *intel_mst =
2626 enc_to_mst(&intel_encoder->base);
2627 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2628 struct intel_dp *intel_dp = &intel_dig_port->dp;
2629 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2630 intel_connector->port);
2632 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2635 static void intel_hdmi_info(struct seq_file *m,
2636 struct intel_connector *intel_connector)
2638 struct intel_encoder *intel_encoder = intel_connector->encoder;
2639 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2641 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2644 static void intel_lvds_info(struct seq_file *m,
2645 struct intel_connector *intel_connector)
2647 intel_panel_info(m, &intel_connector->panel);
2650 static void intel_connector_info(struct seq_file *m,
2651 struct drm_connector *connector)
2653 struct intel_connector *intel_connector = to_intel_connector(connector);
2654 struct intel_encoder *intel_encoder = intel_connector->encoder;
2655 struct drm_display_mode *mode;
2657 seq_printf(m, "connector %d: type %s, status: %s\n",
2658 connector->base.id, connector->name,
2659 drm_get_connector_status_name(connector->status));
2661 if (connector->status == connector_status_disconnected)
2664 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2665 connector->display_info.width_mm,
2666 connector->display_info.height_mm);
2667 seq_printf(m, "\tsubpixel order: %s\n",
2668 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2669 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2674 switch (connector->connector_type) {
2675 case DRM_MODE_CONNECTOR_DisplayPort:
2676 case DRM_MODE_CONNECTOR_eDP:
2677 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2678 intel_dp_mst_info(m, intel_connector);
2680 intel_dp_info(m, intel_connector);
2682 case DRM_MODE_CONNECTOR_LVDS:
2683 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2684 intel_lvds_info(m, intel_connector);
2686 case DRM_MODE_CONNECTOR_HDMIA:
2687 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2688 intel_encoder->type == INTEL_OUTPUT_DDI)
2689 intel_hdmi_info(m, intel_connector);
2695 seq_printf(m, "\tmodes:\n");
2696 list_for_each_entry(mode, &connector->modes, head)
2697 intel_seq_print_mode(m, 2, mode);
2700 static const char *plane_type(enum drm_plane_type type)
2703 case DRM_PLANE_TYPE_OVERLAY:
2705 case DRM_PLANE_TYPE_PRIMARY:
2707 case DRM_PLANE_TYPE_CURSOR:
2710 * Deliberately omitting default: to generate compiler warnings
2711 * when a new drm_plane_type gets added.
2718 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2721 * According to the docs only one DRM_MODE_ROTATE_ bit is allowed, but this
2722 * prints them all so that misused values are easy to spot
2724 snprintf(buf, bufsize,
2725 "%s%s%s%s%s%s(0x%08x)",
2726 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2727 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2728 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2729 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2730 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2731 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2735 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2737 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2738 struct drm_device *dev = &dev_priv->drm;
2739 struct intel_plane *intel_plane;
2741 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2742 struct drm_plane_state *state;
2743 struct drm_plane *plane = &intel_plane->base;
2744 struct drm_format_name_buf format_name;
2747 if (!plane->state) {
2748 seq_puts(m, "plane->state is NULL!\n");
2752 state = plane->state;
2755 drm_get_format_name(state->fb->format->format,
2758 sprintf(format_name.str, "N/A");
2761 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2763 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2765 plane_type(intel_plane->base.type),
2766 state->crtc_x, state->crtc_y,
2767 state->crtc_w, state->crtc_h,
2768 (state->src_x >> 16),
2769 ((state->src_x & 0xffff) * 15625) >> 10,
2770 (state->src_y >> 16),
2771 ((state->src_y & 0xffff) * 15625) >> 10,
2772 (state->src_w >> 16),
2773 ((state->src_w & 0xffff) * 15625) >> 10,
2774 (state->src_h >> 16),
2775 ((state->src_h & 0xffff) * 15625) >> 10,
2781 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2783 struct intel_crtc_state *pipe_config;
2784 int num_scalers = intel_crtc->num_scalers;
2787 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2789 /* Not all platforms have a scaler */
2791 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2793 pipe_config->scaler_state.scaler_users,
2794 pipe_config->scaler_state.scaler_id);
2796 for (i = 0; i < num_scalers; i++) {
2797 struct intel_scaler *sc =
2798 &pipe_config->scaler_state.scalers[i];
2800 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2801 i, yesno(sc->in_use), sc->mode);
2805 seq_puts(m, "\tNo scalers available on this platform\n");
2809 static int i915_display_info(struct seq_file *m, void *unused)
2811 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2812 struct drm_device *dev = &dev_priv->drm;
2813 struct intel_crtc *crtc;
2814 struct drm_connector *connector;
2815 struct drm_connector_list_iter conn_iter;
2816 intel_wakeref_t wakeref;
2818 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2820 seq_printf(m, "CRTC info\n");
2821 seq_printf(m, "---------\n");
2822 for_each_intel_crtc(dev, crtc) {
2823 struct intel_crtc_state *pipe_config;
2825 drm_modeset_lock(&crtc->base.mutex, NULL);
2826 pipe_config = to_intel_crtc_state(crtc->base.state);
2828 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2829 crtc->base.base.id, pipe_name(crtc->pipe),
2830 yesno(pipe_config->base.active),
2831 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2832 yesno(pipe_config->dither), pipe_config->pipe_bpp);
2834 if (pipe_config->base.active) {
2835 struct intel_plane *cursor =
2836 to_intel_plane(crtc->base.cursor);
2838 intel_crtc_info(m, crtc);
2840 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2841 yesno(cursor->base.state->visible),
2842 cursor->base.state->crtc_x,
2843 cursor->base.state->crtc_y,
2844 cursor->base.state->crtc_w,
2845 cursor->base.state->crtc_h,
2846 cursor->cursor.base);
2847 intel_scaler_info(m, crtc);
2848 intel_plane_info(m, crtc);
2851 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2852 yesno(!crtc->cpu_fifo_underrun_disabled),
2853 yesno(!crtc->pch_fifo_underrun_disabled));
2854 drm_modeset_unlock(&crtc->base.mutex);
2857 seq_printf(m, "\n");
2858 seq_printf(m, "Connector info\n");
2859 seq_printf(m, "--------------\n");
2860 mutex_lock(&dev->mode_config.mutex);
2861 drm_connector_list_iter_begin(dev, &conn_iter);
2862 drm_for_each_connector_iter(connector, &conn_iter)
2863 intel_connector_info(m, connector);
2864 drm_connector_list_iter_end(&conn_iter);
2865 mutex_unlock(&dev->mode_config.mutex);
2867 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2872 static int i915_engine_info(struct seq_file *m, void *unused)
2874 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2875 struct intel_engine_cs *engine;
2876 intel_wakeref_t wakeref;
2877 enum intel_engine_id id;
2878 struct drm_printer p;
2880 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2882 seq_printf(m, "GT awake? %s [%d]\n",
2883 yesno(dev_priv->gt.awake),
2884 atomic_read(&dev_priv->gt.wakeref.count));
2885 seq_printf(m, "CS timestamp frequency: %u kHz\n",
2886 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2888 p = drm_seq_file_printer(m);
2889 for_each_engine(engine, dev_priv, id)
2890 intel_engine_dump(engine, &p, "%s\n", engine->name);
2892 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2897 static int i915_rcs_topology(struct seq_file *m, void *unused)
2899 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2900 struct drm_printer p = drm_seq_file_printer(m);
2902 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2907 static int i915_shrinker_info(struct seq_file *m, void *unused)
2909 struct drm_i915_private *i915 = node_to_i915(m->private);
2911 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2912 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2917 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2919 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2920 struct drm_device *dev = &dev_priv->drm;
2923 drm_modeset_lock_all(dev);
2924 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2925 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2927 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2929 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2930 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2931 seq_printf(m, " tracked hardware state:\n");
2932 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
2933 seq_printf(m, " dpll_md: 0x%08x\n",
2934 pll->state.hw_state.dpll_md);
2935 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2936 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2937 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
2938 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2939 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2940 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2941 pll->state.hw_state.mg_refclkin_ctl);
2942 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2943 pll->state.hw_state.mg_clktop2_coreclkctl1);
2944 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2945 pll->state.hw_state.mg_clktop2_hsclkctl);
2946 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2947 pll->state.hw_state.mg_pll_div0);
2948 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2949 pll->state.hw_state.mg_pll_div1);
2950 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2951 pll->state.hw_state.mg_pll_lf);
2952 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2953 pll->state.hw_state.mg_pll_frac_lock);
2954 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2955 pll->state.hw_state.mg_pll_ssc);
2956 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2957 pll->state.hw_state.mg_pll_bias);
2958 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2959 pll->state.hw_state.mg_pll_tdc_coldst_bias);
2961 drm_modeset_unlock_all(dev);
2966 static int i915_wa_registers(struct seq_file *m, void *unused)
2968 struct drm_i915_private *i915 = node_to_i915(m->private);
2969 const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
2973 seq_printf(m, "Workarounds applied: %u\n", wal->count);
2974 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2975 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2976 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
2981 static int i915_ipc_status_show(struct seq_file *m, void *data)
2983 struct drm_i915_private *dev_priv = m->private;
2985 seq_printf(m, "Isochronous Priority Control: %s\n",
2986 yesno(dev_priv->ipc_enabled));
2990 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2992 struct drm_i915_private *dev_priv = inode->i_private;
2994 if (!HAS_IPC(dev_priv))
2997 return single_open(file, i915_ipc_status_show, dev_priv);
3000 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3001 size_t len, loff_t *offp)
3003 struct seq_file *m = file->private_data;
3004 struct drm_i915_private *dev_priv = m->private;
3005 intel_wakeref_t wakeref;
3009 ret = kstrtobool_from_user(ubuf, len, &enable);
3013 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3014 if (!dev_priv->ipc_enabled && enable)
3015 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3016 dev_priv->wm.distrust_bios_wm = true;
3017 dev_priv->ipc_enabled = enable;
3018 intel_enable_ipc(dev_priv);
3024 static const struct file_operations i915_ipc_status_fops = {
3025 .owner = THIS_MODULE,
3026 .open = i915_ipc_status_open,
3028 .llseek = seq_lseek,
3029 .release = single_release,
3030 .write = i915_ipc_status_write
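/*
 * Example interaction, assuming the node created for these fops keeps the
 * "i915_ipc_status" name:
 *   cat i915_ipc_status        # reports whether IPC is enabled
 *   echo 1 > i915_ipc_status   # enables IPC and marks the BIOS WM untrusted
 * The write path holds a runtime-PM wakeref around intel_enable_ipc().
 */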
3033 static int i915_ddb_info(struct seq_file *m, void *unused)
3035 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3036 struct drm_device *dev = &dev_priv->drm;
3037 struct skl_ddb_entry *entry;
3038 struct intel_crtc *crtc;
3040 if (INTEL_GEN(dev_priv) < 9)
3043 drm_modeset_lock_all(dev);
3045 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3047 for_each_intel_crtc(&dev_priv->drm, crtc) {
3048 struct intel_crtc_state *crtc_state =
3049 to_intel_crtc_state(crtc->base.state);
3050 enum pipe pipe = crtc->pipe;
3051 enum plane_id plane_id;
3053 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3055 for_each_plane_id_on_crtc(crtc, plane_id) {
3056 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3057 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
3058 entry->start, entry->end,
3059 skl_ddb_entry_size(entry));
3062 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3063 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3064 entry->end, skl_ddb_entry_size(entry));
3067 drm_modeset_unlock_all(dev);
3072 static void drrs_status_per_crtc(struct seq_file *m,
3073 struct drm_device *dev,
3074 struct intel_crtc *intel_crtc)
3076 struct drm_i915_private *dev_priv = to_i915(dev);
3077 struct i915_drrs *drrs = &dev_priv->drrs;
3079 struct drm_connector *connector;
3080 struct drm_connector_list_iter conn_iter;
3082 drm_connector_list_iter_begin(dev, &conn_iter);
3083 drm_for_each_connector_iter(connector, &conn_iter) {
3084 if (connector->state->crtc != &intel_crtc->base)
3087 seq_printf(m, "%s:\n", connector->name);
3089 drm_connector_list_iter_end(&conn_iter);
3091 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3092 seq_puts(m, "\tVBT: DRRS_type: Static");
3093 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3094 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3095 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3096 seq_puts(m, "\tVBT: DRRS_type: None");
3098 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3100 seq_puts(m, "\n\n");
3102 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3103 struct intel_panel *panel;
3105 mutex_lock(&drrs->mutex);
3106 /* DRRS Supported */
3107 seq_puts(m, "\tDRRS Supported: Yes\n");
3109 /* disable_drrs() will make drrs->dp NULL */
3111 seq_puts(m, "Idleness DRRS: Disabled\n");
3112 if (dev_priv->psr.enabled)
3114 "\tAs PSR is enabled, DRRS is not enabled\n");
3115 mutex_unlock(&drrs->mutex);
3119 panel = &drrs->dp->attached_connector->panel;
3120 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3121 drrs->busy_frontbuffer_bits);
3123 seq_puts(m, "\n\t\t");
3124 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3125 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3126 vrefresh = panel->fixed_mode->vrefresh;
3127 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3128 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3129 vrefresh = panel->downclock_mode->vrefresh;
3131 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3132 drrs->refresh_rate_type);
3133 mutex_unlock(&drrs->mutex);
3136 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3138 seq_puts(m, "\n\t\t");
3139 mutex_unlock(&drrs->mutex);
3141 /* DRRS not supported. Print the VBT parameter */
3142 seq_puts(m, "\tDRRS Supported : No");
3147 static int i915_drrs_status(struct seq_file *m, void *unused)
3149 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3150 struct drm_device *dev = &dev_priv->drm;
3151 struct intel_crtc *intel_crtc;
3152 int active_crtc_cnt = 0;
3154 drm_modeset_lock_all(dev);
3155 for_each_intel_crtc(dev, intel_crtc) {
3156 if (intel_crtc->base.state->active) {
3158 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3160 drrs_status_per_crtc(m, dev, intel_crtc);
3163 drm_modeset_unlock_all(dev);
3165 if (!active_crtc_cnt)
3166 seq_puts(m, "No active crtc found\n");
3171 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3173 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3174 struct drm_device *dev = &dev_priv->drm;
3175 struct intel_encoder *intel_encoder;
3176 struct intel_digital_port *intel_dig_port;
3177 struct drm_connector *connector;
3178 struct drm_connector_list_iter conn_iter;
3180 drm_connector_list_iter_begin(dev, &conn_iter);
3181 drm_for_each_connector_iter(connector, &conn_iter) {
3182 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3185 intel_encoder = intel_attached_encoder(connector);
3186 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3189 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3190 if (!intel_dig_port->dp.can_mst)
3193 seq_printf(m, "MST Source Port %c\n",
3194 port_name(intel_dig_port->base.port));
3195 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3197 drm_connector_list_iter_end(&conn_iter);
3202 static ssize_t i915_displayport_test_active_write(struct file *file,
3203 const char __user *ubuf,
3204 size_t len, loff_t *offp)
3208 struct drm_device *dev;
3209 struct drm_connector *connector;
3210 struct drm_connector_list_iter conn_iter;
3211 struct intel_dp *intel_dp;
3214 dev = ((struct seq_file *)file->private_data)->private;
3219 input_buffer = memdup_user_nul(ubuf, len);
3220 if (IS_ERR(input_buffer))
3221 return PTR_ERR(input_buffer);
3223 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3225 drm_connector_list_iter_begin(dev, &conn_iter);
3226 drm_for_each_connector_iter(connector, &conn_iter) {
3227 struct intel_encoder *encoder;
3229 if (connector->connector_type !=
3230 DRM_MODE_CONNECTOR_DisplayPort)
3233 encoder = to_intel_encoder(connector->encoder);
3234 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3237 if (encoder && connector->status == connector_status_connected) {
3238 intel_dp = enc_to_intel_dp(&encoder->base);
3239 status = kstrtoint(input_buffer, 10, &val);
3242 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3243 /* To prevent erroneous activation of the compliance
3244 * testing code, only accept an actual value of 1 here
3247 intel_dp->compliance.test_active = 1;
3249 intel_dp->compliance.test_active = 0;
3252 drm_connector_list_iter_end(&conn_iter);
3253 kfree(input_buffer);
3261 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3263 struct drm_i915_private *dev_priv = m->private;
3264 struct drm_device *dev = &dev_priv->drm;
3265 struct drm_connector *connector;
3266 struct drm_connector_list_iter conn_iter;
3267 struct intel_dp *intel_dp;
3269 drm_connector_list_iter_begin(dev, &conn_iter);
3270 drm_for_each_connector_iter(connector, &conn_iter) {
3271 struct intel_encoder *encoder;
3273 if (connector->connector_type !=
3274 DRM_MODE_CONNECTOR_DisplayPort)
3277 encoder = to_intel_encoder(connector->encoder);
3278 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3281 if (encoder && connector->status == connector_status_connected) {
3282 intel_dp = enc_to_intel_dp(&encoder->base);
3283 if (intel_dp->compliance.test_active)
3290 drm_connector_list_iter_end(&conn_iter);
3295 static int i915_displayport_test_active_open(struct inode *inode,
3298 return single_open(file, i915_displayport_test_active_show,
3302 static const struct file_operations i915_displayport_test_active_fops = {
3303 .owner = THIS_MODULE,
3304 .open = i915_displayport_test_active_open,
3306 .llseek = seq_lseek,
3307 .release = single_release,
3308 .write = i915_displayport_test_active_write
3311 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3313 struct drm_i915_private *dev_priv = m->private;
3314 struct drm_device *dev = &dev_priv->drm;
3315 struct drm_connector *connector;
3316 struct drm_connector_list_iter conn_iter;
3317 struct intel_dp *intel_dp;
3319 drm_connector_list_iter_begin(dev, &conn_iter);
3320 drm_for_each_connector_iter(connector, &conn_iter) {
3321 struct intel_encoder *encoder;
3323 if (connector->connector_type !=
3324 DRM_MODE_CONNECTOR_DisplayPort)
3327 encoder = to_intel_encoder(connector->encoder);
3328 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3331 if (encoder && connector->status == connector_status_connected) {
3332 intel_dp = enc_to_intel_dp(&encoder->base);
3333 if (intel_dp->compliance.test_type ==
3334 DP_TEST_LINK_EDID_READ)
3335 seq_printf(m, "%lx",
3336 intel_dp->compliance.test_data.edid);
3337 else if (intel_dp->compliance.test_type ==
3338 DP_TEST_LINK_VIDEO_PATTERN) {
3339 seq_printf(m, "hdisplay: %d\n",
3340 intel_dp->compliance.test_data.hdisplay);
3341 seq_printf(m, "vdisplay: %d\n",
3342 intel_dp->compliance.test_data.vdisplay);
3343 seq_printf(m, "bpc: %u\n",
3344 intel_dp->compliance.test_data.bpc);
3349 drm_connector_list_iter_end(&conn_iter);
3353 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3355 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3357 struct drm_i915_private *dev_priv = m->private;
3358 struct drm_device *dev = &dev_priv->drm;
3359 struct drm_connector *connector;
3360 struct drm_connector_list_iter conn_iter;
3361 struct intel_dp *intel_dp;
3363 drm_connector_list_iter_begin(dev, &conn_iter);
3364 drm_for_each_connector_iter(connector, &conn_iter) {
3365 struct intel_encoder *encoder;
3367 if (connector->connector_type !=
3368 DRM_MODE_CONNECTOR_DisplayPort)
3371 encoder = to_intel_encoder(connector->encoder);
3372 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3375 if (encoder && connector->status == connector_status_connected) {
3376 intel_dp = enc_to_intel_dp(&encoder->base);
3377 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3381 drm_connector_list_iter_end(&conn_iter);
3385 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3387 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3389 struct drm_i915_private *dev_priv = m->private;
3390 struct drm_device *dev = &dev_priv->drm;
3394 if (IS_CHERRYVIEW(dev_priv))
3396 else if (IS_VALLEYVIEW(dev_priv))
3398 else if (IS_G4X(dev_priv))
3401 num_levels = ilk_wm_max_level(dev_priv) + 1;
3403 drm_modeset_lock_all(dev);
3405 for (level = 0; level < num_levels; level++) {
3406 unsigned int latency = wm[level];
3409 * - WM1+ latency values in 0.5us units
3410 * - latencies are in us on gen9/vlv/chv
3412 if (INTEL_GEN(dev_priv) >= 9 ||
3413 IS_VALLEYVIEW(dev_priv) ||
3414 IS_CHERRYVIEW(dev_priv) ||
3420 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3421 level, wm[level], latency / 10, latency % 10);
3424 drm_modeset_unlock_all(dev);
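/*
 * Note on the printout above: after scaling, 'latency' is treated as
 * tenths of a microsecond, which is why each level is printed as
 * latency / 10 "." latency % 10 usec.
 */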
3427 static int pri_wm_latency_show(struct seq_file *m, void *data)
3429 struct drm_i915_private *dev_priv = m->private;
3430 const u16 *latencies;
3432 if (INTEL_GEN(dev_priv) >= 9)
3433 latencies = dev_priv->wm.skl_latency;
3435 latencies = dev_priv->wm.pri_latency;
3437 wm_latency_show(m, latencies);
3442 static int spr_wm_latency_show(struct seq_file *m, void *data)
3444 struct drm_i915_private *dev_priv = m->private;
3445 const u16 *latencies;
3447 if (INTEL_GEN(dev_priv) >= 9)
3448 latencies = dev_priv->wm.skl_latency;
3450 latencies = dev_priv->wm.spr_latency;
3452 wm_latency_show(m, latencies);
3457 static int cur_wm_latency_show(struct seq_file *m, void *data)
3459 struct drm_i915_private *dev_priv = m->private;
3460 const u16 *latencies;
3462 if (INTEL_GEN(dev_priv) >= 9)
3463 latencies = dev_priv->wm.skl_latency;
3465 latencies = dev_priv->wm.cur_latency;
3467 wm_latency_show(m, latencies);
3472 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3474 struct drm_i915_private *dev_priv = inode->i_private;
3476 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3479 return single_open(file, pri_wm_latency_show, dev_priv);
3482 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3484 struct drm_i915_private *dev_priv = inode->i_private;
3486 if (HAS_GMCH(dev_priv))
3489 return single_open(file, spr_wm_latency_show, dev_priv);
3492 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3494 struct drm_i915_private *dev_priv = inode->i_private;
3496 if (HAS_GMCH(dev_priv))
3499 return single_open(file, cur_wm_latency_show, dev_priv);
3502 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3503 size_t len, loff_t *offp, u16 wm[8])
3505 struct seq_file *m = file->private_data;
3506 struct drm_i915_private *dev_priv = m->private;
3507 struct drm_device *dev = &dev_priv->drm;
3514 if (IS_CHERRYVIEW(dev_priv))
3516 else if (IS_VALLEYVIEW(dev_priv))
3518 else if (IS_G4X(dev_priv))
3521 num_levels = ilk_wm_max_level(dev_priv) + 1;
3523 if (len >= sizeof(tmp))
3526 if (copy_from_user(tmp, ubuf, len))
3531 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3532 &new[0], &new[1], &new[2], &new[3],
3533 &new[4], &new[5], &new[6], &new[7]);
3534 if (ret != num_levels)
3537 drm_modeset_lock_all(dev);
3539 for (level = 0; level < num_levels; level++)
3540 wm[level] = new[level];
3542 drm_modeset_unlock_all(dev);
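/*
 * Illustrative write (values and node name are examples only; the name is
 * assumed from i915_pri_wm_latency_fops below), on a platform with five
 * watermark levels:
 *   echo "2 2 4 6 8" > i915_pri_wm_latency
 * The sscanf() above must match exactly num_levels values or the write is
 * rejected. The same helper backs the sprite and cursor latency files.
 */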
3548 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3549 size_t len, loff_t *offp)
3551 struct seq_file *m = file->private_data;
3552 struct drm_i915_private *dev_priv = m->private;
3555 if (INTEL_GEN(dev_priv) >= 9)
3556 latencies = dev_priv->wm.skl_latency;
3558 latencies = dev_priv->wm.pri_latency;
3560 return wm_latency_write(file, ubuf, len, offp, latencies);
3563 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3564 size_t len, loff_t *offp)
3566 struct seq_file *m = file->private_data;
3567 struct drm_i915_private *dev_priv = m->private;
3570 if (INTEL_GEN(dev_priv) >= 9)
3571 latencies = dev_priv->wm.skl_latency;
3573 latencies = dev_priv->wm.spr_latency;
3575 return wm_latency_write(file, ubuf, len, offp, latencies);
3578 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3579 size_t len, loff_t *offp)
3581 struct seq_file *m = file->private_data;
3582 struct drm_i915_private *dev_priv = m->private;
3585 if (INTEL_GEN(dev_priv) >= 9)
3586 latencies = dev_priv->wm.skl_latency;
3588 latencies = dev_priv->wm.cur_latency;
3590 return wm_latency_write(file, ubuf, len, offp, latencies);
3593 static const struct file_operations i915_pri_wm_latency_fops = {
3594 .owner = THIS_MODULE,
3595 .open = pri_wm_latency_open,
3597 .llseek = seq_lseek,
3598 .release = single_release,
3599 .write = pri_wm_latency_write
3602 static const struct file_operations i915_spr_wm_latency_fops = {
3603 .owner = THIS_MODULE,
3604 .open = spr_wm_latency_open,
3606 .llseek = seq_lseek,
3607 .release = single_release,
3608 .write = spr_wm_latency_write
3611 static const struct file_operations i915_cur_wm_latency_fops = {
3612 .owner = THIS_MODULE,
3613 .open = cur_wm_latency_open,
3615 .llseek = seq_lseek,
3616 .release = single_release,
3617 .write = cur_wm_latency_write
3621 i915_wedged_get(void *data, u64 *val)
3623 int ret = i915_terminally_wedged(data);
3638 i915_wedged_set(void *data, u64 val)
3640 struct drm_i915_private *i915 = data;
3642 /* Flush any previous reset before applying for a new one */
3643 wait_event(i915->gpu_error.reset_queue,
3644 !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
3646 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3647 "Manually set wedged engine mask = %llx", val);
3651 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3652 i915_wedged_get, i915_wedged_set,
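/*
 * Sketch of intended use (the mask value is an example): writing an engine
 * mask to the node backed by these fops, e.g.
 *   echo 0x1 > i915_wedged   # node name assumed from the fops name
 * injects an error on the selected engines via i915_handle_error(), after
 * flushing any reset already in progress.
 */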
3655 #define DROP_UNBOUND BIT(0)
3656 #define DROP_BOUND BIT(1)
3657 #define DROP_RETIRE BIT(2)
3658 #define DROP_ACTIVE BIT(3)
3659 #define DROP_FREED BIT(4)
3660 #define DROP_SHRINK_ALL BIT(5)
3661 #define DROP_IDLE BIT(6)
3662 #define DROP_RESET_ACTIVE BIT(7)
3663 #define DROP_RESET_SEQNO BIT(8)
3664 #define DROP_ALL (DROP_UNBOUND | \
3671 DROP_RESET_ACTIVE | \
3674 i915_drop_caches_get(void *data, u64 *val)
3682 i915_drop_caches_set(void *data, u64 val)
3684 struct drm_i915_private *i915 = data;
3686 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3687 val, val & DROP_ALL);
3689 if (val & DROP_RESET_ACTIVE &&
3690 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
3691 i915_gem_set_wedged(i915);
3693 /* No need to check and wait for gpu resets; libdrm only auto-restarts
3694 * ioctls that return -EAGAIN. */
3695 if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3698 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3703 * To finish the flush of the idle_worker, we must complete
3704 * the switch-to-kernel-context, which requires a double
3705 * pass through wait_for_idle: the first pass queues the switch,
3706 * the second pass waits for it.
3708 if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3709 ret = i915_gem_wait_for_idle(i915,
3710 I915_WAIT_INTERRUPTIBLE |
3712 MAX_SCHEDULE_TIMEOUT);
3714 if (ret == 0 && val & DROP_IDLE)
3715 ret = i915_gem_wait_for_idle(i915,
3716 I915_WAIT_INTERRUPTIBLE |
3718 MAX_SCHEDULE_TIMEOUT);
3720 if (val & DROP_RETIRE)
3721 i915_retire_requests(i915);
3723 mutex_unlock(&i915->drm.struct_mutex);
3726 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
3727 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
3729 fs_reclaim_acquire(GFP_KERNEL);
3730 if (val & DROP_BOUND)
3731 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3733 if (val & DROP_UNBOUND)
3734 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3736 if (val & DROP_SHRINK_ALL)
3737 i915_gem_shrink_all(i915);
3738 fs_reclaim_release(GFP_KERNEL);
3740 if (val & DROP_IDLE) {
3741 flush_delayed_work(&i915->gem.retire_work);
3742 flush_work(&i915->gem.idle_work);
3745 if (val & DROP_FREED)
3746 i915_gem_drain_freed_objects(i915);
3751 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3752 i915_drop_caches_get, i915_drop_caches_set,
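/*
 * Illustrative use: write a mask built from the DROP_* bits above to the
 * node backed by i915_drop_caches_fops, e.g.
 *   echo 0x07 > i915_gem_drop_caches   # node name is an assumption
 * to drop unbound and bound objects and retire outstanding requests.
 */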
3756 i915_cache_sharing_get(void *data, u64 *val)
3758 struct drm_i915_private *dev_priv = data;
3759 intel_wakeref_t wakeref;
3762 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3765 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3766 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3768 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3774 i915_cache_sharing_set(void *data, u64 val)
3776 struct drm_i915_private *dev_priv = data;
3777 intel_wakeref_t wakeref;
3779 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3785 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3786 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3789 /* Update the cache sharing policy here as well */
3790 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3791 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3792 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3793 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3799 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3800 i915_cache_sharing_get, i915_cache_sharing_set,
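/*
 * On gen6/gen7 the value written through these fops is shifted into the
 * GEN6_MBC_SNPCR field to select the uncore cache sharing policy; reads
 * return the current field value.
 */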
3803 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3804 struct sseu_dev_info *sseu)
3807 const int ss_max = SS_MAX;
3808 u32 sig1[SS_MAX], sig2[SS_MAX];
3811 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3812 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3813 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3814 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3816 for (ss = 0; ss < ss_max; ss++) {
3817 unsigned int eu_cnt;
3819 if (sig1[ss] & CHV_SS_PG_ENABLE)
3820 /* skip disabled subslice */
3823 sseu->slice_mask = BIT(0);
3824 sseu->subslice_mask[0] |= BIT(ss);
3825 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3826 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3827 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3828 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3829 sseu->eu_total += eu_cnt;
3830 sseu->eu_per_subslice = max_t(unsigned int,
3831 sseu->eu_per_subslice, eu_cnt);
3836 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3837 struct sseu_dev_info *sseu)
3840 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3841 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3844 for (s = 0; s < info->sseu.max_slices; s++) {
3846 * FIXME: The valid SS mask respects the spec and reads
3847 * only the valid bits of those registers, excluding reserved bits,
3848 * although this seems wrong because it would leave many
3849 * subslices without an ACK.
3851 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3852 GEN10_PGCTL_VALID_SS_MASK(s);
3853 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3854 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3857 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3858 GEN9_PGCTL_SSA_EU19_ACK |
3859 GEN9_PGCTL_SSA_EU210_ACK |
3860 GEN9_PGCTL_SSA_EU311_ACK;
3861 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3862 GEN9_PGCTL_SSB_EU19_ACK |
3863 GEN9_PGCTL_SSB_EU210_ACK |
3864 GEN9_PGCTL_SSB_EU311_ACK;
3866 for (s = 0; s < info->sseu.max_slices; s++) {
3867 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3868 /* skip disabled slice */
3871 sseu->slice_mask |= BIT(s);
3872 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
3874 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3875 unsigned int eu_cnt;
3877 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3878 /* skip disabled subslice */
3881 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3883 sseu->eu_total += eu_cnt;
3884 sseu->eu_per_subslice = max_t(unsigned int,
3885 sseu->eu_per_subslice,
3892 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3893 struct sseu_dev_info *sseu)
3896 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3897 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3900 for (s = 0; s < info->sseu.max_slices; s++) {
3901 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3902 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3903 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3906 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3907 GEN9_PGCTL_SSA_EU19_ACK |
3908 GEN9_PGCTL_SSA_EU210_ACK |
3909 GEN9_PGCTL_SSA_EU311_ACK;
3910 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3911 GEN9_PGCTL_SSB_EU19_ACK |
3912 GEN9_PGCTL_SSB_EU210_ACK |
3913 GEN9_PGCTL_SSB_EU311_ACK;
3915 for (s = 0; s < info->sseu.max_slices; s++) {
3916 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3917 /* skip disabled slice */
3920 sseu->slice_mask |= BIT(s);
3922 if (IS_GEN9_BC(dev_priv))
3923 sseu->subslice_mask[s] =
3924 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3926 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3927 unsigned int eu_cnt;
3929 if (IS_GEN9_LP(dev_priv)) {
3930 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3931 /* skip disabled subslice */
3934 sseu->subslice_mask[s] |= BIT(ss);
3937 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3939 sseu->eu_total += eu_cnt;
3940 sseu->eu_per_subslice = max_t(unsigned int,
3941 sseu->eu_per_subslice,
3948 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3949 struct sseu_dev_info *sseu)
3951 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3954 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3956 if (sseu->slice_mask) {
3957 sseu->eu_per_subslice =
3958 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
3959 for (s = 0; s < fls(sseu->slice_mask); s++) {
3960 sseu->subslice_mask[s] =
3961 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3963 sseu->eu_total = sseu->eu_per_subslice *
3964 intel_sseu_subslice_total(sseu);
3966 /* subtract fused off EU(s) from enabled slice(s) */
3967 for (s = 0; s < fls(sseu->slice_mask); s++) {
3969 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
3971 sseu->eu_total -= hweight8(subslice_7eu);
3976 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3977 const struct sseu_dev_info *sseu)
3979 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3980 const char *type = is_available_info ? "Available" : "Enabled";
3983 seq_printf(m, " %s Slice Mask: %04x\n", type,
3985 seq_printf(m, " %s Slice Total: %u\n", type,
3986 hweight8(sseu->slice_mask));
3987 seq_printf(m, " %s Subslice Total: %u\n", type,
3988 intel_sseu_subslice_total(sseu));
3989 for (s = 0; s < fls(sseu->slice_mask); s++) {
3990 seq_printf(m, " %s Slice%i subslices: %u\n", type,
3991 s, intel_sseu_subslices_per_slice(sseu, s));
3993 seq_printf(m, " %s EU Total: %u\n", type,
3995 seq_printf(m, " %s EU Per Subslice: %u\n", type,
3996 sseu->eu_per_subslice);
3998 if (!is_available_info)
4001 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4002 if (HAS_POOLED_EU(dev_priv))
4003 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4005 seq_printf(m, " Has Slice Power Gating: %s\n",
4006 yesno(sseu->has_slice_pg));
4007 seq_printf(m, " Has Subslice Power Gating: %s\n",
4008 yesno(sseu->has_subslice_pg));
4009 seq_printf(m, " Has EU Power Gating: %s\n",
4010 yesno(sseu->has_eu_pg));
4013 static int i915_sseu_status(struct seq_file *m, void *unused)
4015 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4016 struct sseu_dev_info sseu;
4017 intel_wakeref_t wakeref;
4019 if (INTEL_GEN(dev_priv) < 8)
4022 seq_puts(m, "SSEU Device Info\n");
4023 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
4025 seq_puts(m, "SSEU Device Status\n");
4026 memset(&sseu, 0, sizeof(sseu));
4027 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4028 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
4029 sseu.max_eus_per_subslice =
4030 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
4032 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
4033 if (IS_CHERRYVIEW(dev_priv))
4034 cherryview_sseu_device_status(dev_priv, &sseu);
4035 else if (IS_BROADWELL(dev_priv))
4036 broadwell_sseu_device_status(dev_priv, &sseu);
4037 else if (IS_GEN(dev_priv, 9))
4038 gen9_sseu_device_status(dev_priv, &sseu);
4039 else if (INTEL_GEN(dev_priv) >= 10)
4040 gen10_sseu_device_status(dev_priv, &sseu);
4043 i915_print_sseu_info(m, false, &sseu);
4048 static int i915_forcewake_open(struct inode *inode, struct file *file)
4050 struct drm_i915_private *i915 = inode->i_private;
4052 if (INTEL_GEN(i915) < 6)
4055 file->private_data =
4056 (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
4057 intel_uncore_forcewake_user_get(&i915->uncore);
4062 static int i915_forcewake_release(struct inode *inode, struct file *file)
4064 struct drm_i915_private *i915 = inode->i_private;
4066 if (INTEL_GEN(i915) < 6)
4069 intel_uncore_forcewake_user_put(&i915->uncore);
4070 intel_runtime_pm_put(&i915->runtime_pm,
4071 (intel_wakeref_t)(uintptr_t)file->private_data);
4076 static const struct file_operations i915_forcewake_fops = {
4077 .owner = THIS_MODULE,
4078 .open = i915_forcewake_open,
4079 .release = i915_forcewake_release,
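/*
 * Opening the node backed by these fops grabs a runtime-PM wakeref
 * (stashed in file->private_data as a uintptr_t) plus user forcewake, so
 * the GT stays awake for as long as the file is held open; both
 * references are dropped again on release.
 */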
4082 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4084 struct drm_i915_private *dev_priv = m->private;
4085 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4087 /* Synchronize with everything first in case there's been an HPD
4088 * storm, but we haven't finished handling it in the kernel yet
4090 synchronize_irq(dev_priv->drm.irq);
4091 flush_work(&dev_priv->hotplug.dig_port_work);
4092 flush_work(&dev_priv->hotplug.hotplug_work);
4094 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4095 seq_printf(m, "Detected: %s\n",
4096 yesno(delayed_work_pending(&hotplug->reenable_work)));
4101 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4102 const char __user *ubuf, size_t len,
4105 struct seq_file *m = file->private_data;
4106 struct drm_i915_private *dev_priv = m->private;
4107 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4108 unsigned int new_threshold;
4113 if (len >= sizeof(tmp))
4116 if (copy_from_user(tmp, ubuf, len))
4121 /* Strip newline, if any */
4122 newline = strchr(tmp, '\n');
4126 if (strcmp(tmp, "reset") == 0)
4127 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4128 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4131 if (new_threshold > 0)
4132 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4135 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4137 spin_lock_irq(&dev_priv->irq_lock);
4138 hotplug->hpd_storm_threshold = new_threshold;
4139 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4141 hotplug->stats[i].count = 0;
4142 spin_unlock_irq(&dev_priv->irq_lock);
4144 /* Re-enable hpd immediately if we were in an irq storm */
4145 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4150 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4152 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4155 static const struct file_operations i915_hpd_storm_ctl_fops = {
4156 .owner = THIS_MODULE,
4157 .open = i915_hpd_storm_ctl_open,
4159 .llseek = seq_lseek,
4160 .release = single_release,
4161 .write = i915_hpd_storm_ctl_write
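/*
 * Illustrative writes, per the parser in i915_hpd_storm_ctl_write()
 * (node name assumed from the fops name):
 *   echo 5 > i915_hpd_storm_ctl      # detection threshold of 5
 *   echo reset > i915_hpd_storm_ctl  # restore HPD_STORM_DEFAULT_THRESHOLD
 *   echo 0 > i915_hpd_storm_ctl      # disable storm detection
 * Each write also clears the per-pin storm counters.
 */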
4164 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4166 struct drm_i915_private *dev_priv = m->private;
4168 seq_printf(m, "Enabled: %s\n",
4169 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4175 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4177 return single_open(file, i915_hpd_short_storm_ctl_show,
4181 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4182 const char __user *ubuf,
4183 size_t len, loff_t *offp)
4185 struct seq_file *m = file->private_data;
4186 struct drm_i915_private *dev_priv = m->private;
4187 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4193 if (len >= sizeof(tmp))
4196 if (copy_from_user(tmp, ubuf, len))
4201 /* Strip newline, if any */
4202 newline = strchr(tmp, '\n');
4206 /* Reset to the "default" state for this system */
4207 if (strcmp(tmp, "reset") == 0)
4208 new_state = !HAS_DP_MST(dev_priv);
4209 else if (kstrtobool(tmp, &new_state) != 0)
4212 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4213 new_state ? "En" : "Dis");
4215 spin_lock_irq(&dev_priv->irq_lock);
4216 hotplug->hpd_short_storm_enabled = new_state;
4217 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4219 hotplug->stats[i].count = 0;
4220 spin_unlock_irq(&dev_priv->irq_lock);
4222 /* Re-enable hpd immediately if we were in an irq storm */
4223 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4228 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4229 .owner = THIS_MODULE,
4230 .open = i915_hpd_short_storm_ctl_open,
4232 .llseek = seq_lseek,
4233 .release = single_release,
4234 .write = i915_hpd_short_storm_ctl_write,
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);
out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
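/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates i915_drrs_ctl_fops with a NULL get
 * callback, so the file is effectively write-only: the value written is
 * parsed as "%llu" and handed to i915_drrs_ctl_set(), which enables DRRS on
 * every active, DRRS-capable eDP pipe when non-zero and disables it when zero.
 */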
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));
			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);
		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
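/*
 * Usage sketch (path assumed, under the DRM debugfs root): writing any value
 * kstrtobool() accepts as true to i915_fifo_underrun_reset re-arms FIFO
 * underrun reporting on all active pipes and clears the FBC underrun state;
 * writing a false value is a no-op.
 */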
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
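/*
 * Everything in i915_debugfs_list becomes a plain, read-only seq_file node
 * via drm_debugfs_create_files() in i915_debugfs_register() below; the
 * i915_debugfs_files[] table that follows holds the writable control files.
 */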
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;
		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
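/*
 * DEFINE_SHOW_ATTRIBUTE(i915_dpcd) expands to i915_dpcd_open() plus a
 * read-only i915_dpcd_fops built around single_open()/i915_dpcd_show(); the
 * i915_panel and i915_hdcp_sink_capability files below use the same pattern.
 */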
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	bool hdcp_cap, hdcp2_cap;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	hdcp_cap = intel_hdcp_capable(intel_connector);
	hdcp2_cap = intel_hdcp2_capable(intel_connector);

	if (hdcp_cap)
		seq_puts(m, "HDCP1.4 ");
	if (hdcp2_cap)
		seq_puts(m, "HDCP2.2 ");

	if (!hdcp_cap && !hdcp2_cap)
		seq_puts(m, "None");
	seq_puts(m, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
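/*
 * Usage sketch (assumed per-connector debugfs path): reading
 * i915_dsc_fec_support dumps the current DSC state and the sink's DSC/FEC
 * capability; writing a boolean sets intel_dp->force_dsc_en, which is
 * consulted the next time the connector's configuration is computed.
 */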
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}