2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
32 #include <drm/drm_debugfs.h>
34 #include "gem/i915_gem_context.h"
35 #include "gt/intel_gt_pm.h"
36 #include "gt/intel_gt_requests.h"
37 #include "gt/intel_reset.h"
38 #include "gt/intel_rc6.h"
39 #include "gt/intel_rps.h"
40 #include "gt/uc/intel_guc_submission.h"
42 #include "i915_debugfs.h"
43 #include "i915_debugfs_params.h"
45 #include "i915_trace.h"
47 #include "intel_sideband.h"
/* Map a debugfs drm_info_node back to its owning drm_i915_private. */
49 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
51 return to_i915(node->minor->dev);
/*
 * i915_capabilities - debugfs dump of device capabilities.
 *
 * Prints the PCH type, static and runtime device info, driver caps,
 * and the current module parameters into the seq_file.
 * NOTE(review): this extraction is missing lines (the embedded original
 * numbering skips), so braces/returns absent here exist in the full file.
 */
54 static int i915_capabilities(struct seq_file *m, void *data)
56 struct drm_i915_private *i915 = node_to_i915(m->private);
57 struct drm_printer p = drm_seq_file_printer(m);
59 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
61 intel_device_info_print_static(INTEL_INFO(i915), &p);
62 intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
63 intel_driver_caps_print(&i915->caps, &p);
/* Hold the module-param lock so the dump is a consistent snapshot. */
65 kernel_param_lock(THIS_MODULE);
66 i915_params_dump(&i915_modparams, &p);
67 kernel_param_unlock(THIS_MODULE);
/* One-character flag for an object's tiling mode: ' ', 'X' or 'Y'. */
72 static char get_tiling_flag(struct drm_i915_gem_object *obj)
74 switch (i915_gem_object_get_tiling(obj)) {
76 case I915_TILING_NONE: return ' ';
77 case I915_TILING_X: return 'X';
78 case I915_TILING_Y: return 'Y';
/* 'g' if the object currently has userspace GTT mmap faults outstanding. */
82 static char get_global_flag(struct drm_i915_gem_object *obj)
84 return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
/* 'M' if the object's backing pages are kernel-mapped (vmap'ed). */
87 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
89 return obj->mm.mapping ? 'M' : ' ';
/*
 * stringify_page_sizes - render a GTT page-size bitmask into @buf.
 *
 * Appends "2M, ", "64K, " and/or "4K, " for each bit set in @page_sizes.
 * NOTE(review): lines are missing from this extraction; the full file
 * contains the single-bit fast path and the return of @buf.
 */
93 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
100 case I915_GTT_PAGE_SIZE_4K:
102 case I915_GTT_PAGE_SIZE_64K:
104 case I915_GTT_PAGE_SIZE_2M:
/* Multiple bits set: concatenate a label for each supported size. */
110 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
111 x += snprintf(buf + x, len - x, "2M, ");
112 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
113 x += snprintf(buf + x, len - x, "64K, ");
114 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
115 x += snprintf(buf + x, len - x, "4K, ");
/*
 * i915_debugfs_describe_obj - print a one-line-ish description of a GEM
 * object and each of its bound VMAs into the seq_file.
 *
 * Emits object flags (tiling/userfault/mapped), size, cache level,
 * dirty/purgeable state, then per-VMA offset/size/page-size info and,
 * for GGTT VMAs, the view type (normal/partial/rotated/remapped).
 */
123 i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
125 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
126 struct intel_engine_cs *engine;
127 struct i915_vma *vma;
130 seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
132 get_tiling_flag(obj),
133 get_global_flag(obj),
134 get_pin_mapped_flag(obj),
135 obj->base.size / 1024,
138 i915_cache_level_str(dev_priv, obj->cache_level),
139 obj->mm.dirty ? " dirty" : "",
140 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 seq_printf(m, " (name: %d)", obj->base.name);
/* Walk the object's VMA list under its spinlock. */
144 spin_lock(&obj->vma.lock);
145 list_for_each_entry(vma, &obj->vma.list, obj_link) {
146 if (!drm_mm_node_allocated(&vma->node))
/* Drop the lock while printing; re-acquired below after the body. */
149 spin_unlock(&obj->vma.lock);
151 if (i915_vma_is_pinned(vma))
154 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
155 i915_vma_is_ggtt(vma) ? "g" : "pp",
156 vma->node.start, vma->node.size,
157 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
158 if (i915_vma_is_ggtt(vma)) {
159 switch (vma->ggtt_view.type) {
160 case I915_GGTT_VIEW_NORMAL:
161 seq_puts(m, ", normal");
164 case I915_GGTT_VIEW_PARTIAL:
165 seq_printf(m, ", partial [%08llx+%x]",
166 vma->ggtt_view.partial.offset << PAGE_SHIFT,
167 vma->ggtt_view.partial.size << PAGE_SHIFT);
170 case I915_GGTT_VIEW_ROTATED:
171 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
172 vma->ggtt_view.rotated.plane[0].width,
173 vma->ggtt_view.rotated.plane[0].height,
174 vma->ggtt_view.rotated.plane[0].stride,
175 vma->ggtt_view.rotated.plane[0].offset,
176 vma->ggtt_view.rotated.plane[1].width,
177 vma->ggtt_view.rotated.plane[1].height,
178 vma->ggtt_view.rotated.plane[1].stride,
179 vma->ggtt_view.rotated.plane[1].offset);
182 case I915_GGTT_VIEW_REMAPPED:
183 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
184 vma->ggtt_view.remapped.plane[0].width,
185 vma->ggtt_view.remapped.plane[0].height,
186 vma->ggtt_view.remapped.plane[0].stride,
187 vma->ggtt_view.remapped.plane[0].offset,
188 vma->ggtt_view.remapped.plane[1].width,
189 vma->ggtt_view.remapped.plane[1].height,
190 vma->ggtt_view.remapped.plane[1].stride,
191 vma->ggtt_view.remapped.plane[1].offset);
/* Unknown view type: flag it loudly via MISSING_CASE. */
195 MISSING_CASE(vma->ggtt_view.type);
200 seq_printf(m, " , fence: %d", vma->fence->id);
203 spin_lock(&obj->vma.lock);
205 spin_unlock(&obj->vma.lock);
207 seq_printf(m, " (pinned x %d)", pin_count);
209 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
210 if (i915_gem_object_is_framebuffer(obj))
211 seq_printf(m, " (fb)");
213 engine = i915_gem_object_last_write_engine(obj);
215 seq_printf(m, " (%s)", engine->name);
219 struct i915_address_space *vm;
222 u64 active, inactive;
/*
 * per_file_stats - idr/iterator callback accumulating per-object memory
 * statistics (total/unbound/active/inactive/closed) into a file_stats.
 *
 * Takes a reference on the object (skipping objects already being freed)
 * and walks its VMAs under obj->vma.lock.
 */
226 static int per_file_stats(int id, void *ptr, void *data)
228 struct drm_i915_gem_object *obj = ptr;
229 struct file_stats *stats = data;
230 struct i915_vma *vma;
/* Object may be mid-free; only count it if we can take a reference. */
232 if (!kref_get_unless_zero(&obj->base.refcount))
236 stats->total += obj->base.size;
237 if (!atomic_read(&obj->bind_count))
238 stats->unbound += obj->base.size;
240 spin_lock(&obj->vma.lock);
/* No target vm: account every GGTT binding of the object. */
242 for_each_ggtt_vma(vma, obj) {
243 if (!drm_mm_node_allocated(&vma->node))
246 if (i915_vma_is_active(vma))
247 stats->active += vma->node.size;
249 stats->inactive += vma->node.size;
251 if (i915_vma_is_closed(vma))
252 stats->closed += vma->node.size;
/* With a target vm: binary-search the object's VMA rbtree for it. */
255 struct rb_node *p = obj->vma.tree.rb_node;
260 vma = rb_entry(p, typeof(*vma), obj_node);
261 cmp = i915_vma_compare(vma, stats->vm, NULL);
263 if (drm_mm_node_allocated(&vma->node)) {
264 if (i915_vma_is_active(vma))
265 stats->active += vma->node.size;
267 stats->inactive += vma->node.size;
269 if (i915_vma_is_closed(vma))
270 stats->closed += vma->node.size;
280 spin_unlock(&obj->vma.lock);
282 i915_gem_object_put(obj);
/*
 * print_file_stats - emit one summary line for a file_stats accumulator.
 * Kept as a macro so the stats struct can be passed by value with a name.
 */
286 #define print_file_stats(m, name, stats) do { \
288 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
/*
 * print_context_stats - per-context memory usage summary.
 *
 * Walks the device's context list, accumulating kernel-owned context/ring
 * object sizes into kstats, and, for contexts with a userspace file,
 * per-context object stats labelled by the owning task's comm.
 */
298 static void print_context_stats(struct seq_file *m,
299 struct drm_i915_private *i915)
301 struct file_stats kstats = {};
302 struct i915_gem_context *ctx, *cn;
304 spin_lock(&i915->gem.contexts.lock);
305 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
306 struct i915_gem_engines_iter it;
307 struct intel_context *ce;
/* Skip contexts already on their way out. */
309 if (!kref_get_unless_zero(&ctx->ref))
/* Drop the list lock while inspecting this context. */
312 spin_unlock(&i915->gem.contexts.lock);
314 for_each_gem_engine(ce,
315 i915_gem_context_lock_engines(ctx), it) {
316 if (intel_context_pin_if_active(ce)) {
320 ce->state->obj, &kstats);
321 per_file_stats(0, ce->ring->vma->obj, &kstats);
323 intel_context_unpin(ce);
326 i915_gem_context_unlock_engines(ctx);
328 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
329 struct file_stats stats = {
330 .vm = rcu_access_pointer(ctx->vm),
332 struct drm_file *file = ctx->file_priv->file;
333 struct task_struct *task;
337 idr_for_each(&file->object_idr, per_file_stats, &stats);
/* Prefer the context's pid; fall back to the drm_file's. */
341 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
342 snprintf(name, sizeof(name), "%s",
343 task ? task->comm : "<unknown>");
346 print_file_stats(m, name, stats);
/* Re-take the lock and resync the safe-iteration cursor. */
349 spin_lock(&i915->gem.contexts.lock);
350 list_safe_reset_next(ctx, cn, link);
351 i915_gem_context_put(ctx);
353 spin_unlock(&i915->gem.contexts.lock);
355 print_file_stats(m, "[k]contexts", kstats);
/*
 * i915_gem_object_info - debugfs summary of GEM object memory usage:
 * shrinkable/free counts, per-memory-region totals, then per-context stats.
 */
358 static int i915_gem_object_info(struct seq_file *m, void *data)
360 struct drm_i915_private *i915 = node_to_i915(m->private);
361 struct intel_memory_region *mr;
362 enum intel_region_id id;
364 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
365 i915->mm.shrink_count,
366 atomic_read(&i915->mm.free_count),
367 i915->mm.shrink_memory);
368 for_each_memory_region(mr, i915, id)
369 seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
370 mr->name, &mr->total, &mr->avail);
373 print_context_stats(m, i915);
/*
 * gen8_display_interrupt_info - dump gen8+ display-engine interrupt
 * registers (per-pipe IMR/IIR/IER, DE port, DE misc, PCU).
 *
 * Per-pipe registers are only read while the pipe's power domain is up.
 */
378 static void gen8_display_interrupt_info(struct seq_file *m)
380 struct drm_i915_private *dev_priv = node_to_i915(m->private);
383 for_each_pipe(dev_priv, pipe) {
384 enum intel_display_power_domain power_domain;
385 intel_wakeref_t wakeref;
387 power_domain = POWER_DOMAIN_PIPE(pipe);
388 wakeref = intel_display_power_get_if_enabled(dev_priv,
391 seq_printf(m, "Pipe %c power disabled\n",
395 seq_printf(m, "Pipe %c IMR:\t%08x\n",
397 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
398 seq_printf(m, "Pipe %c IIR:\t%08x\n",
400 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
401 seq_printf(m, "Pipe %c IER:\t%08x\n",
403 I915_READ(GEN8_DE_PIPE_IER(pipe)));
405 intel_display_power_put(dev_priv, power_domain, wakeref);
408 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
409 I915_READ(GEN8_DE_PORT_IMR));
410 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
411 I915_READ(GEN8_DE_PORT_IIR));
412 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
413 I915_READ(GEN8_DE_PORT_IER));
415 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
416 I915_READ(GEN8_DE_MISC_IMR));
417 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
418 I915_READ(GEN8_DE_MISC_IIR));
419 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
420 I915_READ(GEN8_DE_MISC_IER));
422 seq_printf(m, "PCU interrupt mask:\t%08x\n",
423 I915_READ(GEN8_PCU_IMR));
424 seq_printf(m, "PCU interrupt identity:\t%08x\n",
425 I915_READ(GEN8_PCU_IIR));
426 seq_printf(m, "PCU interrupt enable:\t%08x\n",
427 I915_READ(GEN8_PCU_IER));
/*
 * i915_interrupt_info - dump the platform-appropriate interrupt registers.
 *
 * Branches per platform generation (CHV, gen11+, gen8+, VLV, pre-PCH-split,
 * PCH-split) and then prints per-engine interrupt masks. All register reads
 * happen under a runtime-PM wakeref; per-pipe reads additionally require
 * the pipe power domain.
 */
430 static int i915_interrupt_info(struct seq_file *m, void *data)
432 struct drm_i915_private *dev_priv = node_to_i915(m->private);
433 struct intel_engine_cs *engine;
434 intel_wakeref_t wakeref;
437 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Cherryview: VLV-style display block plus gen8-style GT/PCU regs. */
439 if (IS_CHERRYVIEW(dev_priv)) {
440 intel_wakeref_t pref;
442 seq_printf(m, "Master Interrupt Control:\t%08x\n",
443 I915_READ(GEN8_MASTER_IRQ));
445 seq_printf(m, "Display IER:\t%08x\n",
447 seq_printf(m, "Display IIR:\t%08x\n",
449 seq_printf(m, "Display IIR_RW:\t%08x\n",
450 I915_READ(VLV_IIR_RW));
451 seq_printf(m, "Display IMR:\t%08x\n",
453 for_each_pipe(dev_priv, pipe) {
454 enum intel_display_power_domain power_domain;
456 power_domain = POWER_DOMAIN_PIPE(pipe);
457 pref = intel_display_power_get_if_enabled(dev_priv,
460 seq_printf(m, "Pipe %c power disabled\n",
465 seq_printf(m, "Pipe %c stat:\t%08x\n",
467 I915_READ(PIPESTAT(pipe)));
469 intel_display_power_put(dev_priv, power_domain, pref);
472 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
473 seq_printf(m, "Port hotplug:\t%08x\n",
474 I915_READ(PORT_HOTPLUG_EN));
475 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
476 I915_READ(VLV_DPFLIPSTAT));
477 seq_printf(m, "DPINVGTT:\t%08x\n",
478 I915_READ(DPINVGTT));
479 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
481 for (i = 0; i < 4; i++) {
482 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
483 i, I915_READ(GEN8_GT_IMR(i)));
484 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
485 i, I915_READ(GEN8_GT_IIR(i)));
486 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
487 i, I915_READ(GEN8_GT_IER(i)));
490 seq_printf(m, "PCU interrupt mask:\t%08x\n",
491 I915_READ(GEN8_PCU_IMR));
492 seq_printf(m, "PCU interrupt identity:\t%08x\n",
493 I915_READ(GEN8_PCU_IIR));
494 seq_printf(m, "PCU interrupt enable:\t%08x\n",
495 I915_READ(GEN8_PCU_IER));
/* Gen11+: per-class interrupt enables plus the gen8 display dump. */
496 } else if (INTEL_GEN(dev_priv) >= 11) {
497 seq_printf(m, "Master Interrupt Control: %08x\n",
498 I915_READ(GEN11_GFX_MSTR_IRQ));
500 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
501 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
502 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
503 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
504 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
505 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
506 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
507 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
508 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
509 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
510 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
511 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
513 seq_printf(m, "Display Interrupt Control:\t%08x\n",
514 I915_READ(GEN11_DISPLAY_INT_CTL));
516 gen8_display_interrupt_info(m);
517 } else if (INTEL_GEN(dev_priv) >= 8) {
518 seq_printf(m, "Master Interrupt Control:\t%08x\n",
519 I915_READ(GEN8_MASTER_IRQ));
521 for (i = 0; i < 4; i++) {
522 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
523 i, I915_READ(GEN8_GT_IMR(i)));
524 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
525 i, I915_READ(GEN8_GT_IIR(i)));
526 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
527 i, I915_READ(GEN8_GT_IER(i)));
530 gen8_display_interrupt_info(m);
531 } else if (IS_VALLEYVIEW(dev_priv)) {
532 intel_wakeref_t pref;
534 seq_printf(m, "Display IER:\t%08x\n",
536 seq_printf(m, "Display IIR:\t%08x\n",
538 seq_printf(m, "Display IIR_RW:\t%08x\n",
539 I915_READ(VLV_IIR_RW));
540 seq_printf(m, "Display IMR:\t%08x\n",
542 for_each_pipe(dev_priv, pipe) {
543 enum intel_display_power_domain power_domain;
545 power_domain = POWER_DOMAIN_PIPE(pipe);
546 pref = intel_display_power_get_if_enabled(dev_priv,
549 seq_printf(m, "Pipe %c power disabled\n",
554 seq_printf(m, "Pipe %c stat:\t%08x\n",
556 I915_READ(PIPESTAT(pipe)));
557 intel_display_power_put(dev_priv, power_domain, pref);
560 seq_printf(m, "Master IER:\t%08x\n",
561 I915_READ(VLV_MASTER_IER));
563 seq_printf(m, "Render IER:\t%08x\n",
565 seq_printf(m, "Render IIR:\t%08x\n",
567 seq_printf(m, "Render IMR:\t%08x\n",
570 seq_printf(m, "PM IER:\t\t%08x\n",
571 I915_READ(GEN6_PMIER));
572 seq_printf(m, "PM IIR:\t\t%08x\n",
573 I915_READ(GEN6_PMIIR));
574 seq_printf(m, "PM IMR:\t\t%08x\n",
575 I915_READ(GEN6_PMIMR));
577 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
578 seq_printf(m, "Port hotplug:\t%08x\n",
579 I915_READ(PORT_HOTPLUG_EN));
580 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
581 I915_READ(VLV_DPFLIPSTAT));
582 seq_printf(m, "DPINVGTT:\t%08x\n",
583 I915_READ(DPINVGTT));
584 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
/* Pre-PCH-split (gen2-4): single GEN2_* interrupt register set. */
586 } else if (!HAS_PCH_SPLIT(dev_priv)) {
587 seq_printf(m, "Interrupt enable: %08x\n",
588 I915_READ(GEN2_IER));
589 seq_printf(m, "Interrupt identity: %08x\n",
590 I915_READ(GEN2_IIR));
591 seq_printf(m, "Interrupt mask: %08x\n",
592 I915_READ(GEN2_IMR));
593 for_each_pipe(dev_priv, pipe)
594 seq_printf(m, "Pipe %c stat: %08x\n",
596 I915_READ(PIPESTAT(pipe)));
598 seq_printf(m, "North Display Interrupt enable: %08x\n",
600 seq_printf(m, "North Display Interrupt identity: %08x\n",
602 seq_printf(m, "North Display Interrupt mask: %08x\n",
604 seq_printf(m, "South Display Interrupt enable: %08x\n",
606 seq_printf(m, "South Display Interrupt identity: %08x\n",
608 seq_printf(m, "South Display Interrupt mask: %08x\n",
610 seq_printf(m, "Graphics Interrupt enable: %08x\n",
612 seq_printf(m, "Graphics Interrupt identity: %08x\n",
614 seq_printf(m, "Graphics Interrupt mask: %08x\n",
/* Finally, the per-engine interrupt masks. */
618 if (INTEL_GEN(dev_priv) >= 11) {
619 seq_printf(m, "RCS Intr Mask:\t %08x\n",
620 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
621 seq_printf(m, "BCS Intr Mask:\t %08x\n",
622 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
623 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
624 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
625 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
626 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
627 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
628 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
629 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
630 I915_READ(GEN11_GUC_SG_INTR_MASK));
631 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
632 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
633 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
634 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
635 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
636 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
638 } else if (INTEL_GEN(dev_priv) >= 6) {
639 for_each_uabi_engine(engine, dev_priv) {
641 "Graphics Interrupt mask (%s): %08x\n",
642 engine->name, ENGINE_READ(engine, RING_IMR));
646 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * i915_gem_fence_regs_info - debugfs dump of the GGTT fence registers:
 * one line per fence with its pin count and, if bound, a description of
 * the backing object.
 *
 * Fix: the pin-count read was corrupted by a text-encoding error
 * ("&re" in "&reg" had been mis-decoded as the single character '®');
 * restored to atomic_read(&reg->pin_count).
 */
651 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
653 struct drm_i915_private *i915 = node_to_i915(m->private);
656 seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
659 for (i = 0; i < i915->ggtt.num_fences; i++) {
660 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
661 struct i915_vma *vma = reg->vma;
663 seq_printf(m, "Fence %d, pin count = %d, object = ",
664 i, atomic_read(&reg->pin_count));
666 seq_puts(m, "unused");
668 i915_debugfs_describe_obj(m, vma->obj);
676 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * gpu_state_read - read() handler for the GPU error-state debugfs files.
 *
 * Copies a window of the captured coredump into a kernel bounce buffer,
 * then out to userspace.
 */
677 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
678 size_t count, loff_t *pos)
680 struct i915_gpu_coredump *error;
684 error = file->private_data;
688 /* Bounce buffer required because of kernfs __user API convenience. */
689 buf = kmalloc(count, GFP_KERNEL);
693 ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
697 if (!copy_to_user(ubuf, buf, ret))
/* Drop the coredump reference taken at open() time. */
707 static int gpu_state_release(struct inode *inode, struct file *file)
709 i915_gpu_coredump_put(file->private_data);
/*
 * i915_gpu_info_open - capture a fresh GPU coredump (under runtime PM)
 * and stash it as the file's private data for gpu_state_read().
 */
713 static int i915_gpu_info_open(struct inode *inode, struct file *file)
715 struct drm_i915_private *i915 = inode->i_private;
716 struct i915_gpu_coredump *gpu;
717 intel_wakeref_t wakeref;
720 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
721 gpu = i915_gpu_coredump(i915);
725 file->private_data = gpu;
/* File operations for the i915_gpu_info debugfs entry. */
729 static const struct file_operations i915_gpu_info_fops = {
730 .owner = THIS_MODULE,
731 .open = i915_gpu_info_open,
732 .read = gpu_state_read,
733 .llseek = default_llseek,
734 .release = gpu_state_release,
/*
 * i915_error_state_write - any write to the error-state file clears the
 * currently captured error state.
 */
738 i915_error_state_write(struct file *filp,
739 const char __user *ubuf,
743 struct i915_gpu_coredump *error = filp->private_data;
748 DRM_DEBUG_DRIVER("Resetting error state\n");
749 i915_reset_error_state(error->i915);
/* Grab (a reference to) the first captured error state for reading. */
754 static int i915_error_state_open(struct inode *inode, struct file *file)
756 struct i915_gpu_coredump *error;
758 error = i915_first_error_state(inode->i_private);
760 return PTR_ERR(error);
762 file->private_data = error;
/* File operations for the i915_error_state debugfs entry. */
766 static const struct file_operations i915_error_state_fops = {
767 .owner = THIS_MODULE,
768 .open = i915_error_state_open,
769 .read = gpu_state_read,
770 .write = i915_error_state_write,
771 .llseek = default_llseek,
772 .release = gpu_state_release,
/*
 * i915_frequency_info - debugfs dump of GPU frequency / RPS state.
 *
 * Three platform paths: gen5 (ILK MEMSWCTL/MEMSTAT), VLV/CHV (punit
 * GPU_FREQ_STS plus cached rps freqs), and gen6+ (full RPS register dump:
 * RPNSWREQ, RPSTAT1, up/down thresholds, PM IER/IMR/ISR/IIR, RP0/RP1/RPn
 * decoded from RP_STATE_CAP). Ends with CD/dot clock limits. Everything
 * runs under a runtime-PM wakeref; RPSTAT reads take forcewake.
 */
776 static int i915_frequency_info(struct seq_file *m, void *unused)
778 struct drm_i915_private *dev_priv = node_to_i915(m->private);
779 struct intel_uncore *uncore = &dev_priv->uncore;
780 struct intel_rps *rps = &dev_priv->gt.rps;
781 intel_wakeref_t wakeref;
784 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
786 if (IS_GEN(dev_priv, 5)) {
787 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
788 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
790 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
791 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
792 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
794 seq_printf(m, "Current P-state: %d\n",
795 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
796 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
797 u32 rpmodectl, freq_sts;
799 rpmodectl = I915_READ(GEN6_RP_CONTROL);
800 seq_printf(m, "Video Turbo Mode: %s\n",
801 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
802 seq_printf(m, "HW control enabled: %s\n",
803 yesno(rpmodectl & GEN6_RP_ENABLE));
804 seq_printf(m, "SW control enabled: %s\n",
805 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
806 GEN6_RP_MEDIA_SW_MODE));
/* Frequency status lives behind the punit sideband. */
808 vlv_punit_get(dev_priv);
809 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
810 vlv_punit_put(dev_priv);
812 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
813 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
815 seq_printf(m, "actual GPU freq: %d MHz\n",
816 intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
818 seq_printf(m, "current GPU freq: %d MHz\n",
819 intel_gpu_freq(rps, rps->cur_freq));
821 seq_printf(m, "max GPU freq: %d MHz\n",
822 intel_gpu_freq(rps, rps->max_freq));
824 seq_printf(m, "min GPU freq: %d MHz\n",
825 intel_gpu_freq(rps, rps->min_freq));
827 seq_printf(m, "idle GPU freq: %d MHz\n",
828 intel_gpu_freq(rps, rps->idle_freq));
831 "efficient (RPe) frequency: %d MHz\n",
832 intel_gpu_freq(rps, rps->efficient_freq));
833 } else if (INTEL_GEN(dev_priv) >= 6) {
837 u32 rpmodectl, rpinclimit, rpdeclimit;
838 u32 rpstat, cagf, reqf;
839 u32 rpupei, rpcurup, rpprevup;
840 u32 rpdownei, rpcurdown, rpprevdown;
841 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
844 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
845 if (IS_GEN9_LP(dev_priv)) {
846 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
847 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
849 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
850 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
853 /* RPSTAT1 is in the GT power well */
854 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
856 reqf = I915_READ(GEN6_RPNSWREQ);
857 if (INTEL_GEN(dev_priv) >= 9)
860 reqf &= ~GEN6_TURBO_DISABLE;
861 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
866 reqf = intel_gpu_freq(rps, reqf);
868 rpmodectl = I915_READ(GEN6_RP_CONTROL);
869 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
870 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
872 rpstat = I915_READ(GEN6_RPSTAT1);
873 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
874 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
875 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
876 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
877 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
878 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
879 cagf = intel_rps_read_actual_frequency(rps);
881 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
883 if (INTEL_GEN(dev_priv) >= 11) {
884 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
885 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
887 * The equivalent to the PM ISR & IIR cannot be read
888 * without affecting the current state of the system
892 } else if (INTEL_GEN(dev_priv) >= 8) {
893 pm_ier = I915_READ(GEN8_GT_IER(2));
894 pm_imr = I915_READ(GEN8_GT_IMR(2));
895 pm_isr = I915_READ(GEN8_GT_ISR(2));
896 pm_iir = I915_READ(GEN8_GT_IIR(2));
898 pm_ier = I915_READ(GEN6_PMIER);
899 pm_imr = I915_READ(GEN6_PMIMR);
900 pm_isr = I915_READ(GEN6_PMISR);
901 pm_iir = I915_READ(GEN6_PMIIR);
903 pm_mask = I915_READ(GEN6_PMINTRMSK);
905 seq_printf(m, "Video Turbo Mode: %s\n",
906 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
907 seq_printf(m, "HW control enabled: %s\n",
908 yesno(rpmodectl & GEN6_RP_ENABLE));
909 seq_printf(m, "SW control enabled: %s\n",
910 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
911 GEN6_RP_MEDIA_SW_MODE));
913 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
914 pm_ier, pm_imr, pm_mask);
915 if (INTEL_GEN(dev_priv) <= 10)
916 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
918 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
919 rps->pm_intrmsk_mbz);
920 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
921 seq_printf(m, "Render p-state ratio: %d\n",
922 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
923 seq_printf(m, "Render p-state VID: %d\n",
924 gt_perf_status & 0xff);
925 seq_printf(m, "Render p-state limit: %d\n",
926 rp_state_limits & 0xff);
927 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
928 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
929 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
930 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
931 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
932 seq_printf(m, "CAGF: %dMHz\n", cagf);
933 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
934 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
935 seq_printf(m, "RP CUR UP: %d (%dus)\n",
936 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
937 seq_printf(m, "RP PREV UP: %d (%dus)\n",
938 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
939 seq_printf(m, "Up threshold: %d%%\n",
940 rps->power.up_threshold);
942 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
943 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
944 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
945 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
946 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
947 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
948 seq_printf(m, "Down threshold: %d%%\n",
949 rps->power.down_threshold);
/* Decode RPN/RP1/RP0 from RP_STATE_CAP (field order flips on GEN9_LP). */
951 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
952 rp_state_cap >> 16) & 0xff;
953 max_freq *= (IS_GEN9_BC(dev_priv) ||
954 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
955 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
956 intel_gpu_freq(rps, max_freq));
958 max_freq = (rp_state_cap & 0xff00) >> 8;
959 max_freq *= (IS_GEN9_BC(dev_priv) ||
960 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
961 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
962 intel_gpu_freq(rps, max_freq));
964 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
965 rp_state_cap >> 0) & 0xff;
966 max_freq *= (IS_GEN9_BC(dev_priv) ||
967 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
968 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
969 intel_gpu_freq(rps, max_freq));
970 seq_printf(m, "Max overclocked frequency: %dMHz\n",
971 intel_gpu_freq(rps, rps->max_freq));
973 seq_printf(m, "Current freq: %d MHz\n",
974 intel_gpu_freq(rps, rps->cur_freq));
975 seq_printf(m, "Actual freq: %d MHz\n", cagf);
976 seq_printf(m, "Idle freq: %d MHz\n",
977 intel_gpu_freq(rps, rps->idle_freq));
978 seq_printf(m, "Min freq: %d MHz\n",
979 intel_gpu_freq(rps, rps->min_freq));
980 seq_printf(m, "Boost freq: %d MHz\n",
981 intel_gpu_freq(rps, rps->boost_freq));
982 seq_printf(m, "Max freq: %d MHz\n",
983 intel_gpu_freq(rps, rps->max_freq));
985 "efficient (RPe) frequency: %d MHz\n",
986 intel_gpu_freq(rps, rps->efficient_freq));
988 seq_puts(m, "no P-state info available\n");
991 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
992 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
993 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
995 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * ilk_drpc_info - Ironlake render-standby (DRPC) state dump: MEMMODECTL
 * boost/HW/SW control bits, P-state range, standby VIDs and current RS
 * state decoded from RSTDBYCTL.
 */
999 static int ilk_drpc_info(struct seq_file *m)
1001 struct drm_i915_private *i915 = node_to_i915(m->private);
1002 struct intel_uncore *uncore = &i915->uncore;
1003 u32 rgvmodectl, rstdbyctl;
1006 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1007 rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1008 crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1010 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1011 seq_printf(m, "Boost freq: %d\n",
1012 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1013 MEMMODE_BOOST_FREQ_SHIFT);
1014 seq_printf(m, "HW control enabled: %s\n",
1015 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1016 seq_printf(m, "SW control enabled: %s\n",
1017 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1018 seq_printf(m, "Gated voltage change: %s\n",
1019 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1020 seq_printf(m, "Starting frequency: P%d\n",
1021 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1022 seq_printf(m, "Max P-state: P%d\n",
1023 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1024 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1025 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1026 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1027 seq_printf(m, "Render standby enabled: %s\n",
1028 yesno(!(rstdbyctl & RCX_SW_EXIT)));
1029 seq_puts(m, "Current RS state: ");
1030 switch (rstdbyctl & RSX_STATUS_MASK) {
1032 seq_puts(m, "on\n");
1034 case RSX_STATUS_RC1:
1035 seq_puts(m, "RC1\n");
1037 case RSX_STATUS_RC1E:
1038 seq_puts(m, "RC1E\n");
1040 case RSX_STATUS_RS1:
1041 seq_puts(m, "RS1\n");
1043 case RSX_STATUS_RS2:
1044 seq_puts(m, "RS2 (RC6)\n");
1046 case RSX_STATUS_RS3:
1047 seq_puts(m, "RC3 (RC6+)\n");
1050 seq_puts(m, "unknown\n");
/*
 * i915_forcewake_domains - print the user-forcewake bypass count and the
 * wake count of each forcewake domain.
 */
1057 static int i915_forcewake_domains(struct seq_file *m, void *data)
1059 struct drm_i915_private *i915 = node_to_i915(m->private);
1060 struct intel_uncore *uncore = &i915->uncore;
1061 struct intel_uncore_forcewake_domain *fw_domain;
1064 seq_printf(m, "user.bypass_count = %u\n",
1065 uncore->user_forcewake_count);
1067 for_each_fw_domain(fw_domain, uncore, tmp)
1068 seq_printf(m, "%s.wake_count = %u\n",
1069 intel_uncore_forcewake_domain_to_str(fw_domain->id),
1070 READ_ONCE(fw_domain->wake_count));
/*
 * print_rc6_res - print a titled RC6 residency counter (raw register
 * value plus its conversion to microseconds), under a runtime-PM wakeref.
 */
1075 static void print_rc6_res(struct seq_file *m,
1077 const i915_reg_t reg)
1079 struct drm_i915_private *i915 = node_to_i915(m->private);
1080 intel_wakeref_t wakeref;
1082 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
1083 seq_printf(m, "%s %u (%llu us)\n", title,
1084 intel_uncore_read(&i915->uncore, reg),
1085 intel_rc6_residency_us(&i915->gt.rc6, reg));
/*
 * vlv_drpc_info - Valleyview/Cherryview RC6 state: enable bit, render and
 * media power-well status, and RC6 residency counters; then chains into
 * the forcewake-domain dump.
 */
1088 static int vlv_drpc_info(struct seq_file *m)
1090 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1091 u32 rcctl1, pw_status;
1093 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1094 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1096 seq_printf(m, "RC6 Enabled: %s\n",
1097 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1098 GEN6_RC_CTL_EI_MODE(1))));
1099 seq_printf(m, "Render Power Well: %s\n",
1100 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1101 seq_printf(m, "Media Power Well: %s\n",
1102 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1104 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1105 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1107 return i915_forcewake_domains(m, NULL);
/*
 * gen6_drpc_info - gen6+ RC-state dump: RC1e/RC6/RC6p/RC6pp enables,
 * gen9 power-gating state, current RC state decoded from GT_CORE_STATUS,
 * RC6 residency counters and (gen6/7) RC6 voltages from pcode; then
 * chains into the forcewake-domain dump.
 */
1110 static int gen6_drpc_info(struct seq_file *m)
1112 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1113 u32 gt_core_status, rcctl1, rc6vids = 0;
1114 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
/* _FW read: forcewake deliberately not taken for this status register. */
1116 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1117 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1119 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1120 if (INTEL_GEN(dev_priv) >= 9) {
1121 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1122 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1125 if (INTEL_GEN(dev_priv) <= 7)
1126 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1129 seq_printf(m, "RC1e Enabled: %s\n",
1130 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1131 seq_printf(m, "RC6 Enabled: %s\n",
1132 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1133 if (INTEL_GEN(dev_priv) >= 9) {
1134 seq_printf(m, "Render Well Gating Enabled: %s\n",
1135 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1136 seq_printf(m, "Media Well Gating Enabled: %s\n",
1137 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1139 seq_printf(m, "Deep RC6 Enabled: %s\n",
1140 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1141 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1142 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1143 seq_puts(m, "Current RC state: ");
1144 switch (gt_core_status & GEN6_RCn_MASK) {
1146 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1147 seq_puts(m, "Core Power Down\n")
;
1149 seq_puts(m, "on\n");
1152 seq_puts(m, "RC3\n");
1155 seq_puts(m, "RC6\n");
1158 seq_puts(m, "RC7\n");
1161 seq_puts(m, "Unknown\n");
1165 seq_printf(m, "Core Power Down: %s\n",
1166 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1167 if (INTEL_GEN(dev_priv) >= 9) {
1168 seq_printf(m, "Render Power Well: %s\n",
1169 (gen9_powergate_status &
1170 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1171 seq_printf(m, "Media Power Well: %s\n",
1172 (gen9_powergate_status &
1173 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1176 /* Not exactly sure what this is */
1177 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1178 GEN6_GT_GFX_RC6_LOCKED);
1179 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1180 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1181 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1183 if (INTEL_GEN(dev_priv) <= 7) {
1184 seq_printf(m, "RC6 voltage: %dmV\n",
1185 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1186 seq_printf(m, "RC6+ voltage: %dmV\n",
1187 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1188 seq_printf(m, "RC6++ voltage: %dmV\n",
1189 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1192 return i915_forcewake_domains(m, NULL);
/*
 * i915_drpc_info - debugfs "i915_drpc_info" entry point.
 * Dispatches to the platform-specific RC6 dumper (VLV/CHV, gen6+, or ILK)
 * while holding a runtime-pm wakeref so the registers are readable.
 */
1195 static int i915_drpc_info(struct seq_file *m, void *unused)
1197 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1198 intel_wakeref_t wakeref;
1201 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1202 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1203 err = vlv_drpc_info(m);
1204 else if (INTEL_GEN(dev_priv) >= 6)
1205 err = gen6_drpc_info(m);
/* Fallback: Ironlake-era DRPC dump. */
1207 err = ilk_drpc_info(m);
/*
 * i915_ring_freq_table - dump the GPU-to-CPU/ring frequency mapping table.
 * Only meaningful on parts with an LLC; queries pcode for each GPU frequency
 * step between the RPS min and max limits.
 */
1213 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1215 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1216 struct intel_rps *rps = &dev_priv->gt.rps;
1217 unsigned int max_gpu_freq, min_gpu_freq;
1218 intel_wakeref_t wakeref;
1219 int gpu_freq, ia_freq;
/* The ring/IA frequency table only exists when the GPU shares the LLC. */
1221 if (!HAS_LLC(dev_priv))
1224 min_gpu_freq = rps->min_freq;
1225 max_gpu_freq = rps->max_freq;
1226 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1227 /* Convert GT frequency to 50 HZ units */
1228 min_gpu_freq /= GEN9_FREQ_SCALER;
1229 max_gpu_freq /= GEN9_FREQ_SCALER;
1232 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1234 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1235 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
/* Pcode returns packed IA (bits 7:0) and ring (bits 15:8) ratios. */
1237 sandybridge_pcode_read(dev_priv,
1238 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1240 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1243 (IS_GEN9_BC(dev_priv) ||
1244 INTEL_GEN(dev_priv) >= 10 ?
1245 GEN9_FREQ_SCALER : 1))),
1246 ((ia_freq >> 0) & 0xff) * 100,
1247 ((ia_freq >> 8) & 0xff) * 100);
1249 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Print a one-line summary of a context ringbuffer's head/tail/emit state. */
1254 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1256 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1257 ring->space, ring->head, ring->tail, ring->emit);
/*
 * i915_context_status - dump every live GEM context and its pinned engines.
 * Walks i915->gem.contexts.list under its spinlock; each context is
 * reference-grabbed (kref_get_unless_zero) so the lock can be dropped while
 * printing, then the iterator is re-validated before moving on.
 */
1260 static int i915_context_status(struct seq_file *m, void *unused)
1262 struct drm_i915_private *i915 = node_to_i915(m->private);
1263 struct i915_gem_context *ctx, *cn;
1265 spin_lock(&i915->gem.contexts.lock);
1266 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
1267 struct i915_gem_engines_iter it;
1268 struct intel_context *ce;
/* Skip contexts already on their way to destruction. */
1270 if (!kref_get_unless_zero(&ctx->ref))
/* Drop the lock while we print; we hold our own reference now. */
1273 spin_unlock(&i915->gem.contexts.lock);
1275 seq_puts(m, "HW context ");
1277 struct task_struct *task;
1279 task = get_pid_task(ctx->pid, PIDTYPE_PID);
1281 seq_printf(m, "(%s [%d]) ",
1282 task->comm, task->pid);
1283 put_task_struct(task);
1285 } else if (IS_ERR(ctx->file_priv)) {
1286 seq_puts(m, "(deleted) ");
1288 seq_puts(m, "(kernel) ");
/* 'R'/'r' flags whether the context still needs slice remapping. */
1291 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1294 for_each_gem_engine(ce,
1295 i915_gem_context_lock_engines(ctx), it) {
/* Only describe engines whose context state is currently pinned. */
1296 if (intel_context_pin_if_active(ce)) {
1297 seq_printf(m, "%s: ", ce->engine->name);
1299 i915_debugfs_describe_obj(m, ce->state->obj);
1300 describe_ctx_ring(m, ce->ring);
1302 intel_context_unpin(ce);
1305 i915_gem_context_unlock_engines(ctx);
/* Retake the lock and resync the safe-iterator before releasing ctx. */
1309 spin_lock(&i915->gem.contexts.lock);
1310 list_safe_reset_next(ctx, cn, link);
1311 i915_gem_context_put(ctx);
1313 spin_unlock(&i915->gem.contexts.lock);
/* Map an I915_BIT_6_SWIZZLE_* enum value to a human-readable string. */
1318 static const char *swizzle_string(unsigned swizzle)
1321 case I915_BIT_6_SWIZZLE_NONE:
1323 case I915_BIT_6_SWIZZLE_9:
1325 case I915_BIT_6_SWIZZLE_9_10:
1326 return "bit9/bit10";
1327 case I915_BIT_6_SWIZZLE_9_11:
1328 return "bit9/bit11";
1329 case I915_BIT_6_SWIZZLE_9_10_11:
1330 return "bit9/bit10/bit11";
1331 case I915_BIT_6_SWIZZLE_9_17:
1332 return "bit9/bit17";
1333 case I915_BIT_6_SWIZZLE_9_10_17:
1334 return "bit9/bit10/bit17";
1335 case I915_BIT_6_SWIZZLE_UNKNOWN:
/*
 * i915_swizzle_info - dump bit-6 swizzling configuration for tiled surfaces.
 * Prints the detected X/Y swizzle modes plus the raw DRAM-configuration
 * registers they were derived from (which set depends on the generation).
 */
1344 static int i915_swizzle_info(struct seq_file *m, void *data)
1345 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1346 struct intel_uncore *uncore = &dev_priv->uncore;
1347 intel_wakeref_t wakeref;
1348 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1350 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1351 swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
1352 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1353 swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
/* Gen3/4: DRAM channel config lives in the DCC/DRB registers. */
1355 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1356 seq_printf(m, "DDC = 0x%08x\n",
1357 intel_uncore_read(uncore, DCC));
1358 seq_printf(m, "DDC2 = 0x%08x\n",
1359 intel_uncore_read(uncore, DCC2));
1360 seq_printf(m, "C0DRB3 = 0x%04x\n",
1361 intel_uncore_read16(uncore, C0DRB3));
1362 seq_printf(m, "C1DRB3 = 0x%04x\n",
1363 intel_uncore_read16(uncore, C1DRB3));
/* Gen6+: per-channel DIMM config plus tiling/arbiter control. */
1364 } else if (INTEL_GEN(dev_priv) >= 6) {
1365 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1366 intel_uncore_read(uncore, MAD_DIMM_C0));
1367 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1368 intel_uncore_read(uncore, MAD_DIMM_C1));
1369 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1370 intel_uncore_read(uncore, MAD_DIMM_C2));
1371 seq_printf(m, "TILECTL = 0x%08x\n",
1372 intel_uncore_read(uncore, TILECTL));
1373 if (INTEL_GEN(dev_priv) >= 8)
1374 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1375 intel_uncore_read(uncore, GAMTARBMODE));
1377 seq_printf(m, "ARB_MODE = 0x%08x\n",
1378 intel_uncore_read(uncore, ARB_MODE));
1379 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1380 intel_uncore_read(uncore, DISP_ARB_CTL));
1383 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1384 seq_puts(m, "L-shaped memory detected\n");
1386 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Map an RPS power mode (LOW_POWER/BETWEEN/HIGH_POWER) to a display string. */
1391 static const char *rps_power_to_str(unsigned int power)
1393 static const char * const strings[] = {
1394 [LOW_POWER] = "low power",
1395 [BETWEEN] = "mixed",
1396 [HIGH_POWER] = "high power",
/* Guard against out-of-range or unpopulated designated-initializer slots. */
1399 if (power >= ARRAY_SIZE(strings) || !strings[power])
1402 return strings[power];
/*
 * i915_rps_boost_info - dump RPS (GPU frequency scaling) state and boosts.
 * Prints the configured frequency limits, outstanding waitboosts, and - when
 * the GT is awake - the current autotuning up/down interrupt averages.
 */
1405 static int i915_rps_boost_info(struct seq_file *m, void *data)
1407 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1408 struct intel_rps *rps = &dev_priv->gt.rps;
1410 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1411 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1412 seq_printf(m, "Boosts outstanding? %d\n",
1413 atomic_read(&rps->num_waiters));
1414 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1415 seq_printf(m, "Frequency requested %d, actual %d\n",
1416 intel_gpu_freq(rps, rps->cur_freq),
1417 intel_rps_read_actual_frequency(rps));
1418 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1419 intel_gpu_freq(rps, rps->min_freq),
1420 intel_gpu_freq(rps, rps->min_freq_softlimit),
1421 intel_gpu_freq(rps, rps->max_freq_softlimit),
1422 intel_gpu_freq(rps, rps->max_freq));
1423 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
1424 intel_gpu_freq(rps, rps->idle_freq),
1425 intel_gpu_freq(rps, rps->efficient_freq),
1426 intel_gpu_freq(rps, rps->boost_freq));
1428 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
/* Autotuning counters are only valid while RPS is enabled and GT awake. */
1430 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1432 u32 rpdown, rpdownei;
/* Raw _FW reads bracketed by an explicit forcewake grab. */
1434 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1435 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1436 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1437 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1438 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1439 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1441 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1442 rps_power_to_str(rps->power.mode));
1443 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
1444 rpup && rpupei ? 100 * rpup / rpupei : 0,
1445 rps->power.up_threshold);
1446 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
1447 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1448 rps->power.down_threshold);
1450 seq_puts(m, "\nRPS Autotuning inactive\n");
/* Report LLC presence and the size of the eLLC/eDRAM cache, if any. */
1456 static int i915_llc(struct seq_file *m, void *data)
1458 struct drm_i915_private *dev_priv = node_to_i915(m->private);
/* Gen9+ calls the embedded cache "eDRAM"; gen8 and earlier, "eLLC". */
1459 const bool edram = INTEL_GEN(dev_priv) > 8;
1461 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1462 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1463 dev_priv->edram_size_mb);
/*
 * i915_huc_load_status_info - dump HuC firmware state and HUC_STATUS2.
 * Bails out early when the platform has no GT microcontrollers.
 */
1468 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1470 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1471 intel_wakeref_t wakeref;
1472 struct drm_printer p;
1474 if (!HAS_GT_UC(dev_priv))
1477 p = drm_seq_file_printer(m);
1478 intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
/* Register read needs the device awake. */
1480 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1481 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
/*
 * i915_guc_load_status_info - dump GuC firmware state, decoded GUC_STATUS
 * fields (bootrom/uKernel/MIA core), and the 16 soft-scratch registers.
 */
1488 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1489 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1490 intel_wakeref_t wakeref;
1491 struct drm_printer p;
1492 if (!HAS_GT_UC(dev_priv))
1495 p = drm_seq_file_printer(m);
1496 intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1498 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1499 u32 tmp = I915_READ(GUC_STATUS);
1502 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1503 seq_printf(m, "\tBootrom status = 0x%x\n",
1504 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1505 seq_printf(m, "\tuKernel status = 0x%x\n",
1506 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1507 seq_printf(m, "\tMIA Core status = 0x%x\n",
1508 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1509 seq_puts(m, "\nScratch registers:\n");
1510 for (i = 0; i < 16; i++) {
1511 seq_printf(m, "\t%2d: \t0x%x\n",
1512 i, I915_READ(SOFT_SCRATCH(i)));
/* Map a guc_log_buffer_type enum value to its display name. */
1520 stringify_guc_log_type(enum guc_log_buffer_type type)
1523 case GUC_ISR_LOG_BUFFER:
1525 case GUC_DPC_LOG_BUFFER:
1527 case GUC_CRASH_DUMP_LOG_BUFFER:
/*
 * i915_guc_log_info - print GuC log-relay statistics (flush/overflow counts
 * per log-buffer type).  No-op message if the relay was never created.
 */
1536 static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
1538 enum guc_log_buffer_type type;
1540 if (!intel_guc_log_relay_created(log)) {
1541 seq_puts(m, "GuC log relay not created\n");
1545 seq_puts(m, "GuC logging stats:\n");
1547 seq_printf(m, "\tRelay full count: %u\n",
1548 log->relay.full_count);
1550 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1551 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1552 stringify_guc_log_type(type),
1553 log->stats[type].flush,
1554 log->stats[type].sampled_overflow);
/* debugfs "i915_guc_info": currently just the GuC logging statistics. */
1558 static int i915_guc_info(struct seq_file *m, void *data)
1560 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1561 struct intel_uc *uc = &dev_priv->gt.uc;
1563 if (!intel_uc_uses_guc(uc))
1566 i915_guc_log_info(m, &uc->guc.log);
1568 /* Add more as required ... */
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor, including the
 * per-engine execlist context (LRC) entries for each descriptor.
 * Only available when GuC submission is in use.
 */
1573 static int i915_guc_stage_pool(struct seq_file *m, void *data)
1575 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1576 struct intel_uc *uc = &dev_priv->gt.uc;
1577 struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
1580 if (!intel_uc_uses_guc_submission(uc))
1583 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1584 struct intel_engine_cs *engine;
/* Only active descriptors carry meaningful contents. */
1586 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1589 seq_printf(m, "GuC stage descriptor %u:\n", index);
1590 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1591 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1592 seq_printf(m, "\tPriority: %d\n", desc->priority);
1593 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1594 seq_printf(m, "\tEngines used: 0x%x\n",
1595 desc->engines_used);
1596 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1597 desc->db_trigger_phy,
1598 desc->db_trigger_cpu,
1599 desc->db_trigger_uk);
1600 seq_printf(m, "\tProcess descriptor: 0x%x\n",
1601 desc->process_desc);
1602 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1603 desc->wq_addr, desc->wq_size);
/* One LRC entry per engine, indexed by the engine's GuC id. */
1606 for_each_uabi_engine(engine, dev_priv) {
1607 u32 guc_engine_id = engine->guc_id;
1608 struct guc_execlist_context *lrc =
1609 &desc->lrc[guc_engine_id];
1611 seq_printf(m, "\t%s LRC:\n", engine->name);
1612 seq_printf(m, "\t\tContext desc: 0x%x\n",
1614 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1615 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1616 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1617 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
/*
 * i915_guc_log_dump - hexdump the GuC log buffer (or the load-error log when
 * the info_ent's data field is non-NULL; see the debugfs table entries).
 * The backing object is pinned write-combined for the duration of the dump.
 */
1625 static int i915_guc_log_dump(struct seq_file *m, void *data)
1627 struct drm_info_node *node = m->private;
1628 struct drm_i915_private *dev_priv = node_to_i915(node);
/* Same handler serves two entries; ->data selects the error log. */
1629 bool dump_load_err = !!node->info_ent->data;
1630 struct drm_i915_gem_object *obj = NULL;
1634 if (!HAS_GT_UC(dev_priv))
1638 obj = dev_priv->gt.uc.load_err_log;
1639 else if (dev_priv->gt.uc.guc.log.vma)
1640 obj = dev_priv->gt.uc.guc.log.vma->obj;
1645 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1647 DRM_DEBUG("Failed to pin object\n");
1648 seq_puts(m, "(log data unaccessible)\n");
1649 return PTR_ERR(log);
/* Print four u32 words per output line. */
1652 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1653 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1654 *(log + i), *(log + i + 1),
1655 *(log + i + 2), *(log + i + 3));
1659 i915_gem_object_unpin_map(obj);
/* debugfs i915_guc_log_level: read the current GuC log verbosity. */
1664 static int i915_guc_log_level_get(void *data, u64 *val)
1666 struct drm_i915_private *dev_priv = data;
1667 struct intel_uc *uc = &dev_priv->gt.uc;
1669 if (!intel_uc_uses_guc(uc))
1672 *val = intel_guc_log_get_level(&uc->guc.log);
/* debugfs i915_guc_log_level: set the GuC log verbosity. */
1677 static int i915_guc_log_level_set(void *data, u64 val)
1679 struct drm_i915_private *dev_priv = data;
1680 struct intel_uc *uc = &dev_priv->gt.uc;
1682 if (!intel_uc_uses_guc(uc))
1685 return intel_guc_log_set_level(&uc->guc.log, val);
/* Simple integer attribute: wires the get/set pair above into file ops. */
1688 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1689 i915_guc_log_level_get, i915_guc_log_level_set,
/*
 * i915_guc_log_relay_open - open the GuC log relay control file.
 * Fails unless the GuC is up; stashes the log pointer for write().
 */
1692 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1694 struct drm_i915_private *i915 = inode->i_private;
1695 struct intel_guc *guc = &i915->gt.uc.guc;
1696 struct intel_guc_log *log = &guc->log;
1698 if (!intel_guc_is_ready(guc))
1701 file->private_data = log;
1703 return intel_guc_log_relay_open(log);
/* Write handler: "1" starts the relay, any other integer flushes it. */
1707 i915_guc_log_relay_write(struct file *filp,
1708 const char __user *ubuf,
1712 struct intel_guc_log *log = filp->private_data;
1716 ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1721 * Enable and start the guc log relay on value of 1.
1722 * Flush log relay for any other value.
1725 ret = intel_guc_log_relay_start(log);
1727 intel_guc_log_relay_flush(log);
/* Release: tear down the relay created at open time. */
1732 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1734 struct drm_i915_private *i915 = inode->i_private;
1735 struct intel_guc *guc = &i915->gt.uc.guc;
1737 intel_guc_log_relay_close(&guc->log);
/* File operations for the i915_guc_log_relay debugfs file. */
1741 static const struct file_operations i915_guc_log_relay_fops = {
1742 .owner = THIS_MODULE,
1743 .open = i915_guc_log_relay_open,
1744 .write = i915_guc_log_relay_write,
1745 .release = i915_guc_log_relay_release,
/*
 * i915_runtime_pm_status - report runtime-pm state: wakeref status, GT/IRQ
 * idleness, PM usage count, and the PCI device power state.  With
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM, also dumps tracked wakeref holders.
 */
1748 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
1750 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1751 struct pci_dev *pdev = dev_priv->drm.pdev;
1753 if (!HAS_RUNTIME_PM(dev_priv))
1754 seq_puts(m, "Runtime power management not supported\n");
1756 seq_printf(m, "Runtime power status: %s\n",
1757 enableddisabled(!dev_priv->power_domains.wakeref));
1759 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
1760 seq_printf(m, "IRQs disabled: %s\n",
1761 yesno(!intel_irqs_enabled(dev_priv)));
1763 seq_printf(m, "Usage count: %d\n",
1764 atomic_read(&dev_priv->drm.dev->power.usage_count));
/* NOTE(review): printed when CONFIG_PM is compiled out — verify #ifdef. */
1766 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
1768 seq_printf(m, "PCI device power state: %s [%d]\n",
1769 pci_power_name(pdev->current_state),
1770 pdev->current_state);
1772 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
1773 struct drm_printer p = drm_seq_file_printer(m);
1775 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
/*
 * i915_engine_info - dump GT awake state, CS timestamp frequency, and the
 * full intel_engine_dump() output for every user-visible engine.
 */
1781 static int i915_engine_info(struct seq_file *m, void *unused)
1783 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 struct intel_engine_cs *engine;
1785 intel_wakeref_t wakeref;
1786 struct drm_printer p;
/* Keep the device awake while we poke at engine state. */
1788 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1790 seq_printf(m, "GT awake? %s [%d]\n",
1791 yesno(dev_priv->gt.awake),
1792 atomic_read(&dev_priv->gt.wakeref.count));
1793 seq_printf(m, "CS timestamp frequency: %u kHz\n",
1794 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
1796 p = drm_seq_file_printer(m);
1797 for_each_uabi_engine(engine, dev_priv)
1798 intel_engine_dump(engine, &p, "%s\n", engine->name);
1800 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Dump the render engine's slice/subslice/EU topology. */
1805 static int i915_rcs_topology(struct seq_file *m, void *unused)
1807 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1808 struct drm_printer p = drm_seq_file_printer(m);
1810 intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
/* Print the GEM shrinker's registered seeks and batch parameters. */
1815 static int i915_shrinker_info(struct seq_file *m, void *unused)
1817 struct drm_i915_private *i915 = node_to_i915(m->private);
1819 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
1820 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
/*
 * i915_wa_registers - list the context workaround registers applied per
 * engine (offset, value, mask) from each engine's ctx_wa_list.
 */
1825 static int i915_wa_registers(struct seq_file *m, void *unused)
1827 struct drm_i915_private *i915 = node_to_i915(m->private);
1828 struct intel_engine_cs *engine;
1830 for_each_uabi_engine(engine, i915) {
1831 const struct i915_wa_list *wal = &engine->ctx_wa_list;
1832 const struct i915_wa *wa;
1839 seq_printf(m, "%s: Workarounds applied: %u\n",
1840 engine->name, count);
/* count was sampled above (elided here); iterate the wa array with it. */
1842 for (wa = wal->list; count--; wa++)
1843 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
1844 i915_mmio_reg_offset(wa->reg),
1847 seq_printf(m, "\n");
/* debugfs i915_wedged read: report whether the GT is terminally wedged. */
1854 i915_wedged_get(void *data, u64 *val)
1856 struct drm_i915_private *i915 = data;
1857 int ret = intel_gt_terminally_wedged(&i915->gt);
/* debugfs i915_wedged write: manually trigger a reset of the given engines. */
1872 i915_wedged_set(void *data, u64 val)
1874 struct drm_i915_private *i915 = data;
1876 /* Flush any previous reset before applying for a new one */
1877 wait_event(i915->gt.reset.queue,
1878 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
1880 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
1881 "Manually set wedged engine mask = %llx", val);
/* Wire the wedged get/set pair into a simple u64 debugfs attribute. */
1885 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1886 i915_wedged_get, i915_wedged_set,
/* debugfs write: set the perf NOA programming delay (in CS timestamp ticks). */
1890 i915_perf_noa_delay_set(void *data, u64 val)
1892 struct drm_i915_private *i915 = data;
1893 const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
/* Reject delays that would overflow the 32-bit CS timestamp difference. */
1896 * This would lead to infinite waits as we're doing timestamp
1897 * difference on the CS with only 32bits.
1899 if (val > mul_u32_u32(U32_MAX, clk))
1902 atomic64_set(&i915->perf.noa_programming_delay, val);
/* debugfs read: report the current perf NOA programming delay. */
1907 i915_perf_noa_delay_get(void *data, u64 *val)
1909 struct drm_i915_private *i915 = data;
1911 *val = atomic64_read(&i915->perf.noa_programming_delay);
/* Wire the NOA delay get/set pair into a simple u64 debugfs attribute. */
1915 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
1916 i915_perf_noa_delay_get,
1917 i915_perf_noa_delay_set,
/*
 * Bitmask flags for the i915_gem_drop_caches debugfs file: each bit selects
 * one category of cached state to drop; DROP_ALL combines them all.
 */
1920 #define DROP_UNBOUND BIT(0)
1921 #define DROP_BOUND BIT(1)
1922 #define DROP_RETIRE BIT(2)
1923 #define DROP_ACTIVE BIT(3)
1924 #define DROP_FREED BIT(4)
1925 #define DROP_SHRINK_ALL BIT(5)
1926 #define DROP_IDLE BIT(6)
1927 #define DROP_RESET_ACTIVE BIT(7)
1928 #define DROP_RESET_SEQNO BIT(8)
1929 #define DROP_RCU BIT(9)
1930 #define DROP_ALL (DROP_UNBOUND | \
1937 DROP_RESET_ACTIVE | \
1938 DROP_RESET_SEQNO | \
/* debugfs read: report the full DROP_ALL mask of supported flags. */
1941 i915_drop_caches_get(void *data, u64 *val)
/*
 * gt_drop_caches - apply the GT-level portion of the drop-caches flags:
 * optionally wedge stuck engines, retire requests, wait for idle, and
 * finally unwedge via a reset if requested.
 */
1948 gt_drop_caches(struct intel_gt *gt, u64 val)
/* If the engines refuse to idle, declare the GT wedged before resetting. */
1952 if (val & DROP_RESET_ACTIVE &&
1953 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
1954 intel_gt_set_wedged(gt);
1956 if (val & DROP_RETIRE)
1957 intel_gt_retire_requests(gt);
1959 if (val & (DROP_IDLE | DROP_ACTIVE)) {
1960 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1965 if (val & DROP_IDLE) {
1966 ret = intel_gt_pm_wait_for_idle(gt);
/* Recover a wedged GT with a full error-handling reset. */
1971 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
1972 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
/*
 * i915_drop_caches_set - debugfs write handler: drop the selected caches.
 * GT-level state first (gt_drop_caches), then shrinker passes under
 * fs_reclaim annotations, then drain freed objects.
 */
1978 i915_drop_caches_set(void *data, u64 val)
1980 struct drm_i915_private *i915 = data;
1983 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
1984 val, val & DROP_ALL);
1986 ret = gt_drop_caches(&i915->gt, val);
/* Annotate as reclaim context so lockdep checks shrinker dependencies. */
1990 fs_reclaim_acquire(GFP_KERNEL);
1991 if (val & DROP_BOUND)
1992 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
1994 if (val & DROP_UNBOUND)
1995 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
1997 if (val & DROP_SHRINK_ALL)
1998 i915_gem_shrink_all(i915);
1999 fs_reclaim_release(GFP_KERNEL);
2004 if (val & DROP_FREED)
2005 i915_gem_drain_freed_objects(i915);
/* Wire the drop-caches get/set pair into a simple u64 debugfs attribute. */
2010 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
2011 i915_drop_caches_get, i915_drop_caches_set,
/* debugfs read: report the MBC snoop/cache-sharing policy (gen6/7 only). */
2015 i915_cache_sharing_get(void *data, u64 *val)
2017 struct drm_i915_private *dev_priv = data;
2018 intel_wakeref_t wakeref;
2021 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
2024 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2025 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2027 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
/* debugfs write: update the MBC snoop/cache-sharing policy (gen6/7 only). */
2033 i915_cache_sharing_set(void *data, u64 val)
2035 struct drm_i915_private *dev_priv = data;
2036 intel_wakeref_t wakeref;
2038 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
2044 drm_dbg(&dev_priv->drm,
2045 "Manually setting uncore sharing to %llu\n", val);
2046 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2049 /* Update the cache sharing policy here as well */
2050 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2051 snpcr &= ~GEN6_MBC_SNPCR_MASK;
2052 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
2053 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
/* Copy one slice's subslice mask bytes from sseu into to_mask. */
2060 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
2063 int offset = slice * sseu->ss_stride;
2065 memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
/* Wire the cache-sharing get/set pair into a simple u64 debugfs attribute. */
2068 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2069 i915_cache_sharing_get, i915_cache_sharing_set,
/*
 * cherryview_sseu_device_status - read the live SSEU power-gating state on
 * CHV from the two subslice signal registers and accumulate it into *sseu.
 */
2072 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
2073 struct sseu_dev_info *sseu)
2076 const int ss_max = SS_MAX;
2077 u32 sig1[SS_MAX], sig2[SS_MAX];
2080 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
2081 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
2082 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
2083 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
2085 for (ss = 0; ss < ss_max; ss++) {
2086 unsigned int eu_cnt;
2088 if (sig1[ss] & CHV_SS_PG_ENABLE)
2089 /* skip disabled subslice */
/* CHV has a single slice; record this subslice as enabled. */
2092 sseu->slice_mask = BIT(0);
2093 sseu->subslice_mask[0] |= BIT(ss);
/* Each EU-pair PG bit clear means 2 EUs are powered in that group. */
2094 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
2095 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
2096 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
2097 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
2098 sseu->eu_total += eu_cnt;
2099 sseu->eu_per_subslice = max_t(unsigned int,
2100 sseu->eu_per_subslice, eu_cnt);
/*
 * gen10_sseu_device_status - read the live SSEU power-gating state on gen10+
 * from the per-slice PGCTL ack registers and accumulate it into *sseu.
 */
2105 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
2106 struct sseu_dev_info *sseu)
2109 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2110 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
2113 for (s = 0; s < info->sseu.max_slices; s++) {
2115 * FIXME: Valid SS Mask respects the spec and read
2116 * only valid bits for those registers, excluding reserved
2117 * although this seems wrong because it would leave many
2118 * subslices without ACK.
2120 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
2121 GEN10_PGCTL_VALID_SS_MASK(s);
2122 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
2123 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
/* Ack masks for the two subslice pairs (SS0/1 and SS2/3). */
2126 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
2127 GEN9_PGCTL_SSA_EU19_ACK |
2128 GEN9_PGCTL_SSA_EU210_ACK |
2129 GEN9_PGCTL_SSA_EU311_ACK;
2130 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
2131 GEN9_PGCTL_SSB_EU19_ACK |
2132 GEN9_PGCTL_SSB_EU210_ACK |
2133 GEN9_PGCTL_SSB_EU311_ACK;
2135 for (s = 0; s < info->sseu.max_slices; s++) {
2136 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
2137 /* skip disabled slice */
2140 sseu->slice_mask |= BIT(s);
2141 intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
2143 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
2144 unsigned int eu_cnt;
2146 if (info->sseu.has_subslice_pg &&
2147 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
2148 /* skip disabled subslice */
/* Each set ack bit represents an enabled pair of EUs. */
2151 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
2153 sseu->eu_total += eu_cnt;
2154 sseu->eu_per_subslice = max_t(unsigned int,
2155 sseu->eu_per_subslice,
/*
 * gen9_sseu_device_status - read the live SSEU power-gating state on gen9
 * from the per-slice PGCTL ack registers and accumulate it into *sseu.
 * BXT-style (GEN9_LP) parts report subslice state per-bit; SKL-style
 * (GEN9_BC) parts copy the static subslice mask for each enabled slice.
 */
2162 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
2163 struct sseu_dev_info *sseu)
2166 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2167 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
2170 for (s = 0; s < info->sseu.max_slices; s++) {
2171 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
2172 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
2173 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
/* Ack masks for the two subslice pairs (SS0/1 and SS2/3). */
2176 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
2177 GEN9_PGCTL_SSA_EU19_ACK |
2178 GEN9_PGCTL_SSA_EU210_ACK |
2179 GEN9_PGCTL_SSA_EU311_ACK;
2180 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
2181 GEN9_PGCTL_SSB_EU19_ACK |
2182 GEN9_PGCTL_SSB_EU210_ACK |
2183 GEN9_PGCTL_SSB_EU311_ACK;
2185 for (s = 0; s < info->sseu.max_slices; s++) {
2186 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
2187 /* skip disabled slice */
2190 sseu->slice_mask |= BIT(s);
2192 if (IS_GEN9_BC(dev_priv))
2193 intel_sseu_copy_subslices(&info->sseu, s,
2194 sseu->subslice_mask);
2196 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
2197 unsigned int eu_cnt;
2198 u8 ss_idx = s * info->sseu.ss_stride +
2201 if (IS_GEN9_LP(dev_priv)) {
2202 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
2203 /* skip disabled subslice */
2206 sseu->subslice_mask[ss_idx] |=
2207 BIT(ss % BITS_PER_BYTE);
/* Each set ack bit represents an enabled pair of EUs. */
2210 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
2212 sseu->eu_total += eu_cnt;
2213 sseu->eu_per_subslice = max_t(unsigned int,
2214 sseu->eu_per_subslice,
/*
 * bdw_sseu_device_status - derive the live SSEU state on Broadwell from
 * GEN8_GT_SLICE_INFO, then subtract the fused-off 7-EU subslices.
 */
2221 static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
2222 struct sseu_dev_info *sseu)
2224 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2225 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
2228 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
2230 if (sseu->slice_mask) {
2231 sseu->eu_per_subslice = info->sseu.eu_per_subslice;
2232 for (s = 0; s < fls(sseu->slice_mask); s++)
2233 intel_sseu_copy_subslices(&info->sseu, s,
2234 sseu->subslice_mask);
2235 sseu->eu_total = sseu->eu_per_subslice *
2236 intel_sseu_subslice_total(sseu);
2238 /* subtract fused off EU(s) from enabled slice(s) */
2239 for (s = 0; s < fls(sseu->slice_mask); s++) {
2240 u8 subslice_7eu = info->sseu.subslice_7eu[s];
2242 sseu->eu_total -= hweight8(subslice_7eu);
/*
 * i915_print_sseu_info - pretty-print an sseu_dev_info.
 * @is_available_info selects the "Available" (static capabilities) label and
 * the extra power-gating feature lines vs. the "Enabled" (runtime) label.
 */
2247 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
2248 const struct sseu_dev_info *sseu)
2250 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2251 const char *type = is_available_info ? "Available" : "Enabled";
2254 seq_printf(m, " %s Slice Mask: %04x\n", type,
2256 seq_printf(m, " %s Slice Total: %u\n", type,
2257 hweight8(sseu->slice_mask));
2258 seq_printf(m, " %s Subslice Total: %u\n", type,
2259 intel_sseu_subslice_total(sseu));
2260 for (s = 0; s < fls(sseu->slice_mask); s++) {
2261 seq_printf(m, " %s Slice%i subslices: %u\n", type,
2262 s, intel_sseu_subslices_per_slice(sseu, s));
2264 seq_printf(m, " %s EU Total: %u\n", type,
2266 seq_printf(m, " %s EU Per Subslice: %u\n", type,
2267 sseu->eu_per_subslice);
/* Feature flags below only apply to the static "Available" dump. */
2269 if (!is_available_info)
2272 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
2273 if (HAS_POOLED_EU(dev_priv))
2274 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
2276 seq_printf(m, " Has Slice Power Gating: %s\n",
2277 yesno(sseu->has_slice_pg));
2278 seq_printf(m, " Has Subslice Power Gating: %s\n",
2279 yesno(sseu->has_subslice_pg));
2280 seq_printf(m, " Has EU Power Gating: %s\n",
2281 yesno(sseu->has_eu_pg));
/*
 * i915_sseu_status - debugfs entry: print both the static SSEU capabilities
 * and the live device status, read via the platform-specific helper while
 * holding a runtime-pm wakeref.  Gen8+ only.
 */
2284 static int i915_sseu_status(struct seq_file *m, void *unused)
2286 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2287 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2288 struct sseu_dev_info sseu;
2289 intel_wakeref_t wakeref;
2291 if (INTEL_GEN(dev_priv) < 8)
2294 seq_puts(m, "SSEU Device Info\n");
2295 i915_print_sseu_info(m, true, &info->sseu);
2297 seq_puts(m, "SSEU Device Status\n");
/* Start from a zeroed struct sized to the device's maxima. */
2298 memset(&sseu, 0, sizeof(sseu));
2299 intel_sseu_set_info(&sseu, info->sseu.max_slices,
2300 info->sseu.max_subslices,
2301 info->sseu.max_eus_per_subslice);
2303 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2304 if (IS_CHERRYVIEW(dev_priv))
2305 cherryview_sseu_device_status(dev_priv, &sseu);
2306 else if (IS_BROADWELL(dev_priv))
2307 bdw_sseu_device_status(dev_priv, &sseu);
2308 else if (IS_GEN(dev_priv, 9))
2309 gen9_sseu_device_status(dev_priv, &sseu);
2310 else if (INTEL_GEN(dev_priv) >= 10)
2311 gen10_sseu_device_status(dev_priv, &sseu);
2314 i915_print_sseu_info(m, false, &sseu);
/*
 * i915_forcewake_open - take a user-held forcewake + GT wakeref so the GPU
 * stays awake while the debugfs file is open (paired with _release below).
 */
2319 static int i915_forcewake_open(struct inode *inode, struct file *file)
2321 struct drm_i915_private *i915 = inode->i_private;
2322 struct intel_gt *gt = &i915->gt;
2324 atomic_inc(>->user_wakeref);
2325 intel_gt_pm_get(gt);
/* Explicit forcewake only exists on gen6+. */
2326 if (INTEL_GEN(i915) >= 6)
2327 intel_uncore_forcewake_user_get(gt->uncore);
2332 static int i915_forcewake_release(struct inode *inode, struct file *file)
2334 struct drm_i915_private *i915 = inode->i_private;
2335 struct intel_gt *gt = &i915->gt;
2337 if (INTEL_GEN(i915) >= 6)
2338 intel_uncore_forcewake_user_put(&i915->uncore);
2339 intel_gt_pm_put(gt);
2340 atomic_dec(>->user_wakeref);
/* File operations for i915_forcewake_user: hold forcewake while open. */
2345 static const struct file_operations i915_forcewake_fops = {
2346 .owner = THIS_MODULE,
2347 .open = i915_forcewake_open,
2348 .release = i915_forcewake_release,
/* Read-only seq_file debugfs entries registered via drm_debugfs_create_files(). */
2351 static const struct drm_info_list i915_debugfs_list[] = {
2352 {"i915_capabilities", i915_capabilities, 0},
2353 {"i915_gem_objects", i915_gem_object_info, 0},
2354 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2355 {"i915_gem_interrupt", i915_interrupt_info, 0},
2356 {"i915_guc_info", i915_guc_info, 0},
2357 {"i915_guc_load_status", i915_guc_load_status_info, 0},
2358 {"i915_guc_log_dump", i915_guc_log_dump, 0},
/* Non-NULL ->data makes i915_guc_log_dump() print the load-error log. */
2359 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
2360 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
2361 {"i915_huc_load_status", i915_huc_load_status_info, 0},
2362 {"i915_frequency_info", i915_frequency_info, 0},
2363 {"i915_drpc_info", i915_drpc_info, 0},
2364 {"i915_ring_freq_table", i915_ring_freq_table, 0},
2365 {"i915_context_status", i915_context_status, 0},
2366 {"i915_forcewake_domains", i915_forcewake_domains, 0},
2367 {"i915_swizzle_info", i915_swizzle_info, 0},
2368 {"i915_llc", i915_llc, 0},
2369 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
2370 {"i915_engine_info", i915_engine_info, 0},
2371 {"i915_rcs_topology", i915_rcs_topology, 0},
2372 {"i915_shrinker_info", i915_shrinker_info, 0},
2373 {"i915_wa_registers", i915_wa_registers, 0},
2374 {"i915_sseu_status", i915_sseu_status, 0},
2375 {"i915_rps_boost_info", i915_rps_boost_info, 0},
2377 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
/* Writable debugfs entries: name + file_operations pairs created one by one. */
2379 static const struct i915_debugfs_files {
2381 const struct file_operations *fops;
2382 } i915_debugfs_files[] = {
2383 {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
2384 {"i915_wedged", &i915_wedged_fops},
2385 {"i915_cache_sharing", &i915_cache_sharing_fops},
2386 {"i915_gem_drop_caches", &i915_drop_caches_fops},
/* Error-state files only exist when error capture is compiled in. */
2387 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
2388 {"i915_error_state", &i915_error_state_fops},
2389 {"i915_gpu_info", &i915_gpu_info_fops},
2391 {"i915_guc_log_level", &i915_guc_log_level_fops},
2392 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
/*
 * i915_debugfs_register - create all i915 debugfs entries on the primary
 * minor: params, the forcewake-user file, the writable attribute files,
 * and finally the read-only seq_file list.
 */
2395 int i915_debugfs_register(struct drm_i915_private *dev_priv)
2397 struct drm_minor *minor = dev_priv->drm.primary;
2400 i915_debugfs_params(dev_priv);
2402 debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
2403 to_i915(minor->dev), &i915_forcewake_fops);
2404 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2405 debugfs_create_file(i915_debugfs_files[i].name,
2407 minor->debugfs_root,
2408 to_i915(minor->dev),
2409 i915_debugfs_files[i].fops);
2412 return drm_debugfs_create_files(i915_debugfs_list,
2413 I915_DEBUGFS_ENTRIES,
2414 minor->debugfs_root, minor);