drm/i915: Streamline skl_commit_modeset_enables()
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34
35 #include "display/intel_display_types.h"
36 #include "display/intel_dp.h"
37 #include "display/intel_fbc.h"
38 #include "display/intel_hdcp.h"
39 #include "display/intel_hdmi.h"
40 #include "display/intel_psr.h"
41
42 #include "gem/i915_gem_context.h"
43 #include "gt/intel_gt_pm.h"
44 #include "gt/intel_gt_requests.h"
45 #include "gt/intel_reset.h"
46 #include "gt/intel_rc6.h"
47 #include "gt/intel_rps.h"
48 #include "gt/uc/intel_guc_submission.h"
49
50 #include "i915_debugfs.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_csr.h"
54 #include "intel_pm.h"
55 #include "intel_sideband.h"
56
57 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
58 {
59         return to_i915(node->minor->dev);
60 }
61
62 static int i915_capabilities(struct seq_file *m, void *data)
63 {
64         struct drm_i915_private *i915 = node_to_i915(m->private);
65         struct drm_printer p = drm_seq_file_printer(m);
66
67         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
68
69         intel_device_info_print_static(INTEL_INFO(i915), &p);
70         intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
71         intel_driver_caps_print(&i915->caps, &p);
72
73         kernel_param_lock(THIS_MODULE);
74         i915_params_dump(&i915_modparams, &p);
75         kernel_param_unlock(THIS_MODULE);
76
77         return 0;
78 }
79
80 static char get_tiling_flag(struct drm_i915_gem_object *obj)
81 {
82         switch (i915_gem_object_get_tiling(obj)) {
83         default:
84         case I915_TILING_NONE: return ' ';
85         case I915_TILING_X: return 'X';
86         case I915_TILING_Y: return 'Y';
87         }
88 }
89
90 static char get_global_flag(struct drm_i915_gem_object *obj)
91 {
92         return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
93 }
94
95 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
96 {
97         return obj->mm.mapping ? 'M' : ' ';
98 }
99
100 static const char *
101 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
102 {
103         size_t x = 0;
104
105         switch (page_sizes) {
106         case 0:
107                 return "";
108         case I915_GTT_PAGE_SIZE_4K:
109                 return "4K";
110         case I915_GTT_PAGE_SIZE_64K:
111                 return "64K";
112         case I915_GTT_PAGE_SIZE_2M:
113                 return "2M";
114         default:
115                 if (!buf)
116                         return "M";
117
118                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
119                         x += snprintf(buf + x, len - x, "2M, ");
120                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
121                         x += snprintf(buf + x, len - x, "64K, ");
122                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
123                         x += snprintf(buf + x, len - x, "4K, ");
124                 buf[x-2] = '\0';
125
126                 return buf;
127         }
128 }
129
/*
 * describe_obj - print a one-line summary of a GEM object.
 *
 * Emits the object's flag characters (tiling/global-fault/pin-mapped),
 * size, read/write domains, cache level, dirty/purgeable state, then one
 * entry per bound VMA (including GGTT view geometry), followed by pin
 * count, stolen offset, framebuffer marker and last write engine.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		/*
		 * Drop the vma lock around the printing and retake it
		 * before advancing the iterator.
		 * NOTE(review): no reference is taken on vma across the
		 * unlock — this relies on the list linkage remaining
		 * valid meanwhile; confirm against i915_vma teardown.
		 */
		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe how the GGTT view remaps the pages. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	/* Report the engine that last wrote to the object, if still known. */
	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
225
/*
 * Accumulator for per-client/per-context memory statistics; filled in by
 * per_file_stats() and printed via print_file_stats().
 */
struct file_stats {
	struct i915_address_space *vm;	/* if set, count only this vm's binding */
	unsigned long count;		/* objects visited */
	u64 total, unbound;		/* total size; size with no bindings */
	u64 active, inactive;		/* bound size split by activity */
	u64 closed;			/* bound size on closed vmas */
};
233
/*
 * per_file_stats - accumulate one object's sizes into a file_stats bucket.
 * @id: idr handle (unused)
 * @ptr: the drm_i915_gem_object
 * @data: struct file_stats accumulator
 *
 * Suitable as an idr_for_each() callback.  With stats->vm set, only the
 * VMA bound into that address space is counted; otherwise all GGTT VMAs
 * are.  Always returns 0 so iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	/* Skip objects whose last reference is already being dropped. */
	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		/* Binary-search the object's vma tree for stats->vm. */
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			/*
			 * Descent direction follows i915_vma_compare()'s
			 * ordering (negative -> right subtree), matching
			 * how the tree is built at insertion.
			 */
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}
293
/*
 * Emit one summary line for a stats bucket; silent when no objects were
 * counted.  Kept as a macro so it can take the struct by value directly.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
305
/*
 * print_context_stats - dump per-context memory usage.
 *
 * Walks every GEM context: kernel-internal allocations (context state and
 * ring buffers of pinned contexts) are summed into one "[k]contexts" line,
 * while each context with an open file gets its own line labelled with the
 * owning task's comm.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts already on their way out. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		/*
		 * Drop the contexts lock while inspecting this context;
		 * the reference taken above keeps ctx alive meanwhile.
		 */
		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			/* Label the line with the owning task's name. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		/*
		 * Retake the lock and revalidate the next pointer before
		 * dropping our reference — the list may have changed while
		 * we were unlocked.
		 */
		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
366
367 static int i915_gem_object_info(struct seq_file *m, void *data)
368 {
369         struct drm_i915_private *i915 = node_to_i915(m->private);
370
371         seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
372                    i915->mm.shrink_count,
373                    atomic_read(&i915->mm.free_count),
374                    i915->mm.shrink_memory);
375
376         seq_putc(m, '\n');
377
378         print_context_stats(m, i915);
379
380         return 0;
381 }
382
/*
 * gen8_display_interrupt_info - dump gen8+ display interrupt registers.
 *
 * Prints IMR/IIR/IER for each pipe (skipping pipes whose power well is
 * down), then the DE port, DE misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/*
		 * Only read pipe registers if the power domain is already
		 * enabled; don't wake a powered-down well just to dump it.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
434
/*
 * i915_interrupt_info - debugfs dump of the interrupt register state.
 *
 * Selects the register set by platform (Cherryview, gen11+, gen8+,
 * Valleyview, pre-PCH-split, PCH-split) and finishes with the per-engine
 * interrupt masks.  A runtime-pm wakeref is held for the duration so the
 * hardware registers can be read.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip pipes whose power well is down. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display side shares the gen8 register layout. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip pipes whose power well is down. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:          %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:        %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:            %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:          %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:        %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:            %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:               %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:             %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:         %08x\n",
			   I915_READ(GTIMR));
	}

	/* Per-engine interrupt masks, layout differs before/after gen11. */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):       %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
655
656 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
657 {
658         struct drm_i915_private *i915 = node_to_i915(m->private);
659         unsigned int i;
660
661         seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
662
663         rcu_read_lock();
664         for (i = 0; i < i915->ggtt.num_fences; i++) {
665                 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
666                 struct i915_vma *vma = reg->vma;
667
668                 seq_printf(m, "Fence %d, pin count = %d, object = ",
669                            i, atomic_read(&reg->pin_count));
670                 if (!vma)
671                         seq_puts(m, "unused");
672                 else
673                         describe_obj(m, vma->obj);
674                 seq_putc(m, '\n');
675         }
676         rcu_read_unlock();
677
678         return 0;
679 }
680
681 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
682 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
683                               size_t count, loff_t *pos)
684 {
685         struct i915_gpu_state *error;
686         ssize_t ret;
687         void *buf;
688
689         error = file->private_data;
690         if (!error)
691                 return 0;
692
693         /* Bounce buffer required because of kernfs __user API convenience. */
694         buf = kmalloc(count, GFP_KERNEL);
695         if (!buf)
696                 return -ENOMEM;
697
698         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
699         if (ret <= 0)
700                 goto out;
701
702         if (!copy_to_user(ubuf, buf, ret))
703                 *pos += ret;
704         else
705                 ret = -EFAULT;
706
707 out:
708         kfree(buf);
709         return ret;
710 }
711
712 static int gpu_state_release(struct inode *inode, struct file *file)
713 {
714         i915_gpu_state_put(file->private_data);
715         return 0;
716 }
717
718 static int i915_gpu_info_open(struct inode *inode, struct file *file)
719 {
720         struct drm_i915_private *i915 = inode->i_private;
721         struct i915_gpu_state *gpu;
722         intel_wakeref_t wakeref;
723
724         gpu = NULL;
725         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
726                 gpu = i915_capture_gpu_state(i915);
727         if (IS_ERR(gpu))
728                 return PTR_ERR(gpu);
729
730         file->private_data = gpu;
731         return 0;
732 }
733
/*
 * debugfs "i915_gpu_info": captures a fresh GPU state on open and streams
 * it via gpu_state_read(); the capture is dropped on release.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
741
742 static ssize_t
743 i915_error_state_write(struct file *filp,
744                        const char __user *ubuf,
745                        size_t cnt,
746                        loff_t *ppos)
747 {
748         struct i915_gpu_state *error = filp->private_data;
749
750         if (!error)
751                 return 0;
752
753         DRM_DEBUG_DRIVER("Resetting error state\n");
754         i915_reset_error_state(error->i915);
755
756         return cnt;
757 }
758
759 static int i915_error_state_open(struct inode *inode, struct file *file)
760 {
761         struct i915_gpu_state *error;
762
763         error = i915_first_error_state(inode->i_private);
764         if (IS_ERR(error))
765                 return PTR_ERR(error);
766
767         file->private_data  = error;
768         return 0;
769 }
770
/*
 * debugfs "i915_error_state": read streams the stored error dump, any
 * write resets it. Shares the read/release hooks with i915_gpu_info.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
779 #endif
780
/*
 * Dump GPU frequency / RPS state, with three platform-specific layouts:
 * ILK (gen5) MEMSWCTL-based P-states, VLV/CHV punit-based frequencies,
 * and gen6+ RPS registers. The device is held awake for all the MMIO.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: P-state and VID live in the MCH control regs. */
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* Actual frequency comes from the punit, behind its lock. */
		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* BXT/GLK expose the caps/status at different offsets. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* Requested frequency field moved across generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		/* PM interrupt registers moved (and partly vanished) over time. */
		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPn/RP1/RP0 fields swap byte lanes on GEN9_LP. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}
1003
/*
 * Report Ironlake DRPC (render standby / RC) state: boost configuration,
 * P-state limits, RS VIDs and the current render-standby state decoded
 * from RSTDBYCTL. Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* RS1/RS2 voltage IDs are packed into the low/high bytes. */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1061
1062 static int i915_forcewake_domains(struct seq_file *m, void *data)
1063 {
1064         struct drm_i915_private *i915 = node_to_i915(m->private);
1065         struct intel_uncore *uncore = &i915->uncore;
1066         struct intel_uncore_forcewake_domain *fw_domain;
1067         unsigned int tmp;
1068
1069         seq_printf(m, "user.bypass_count = %u\n",
1070                    uncore->user_forcewake_count);
1071
1072         for_each_fw_domain(fw_domain, uncore, tmp)
1073                 seq_printf(m, "%s.wake_count = %u\n",
1074                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1075                            READ_ONCE(fw_domain->wake_count));
1076
1077         return 0;
1078 }
1079
1080 static void print_rc6_res(struct seq_file *m,
1081                           const char *title,
1082                           const i915_reg_t reg)
1083 {
1084         struct drm_i915_private *i915 = node_to_i915(m->private);
1085         intel_wakeref_t wakeref;
1086
1087         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
1088                 seq_printf(m, "%s %u (%llu us)\n", title,
1089                            intel_uncore_read(&i915->uncore, reg),
1090                            intel_rc6_residency_us(&i915->gt.rc6, reg));
1091 }
1092
/*
 * VLV/CHV DRPC report: RC6 enablement, render/media power-well status and
 * RC6 residencies. Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* On VLV/CHV, RC6 is on if either timeout or EI mode is selected. */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Tail-call the forcewake dump so both appear in one file. */
	return i915_forcewake_domains(m, NULL);
}
1114
/*
 * Gen6+ DRPC report: RC-state enablement bits, current RC state decoded
 * from GT_CORE_STATUS, gen9 power-gating status, RC6 residencies and
 * (gen6/7 only) the RC6 voltage IDs read via pcode.
 * Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake; mirror it into the mmio tracepoint. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* Three VIDs packed one per byte into rc6vids. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1199
1200 static int i915_drpc_info(struct seq_file *m, void *unused)
1201 {
1202         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1203         intel_wakeref_t wakeref;
1204         int err = -ENODEV;
1205
1206         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1207                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1208                         err = vlv_drpc_info(m);
1209                 else if (INTEL_GEN(dev_priv) >= 6)
1210                         err = gen6_drpc_info(m);
1211                 else
1212                         err = ironlake_drpc_info(m);
1213         }
1214
1215         return err;
1216 }
1217
1218 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1219 {
1220         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1221
1222         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1223                    dev_priv->fb_tracking.busy_bits);
1224
1225         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1226                    dev_priv->fb_tracking.flip_bits);
1227
1228         return 0;
1229 }
1230
/*
 * debugfs "i915_fbc_status": report whether FBC is active (and if not,
 * why), plus the per-generation compression status bits when it is.
 * Takes the runtime-PM wakeref before fbc->lock, matching the driver's
 * lock ordering.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* Compression status register/mask varies by generation. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1271
1272 static int i915_fbc_false_color_get(void *data, u64 *val)
1273 {
1274         struct drm_i915_private *dev_priv = data;
1275
1276         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1277                 return -ENODEV;
1278
1279         *val = dev_priv->fbc.false_color;
1280
1281         return 0;
1282 }
1283
1284 static int i915_fbc_false_color_set(void *data, u64 val)
1285 {
1286         struct drm_i915_private *dev_priv = data;
1287         u32 reg;
1288
1289         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1290                 return -ENODEV;
1291
1292         mutex_lock(&dev_priv->fbc.lock);
1293
1294         reg = I915_READ(ILK_DPFC_CONTROL);
1295         dev_priv->fbc.false_color = val;
1296
1297         I915_WRITE(ILK_DPFC_CONTROL, val ?
1298                    (reg | FBC_CTL_FALSE_COLOR) :
1299                    (reg & ~FBC_CTL_FALSE_COLOR));
1300
1301         mutex_unlock(&dev_priv->fbc.lock);
1302         return 0;
1303 }
1304
/* u64 debugfs attribute wiring the false-color get/set pair above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1308
1309 static int i915_ips_status(struct seq_file *m, void *unused)
1310 {
1311         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1312         intel_wakeref_t wakeref;
1313
1314         if (!HAS_IPS(dev_priv))
1315                 return -ENODEV;
1316
1317         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1318
1319         seq_printf(m, "Enabled by kernel parameter: %s\n",
1320                    yesno(i915_modparams.enable_ips));
1321
1322         if (INTEL_GEN(dev_priv) >= 8) {
1323                 seq_puts(m, "Currently: unknown\n");
1324         } else {
1325                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1326                         seq_puts(m, "Currently: enabled\n");
1327                 else
1328                         seq_puts(m, "Currently: disabled\n");
1329         }
1330
1331         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1332
1333         return 0;
1334 }
1335
/*
 * debugfs "i915_sr_status": report whether self-refresh is enabled,
 * probing the platform-specific status bit. Holds POWER_DOMAIN_INIT
 * so the relevant display registers are powered during the reads.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	/* anything else: sr_enabled stays false */

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1364
/*
 * debugfs "i915_ring_freq_table": for LLC platforms, walk the GPU
 * frequency range and ask pcode for the matching effective CPU/ring
 * frequencies (GEN6_PCODE_READ_MIN_FREQ_TABLE), printing one row per
 * GPU frequency step.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	/* The ring/CPU frequency table only exists with a shared LLC. */
	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* Pass the GPU freq in; pcode returns packed ia/ring freqs. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		/* Returned values are in units of 100 MHz. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1405
1406 static int i915_opregion(struct seq_file *m, void *unused)
1407 {
1408         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1409
1410         if (opregion->header)
1411                 seq_write(m, opregion->header, OPREGION_SIZE);
1412
1413         return 0;
1414 }
1415
1416 static int i915_vbt(struct seq_file *m, void *unused)
1417 {
1418         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1419
1420         if (opregion->vbt)
1421                 seq_write(m, opregion->vbt, opregion->vbt_size);
1422
1423         return 0;
1424 }
1425
/*
 * debugfs "i915_gem_framebuffer": describe the fbdev framebuffer (when
 * fbdev emulation is built in) and then every user-created framebuffer,
 * skipping the fbdev one so it is not listed twice. The framebuffer list
 * is walked under mode_config.fb_lock.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already described above as "fbcon". */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
1469
/* Append a one-line summary of an intel_ring's bookkeeping to the seq_file. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1475
/*
 * debugfs: dump every GEM context on the device together with the pinned
 * HW state of each of its engines.
 *
 * The contexts list is walked under gem.contexts.lock, but the printing
 * below may sleep. Each context is therefore pinned with a temporary
 * reference, the lock dropped for the body of the loop, and the
 * safe-iteration cursor revalidated once the lock is retaken.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts already on their way to destruction. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		/* Safe to sleep from here on; we hold our own reference. */
		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when the context still has slices left to remap. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		/*
		 * Retake the lock and fix up the next-pointer (the list may
		 * have changed while unlocked) before dropping our reference.
		 */
		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}
1534
1535 static const char *swizzle_string(unsigned swizzle)
1536 {
1537         switch (swizzle) {
1538         case I915_BIT_6_SWIZZLE_NONE:
1539                 return "none";
1540         case I915_BIT_6_SWIZZLE_9:
1541                 return "bit9";
1542         case I915_BIT_6_SWIZZLE_9_10:
1543                 return "bit9/bit10";
1544         case I915_BIT_6_SWIZZLE_9_11:
1545                 return "bit9/bit11";
1546         case I915_BIT_6_SWIZZLE_9_10_11:
1547                 return "bit9/bit10/bit11";
1548         case I915_BIT_6_SWIZZLE_9_17:
1549                 return "bit9/bit17";
1550         case I915_BIT_6_SWIZZLE_9_10_17:
1551                 return "bit9/bit10/bit17";
1552         case I915_BIT_6_SWIZZLE_UNKNOWN:
1553                 return "unknown";
1554         }
1555
1556         return "bug";
1557 }
1558
/*
 * debugfs: report the bit6 swizzling mode detected for the GGTT in X and
 * Y tiling, plus the raw memory-configuration registers the detection is
 * based on (which registers exist depends on the hardware generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	/* Hold the device awake across all the register reads below. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* Gen3/4: DRAM channel configuration registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM info and tiling/arbiter control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1607
1608 static const char *rps_power_to_str(unsigned int power)
1609 {
1610         static const char * const strings[] = {
1611                 [LOW_POWER] = "low power",
1612                 [BETWEEN] = "mixed",
1613                 [HIGH_POWER] = "high power",
1614         };
1615
1616         if (power >= ARRAY_SIZE(strings) || !strings[power])
1617                 return "unknown";
1618
1619         return strings[power];
1620 }
1621
/*
 * debugfs: summarize RPS (GPU frequency scaling) state: requested and
 * actual frequency, soft/hard limits, outstanding boosts and, while the
 * GPU is awake, the up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	/* Read the actual frequency only if the device is already awake. */
	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			/* Frequency is carried in bits 15:8 of the status. */
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(rps,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_gpu_freq(rps, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Snapshot the evaluation-interval counters under forcewake. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
1687
1688 static int i915_llc(struct seq_file *m, void *data)
1689 {
1690         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1691         const bool edram = INTEL_GEN(dev_priv) > 8;
1692
1693         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1694         seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1695                    dev_priv->edram_size_mb);
1696
1697         return 0;
1698 }
1699
/*
 * debugfs: dump the HuC firmware descriptor and, with the device
 * powered up, the live HUC_STATUS2 register.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	/* The register read requires the device to be awake. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
1717
/*
 * debugfs: dump the GuC firmware descriptor, then (with the device
 * awake) decode the GUC_STATUS fields and dump all 16 scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		/* Decode the bootrom/uKernel/MIA sub-fields of GUC_STATUS. */
		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
1750
1751 static const char *
1752 stringify_guc_log_type(enum guc_log_buffer_type type)
1753 {
1754         switch (type) {
1755         case GUC_ISR_LOG_BUFFER:
1756                 return "ISR";
1757         case GUC_DPC_LOG_BUFFER:
1758                 return "DPC";
1759         case GUC_CRASH_DUMP_LOG_BUFFER:
1760                 return "CRASH";
1761         default:
1762                 MISSING_CASE(type);
1763         }
1764
1765         return "";
1766 }
1767
/*
 * Print GuC log relay statistics: the overall relay-full count plus
 * per-buffer-type flush and overflow counters. Prints a short notice
 * instead when the relay has not been created.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		seq_puts(m, "GuC log relay not created\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
1791
1792 static int i915_guc_info(struct seq_file *m, void *data)
1793 {
1794         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1795
1796         if (!USES_GUC(dev_priv))
1797                 return -ENODEV;
1798
1799         i915_guc_log_info(m, dev_priv);
1800
1801         /* Add more as required ... */
1802
1803         return 0;
1804 }
1805
/*
 * debugfs: walk the GuC stage descriptor pool and print every descriptor
 * the GuC has marked active, followed by the per-engine execlist context
 * details recorded in each descriptor.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not currently in use by the GuC. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Each descriptor carries one LRC entry per uabi engine. */
		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
1857
/*
 * debugfs: hexdump a GuC log buffer, four 32-bit words per line. The
 * info_ent->data flag selects the firmware load-error capture instead of
 * the regular log vma.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	/* Nothing captured yet: report success with empty output. */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * NOTE(review): the +1..+3 accesses assume obj->base.size is a
	 * multiple of 16 bytes - TODO confirm (GEM objects appear to be
	 * page-backed, which would guarantee this).
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
1896
1897 static int i915_guc_log_level_get(void *data, u64 *val)
1898 {
1899         struct drm_i915_private *dev_priv = data;
1900
1901         if (!USES_GUC(dev_priv))
1902                 return -ENODEV;
1903
1904         *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
1905
1906         return 0;
1907 }
1908
1909 static int i915_guc_log_level_set(void *data, u64 val)
1910 {
1911         struct drm_i915_private *dev_priv = data;
1912
1913         if (!USES_GUC(dev_priv))
1914                 return -ENODEV;
1915
1916         return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
1917 }
1918
/* debugfs i915_guc_log_level: read/write the GuC log verbosity as a u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
1922
1923 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1924 {
1925         struct drm_i915_private *i915 = inode->i_private;
1926         struct intel_guc *guc = &i915->gt.uc.guc;
1927         struct intel_guc_log *log = &guc->log;
1928
1929         if (!intel_guc_is_running(guc))
1930                 return -ENODEV;
1931
1932         file->private_data = log;
1933
1934         return intel_guc_log_relay_open(log);
1935 }
1936
1937 static ssize_t
1938 i915_guc_log_relay_write(struct file *filp,
1939                          const char __user *ubuf,
1940                          size_t cnt,
1941                          loff_t *ppos)
1942 {
1943         struct intel_guc_log *log = filp->private_data;
1944         int val;
1945         int ret;
1946
1947         ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1948         if (ret < 0)
1949                 return ret;
1950
1951         /*
1952          * Enable and start the guc log relay on value of 1.
1953          * Flush log relay for any other value.
1954          */
1955         if (val == 1)
1956                 ret = intel_guc_log_relay_start(log);
1957         else
1958                 intel_guc_log_relay_flush(log);
1959
1960         return ret ?: cnt;
1961 }
1962
1963 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1964 {
1965         struct drm_i915_private *i915 = inode->i_private;
1966         struct intel_guc *guc = &i915->gt.uc.guc;
1967
1968         intel_guc_log_relay_close(&guc->log);
1969         return 0;
1970 }
1971
/* File operations for the i915_guc_log_relay debugfs control file. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
1978
1979 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
1980 {
1981         u8 val;
1982         static const char * const sink_status[] = {
1983                 "inactive",
1984                 "transition to active, capture and display",
1985                 "active, display from RFB",
1986                 "active, capture and display on sink device timings",
1987                 "transition to inactive, capture and display, timing re-sync",
1988                 "reserved",
1989                 "reserved",
1990                 "sink internal error",
1991         };
1992         struct drm_connector *connector = m->private;
1993         struct drm_i915_private *dev_priv = to_i915(connector->dev);
1994         struct intel_dp *intel_dp =
1995                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
1996         int ret;
1997
1998         if (!CAN_PSR(dev_priv)) {
1999                 seq_puts(m, "PSR Unsupported\n");
2000                 return -ENODEV;
2001         }
2002
2003         if (connector->status != connector_status_connected)
2004                 return -ENODEV;
2005
2006         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2007
2008         if (ret == 1) {
2009                 const char *str = "unknown";
2010
2011                 val &= DP_PSR_SINK_STATE_MASK;
2012                 if (val < ARRAY_SIZE(sink_status))
2013                         str = sink_status[val];
2014                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2015         } else {
2016                 return ret;
2017         }
2018
2019         return 0;
2020 }
/* Wire i915_psr_sink_status_show() up as a read-only debugfs file. */
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2022
/*
 * Decode and print the source-side PSR hardware state machine's live
 * status. The status register and its state encoding differ between
 * PSR1 and PSR2, hence the two lookup tables.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		/* PSR2 status lives in the per-transcoder EDP_PSR2_STATUS. */
		val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		/* PSR1 status lives in the per-transcoder EDP_PSR_STATUS. */
		val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2068
/*
 * debugfs: full source-side eDP PSR status: sink support, current mode
 * (PSR1/PSR2/disabled), control and live status registers, busy
 * frontbuffer bits and, where available, performance-counter, IRQ and
 * PSR2 selective-update statistics.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	/* Register reads below need the device awake and psr->lock held. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	/* PSR1 and PSR2 use different per-transcoder control registers. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
						       frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			/* Each register packs the SU count for three frames. */
			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
2165
2166 static int
2167 i915_edp_psr_debug_set(void *data, u64 val)
2168 {
2169         struct drm_i915_private *dev_priv = data;
2170         intel_wakeref_t wakeref;
2171         int ret;
2172
2173         if (!CAN_PSR(dev_priv))
2174                 return -ENODEV;
2175
2176         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2177
2178         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2179
2180         ret = intel_psr_debug_set(dev_priv, val);
2181
2182         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2183
2184         return ret;
2185 }
2186
/* debugfs get: return the current PSR debug control mask. */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	/* READ_ONCE: the mask may be updated concurrently by the set path. */
	*val = READ_ONCE(dev_priv->psr.debug);
	return 0;
}
2198
/* debugfs i915_edp_psr_debug: read/write the PSR debug control mask. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2202
/*
 * debugfs: report the accumulated GPU package energy in microjoules.
 *
 * MCH_SECP_NRG_STTS counts energy in units of 2^-ESU Joules, where ESU
 * is the Energy Status Units exponent read from bits 12:8 of
 * MSR_RAPL_POWER_UNIT (per the Intel SDM RAPL description).
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* Energy Status Units exponent: bits 12:8 of MSR_RAPL_POWER_UNIT. */
	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2225
/*
 * i915_runtime_pm_status - report the device's runtime power management
 * state. If the platform lacks runtime PM a notice is printed but the
 * remaining state is still dumped, since it is valid regardless.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	/* "enabled" when the display power wakeref has been released. */
	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		/* Dump outstanding wakeref tracking when built with debug. */
		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}
2258
2259 static int i915_power_domain_info(struct seq_file *m, void *unused)
2260 {
2261         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2262         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2263         int i;
2264
2265         mutex_lock(&power_domains->lock);
2266
2267         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2268         for (i = 0; i < power_domains->power_well_count; i++) {
2269                 struct i915_power_well *power_well;
2270                 enum intel_display_power_domain power_domain;
2271
2272                 power_well = &power_domains->power_wells[i];
2273                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2274                            power_well->count);
2275
2276                 for_each_power_domain(power_domain, power_well->desc->domains)
2277                         seq_printf(m, "  %-23s %d\n",
2278                                  intel_display_power_domain_str(power_domain),
2279                                  power_domains->domain_use_count[power_domain]);
2280         }
2281
2282         mutex_unlock(&power_domains->lock);
2283
2284         return 0;
2285 }
2286
/*
 * i915_dmc_info - report DMC/CSR firmware status and DC state counters.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload there are no counters worth reporting. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (INTEL_GEN(dev_priv) >= 12) {
		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						 SKL_CSR_DC3_DC5_COUNT;
		if (!IS_GEN9_LP(dev_priv))
			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
	/* dc6_reg stays zero-initialized on platforms without a DC6 counter. */
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
2340
2341 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2342                                  const struct drm_display_mode *mode)
2343 {
2344         int i;
2345
2346         for (i = 0; i < tabs; i++)
2347                 seq_putc(m, '\t');
2348
2349         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2350 }
2351
2352 static void intel_encoder_info(struct seq_file *m,
2353                                struct intel_crtc *crtc,
2354                                struct intel_encoder *encoder)
2355 {
2356         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2357         struct drm_connector_list_iter conn_iter;
2358         struct drm_connector *connector;
2359
2360         seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
2361                    encoder->base.base.id, encoder->base.name);
2362
2363         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2364         drm_for_each_connector_iter(connector, &conn_iter) {
2365                 const struct drm_connector_state *conn_state =
2366                         connector->state;
2367
2368                 if (conn_state->best_encoder != &encoder->base)
2369                         continue;
2370
2371                 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
2372                            connector->base.id, connector->name);
2373         }
2374         drm_connector_list_iter_end(&conn_iter);
2375 }
2376
/* Print the panel's fixed (native) mode. */
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	const struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
2383
2384 static void intel_hdcp_info(struct seq_file *m,
2385                             struct intel_connector *intel_connector)
2386 {
2387         bool hdcp_cap, hdcp2_cap;
2388
2389         hdcp_cap = intel_hdcp_capable(intel_connector);
2390         hdcp2_cap = intel_hdcp2_capable(intel_connector);
2391
2392         if (hdcp_cap)
2393                 seq_puts(m, "HDCP1.4 ");
2394         if (hdcp2_cap)
2395                 seq_puts(m, "HDCP2.2 ");
2396
2397         if (!hdcp_cap && !hdcp2_cap)
2398                 seq_puts(m, "None");
2399
2400         seq_puts(m, "\n");
2401 }
2402
2403 static void intel_dp_info(struct seq_file *m,
2404                           struct intel_connector *intel_connector)
2405 {
2406         struct intel_encoder *intel_encoder = intel_connector->encoder;
2407         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2408
2409         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2410         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2411         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2412                 intel_panel_info(m, &intel_connector->panel);
2413
2414         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2415                                 &intel_dp->aux);
2416         if (intel_connector->hdcp.shim) {
2417                 seq_puts(m, "\tHDCP version: ");
2418                 intel_hdcp_info(m, intel_connector);
2419         }
2420 }
2421
2422 static void intel_dp_mst_info(struct seq_file *m,
2423                           struct intel_connector *intel_connector)
2424 {
2425         struct intel_encoder *intel_encoder = intel_connector->encoder;
2426         struct intel_dp_mst_encoder *intel_mst =
2427                 enc_to_mst(&intel_encoder->base);
2428         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2429         struct intel_dp *intel_dp = &intel_dig_port->dp;
2430         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2431                                         intel_connector->port);
2432
2433         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2434 }
2435
2436 static void intel_hdmi_info(struct seq_file *m,
2437                             struct intel_connector *intel_connector)
2438 {
2439         struct intel_encoder *intel_encoder = intel_connector->encoder;
2440         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2441
2442         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2443         if (intel_connector->hdcp.shim) {
2444                 seq_puts(m, "\tHDCP version: ");
2445                 intel_hdcp_info(m, intel_connector);
2446         }
2447 }
2448
/* LVDS connectors only have a fixed panel mode to report. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2454
2455 static void intel_connector_info(struct seq_file *m,
2456                                  struct drm_connector *connector)
2457 {
2458         struct intel_connector *intel_connector = to_intel_connector(connector);
2459         const struct drm_connector_state *conn_state = connector->state;
2460         struct intel_encoder *encoder =
2461                 to_intel_encoder(conn_state->best_encoder);
2462         const struct drm_display_mode *mode;
2463
2464         seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
2465                    connector->base.id, connector->name,
2466                    drm_get_connector_status_name(connector->status));
2467
2468         if (connector->status == connector_status_disconnected)
2469                 return;
2470
2471         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2472                    connector->display_info.width_mm,
2473                    connector->display_info.height_mm);
2474         seq_printf(m, "\tsubpixel order: %s\n",
2475                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2476         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2477
2478         if (!encoder)
2479                 return;
2480
2481         switch (connector->connector_type) {
2482         case DRM_MODE_CONNECTOR_DisplayPort:
2483         case DRM_MODE_CONNECTOR_eDP:
2484                 if (encoder->type == INTEL_OUTPUT_DP_MST)
2485                         intel_dp_mst_info(m, intel_connector);
2486                 else
2487                         intel_dp_info(m, intel_connector);
2488                 break;
2489         case DRM_MODE_CONNECTOR_LVDS:
2490                 if (encoder->type == INTEL_OUTPUT_LVDS)
2491                         intel_lvds_info(m, intel_connector);
2492                 break;
2493         case DRM_MODE_CONNECTOR_HDMIA:
2494                 if (encoder->type == INTEL_OUTPUT_HDMI ||
2495                     encoder->type == INTEL_OUTPUT_DDI)
2496                         intel_hdmi_info(m, intel_connector);
2497                 break;
2498         default:
2499                 break;
2500         }
2501
2502         seq_printf(m, "\tmodes:\n");
2503         list_for_each_entry(mode, &connector->modes, head)
2504                 intel_seq_print_mode(m, 2, mode);
2505 }
2506
/* Map a drm_plane_type to the short tag used in the plane listing. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2524
/* Format a plane rotation bitmask into @buf as human readable text. */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2541
/*
 * intel_plane_uapi_info - print the uapi (userspace-requested) plane state:
 * framebuffer, src/dst rectangles and rotation.
 */
static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_format_name_buf format_name;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	/* format_name stays uninitialized without an fb; guarded below. */
	if (fb)
		drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
		   fb ? fb->width : 0, fb ? fb->height : 0,
		   DRM_RECT_FP_ARG(&src),
		   DRM_RECT_ARG(&dst),
		   rot_str);
}
2567
/*
 * intel_plane_hw_info - print the hw (committed) plane state.
 * Nothing is printed when no framebuffer is attached.
 */
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;
	char rot_str[48];

	if (!fb)
		return;

	drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->hw.rotation);

	/*
	 * NOTE(review): visible/src/dst are read from the uapi state even
	 * though the line is labelled "hw" — confirm this is intentional.
	 */
	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb->base.id, format_name.str,
		   fb->width, fb->height,
		   yesno(plane_state->uapi.visible),
		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
		   DRM_RECT_ARG(&plane_state->uapi.dst),
		   rot_str);
}
2592
/* Dump uapi and hw state for every plane attached to @crtc. */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
			   plane->base.base.id, plane->base.name,
			   plane_type(plane->base.type));
		intel_plane_uapi_info(m, plane);
		intel_plane_hw_info(m, plane);
	}
}
2606
2607 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
2608 {
2609         const struct intel_crtc_state *crtc_state =
2610                 to_intel_crtc_state(crtc->base.state);
2611         int num_scalers = crtc->num_scalers;
2612         int i;
2613
2614         /* Not all platformas have a scaler */
2615         if (num_scalers) {
2616                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2617                            num_scalers,
2618                            crtc_state->scaler_state.scaler_users,
2619                            crtc_state->scaler_state.scaler_id);
2620
2621                 for (i = 0; i < num_scalers; i++) {
2622                         const struct intel_scaler *sc =
2623                                 &crtc_state->scaler_state.scalers[i];
2624
2625                         seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2626                                    i, yesno(sc->in_use), sc->mode);
2627                 }
2628                 seq_puts(m, "\n");
2629         } else {
2630                 seq_puts(m, "\tNo scalers available on this platform\n");
2631         }
2632 }
2633
/*
 * intel_crtc_info - print a CRTC's uapi and hw state, its encoders,
 * planes and FIFO underrun reporting status.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	/* hw details are only printed while the pipe is enabled. */
	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
				    crtc_state->uapi.encoder_mask)
		intel_encoder_info(m, crtc, encoder);

	intel_plane_info(m, crtc);

	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
		   yesno(!crtc->cpu_fifo_underrun_disabled),
		   yesno(!crtc->pch_fifo_underrun_disabled));
}
2671
2672 static int i915_display_info(struct seq_file *m, void *unused)
2673 {
2674         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2675         struct drm_device *dev = &dev_priv->drm;
2676         struct intel_crtc *crtc;
2677         struct drm_connector *connector;
2678         struct drm_connector_list_iter conn_iter;
2679         intel_wakeref_t wakeref;
2680
2681         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2682
2683         drm_modeset_lock_all(dev);
2684
2685         seq_printf(m, "CRTC info\n");
2686         seq_printf(m, "---------\n");
2687         for_each_intel_crtc(dev, crtc)
2688                 intel_crtc_info(m, crtc);
2689
2690         seq_printf(m, "\n");
2691         seq_printf(m, "Connector info\n");
2692         seq_printf(m, "--------------\n");
2693         drm_connector_list_iter_begin(dev, &conn_iter);
2694         drm_for_each_connector_iter(connector, &conn_iter)
2695                 intel_connector_info(m, connector);
2696         drm_connector_list_iter_end(&conn_iter);
2697
2698         drm_modeset_unlock_all(dev);
2699
2700         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2701
2702         return 0;
2703 }
2704
/*
 * i915_engine_info - dump GT wakeref state and a detailed report for
 * every uabi-visible engine.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	/* Keep the device awake while poking at engine state. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
2728
/* Print the SSEU (slice/subslice/EU) topology of the device. */
static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}
2738
/* Report the tunables registered for the i915 memory shrinker. */
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}
2748
/*
 * i915_shared_dplls_info - dump the software-tracked state of every
 * shared DPLL, including the cached hardware register values.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* The modeset locks keep pll->state stable while we print it. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
2797
2798 static int i915_wa_registers(struct seq_file *m, void *unused)
2799 {
2800         struct drm_i915_private *i915 = node_to_i915(m->private);
2801         struct intel_engine_cs *engine;
2802
2803         for_each_uabi_engine(engine, i915) {
2804                 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2805                 const struct i915_wa *wa;
2806                 unsigned int count;
2807
2808                 count = wal->count;
2809                 if (!count)
2810                         continue;
2811
2812                 seq_printf(m, "%s: Workarounds applied: %u\n",
2813                            engine->name, count);
2814
2815                 for (wa = wal->list; count--; wa++)
2816                         seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2817                                    i915_mmio_reg_offset(wa->reg),
2818                                    wa->val, wa->mask);
2819
2820                 seq_printf(m, "\n");
2821         }
2822
2823         return 0;
2824 }
2825
2826 static int i915_ipc_status_show(struct seq_file *m, void *data)
2827 {
2828         struct drm_i915_private *dev_priv = m->private;
2829
2830         seq_printf(m, "Isochronous Priority Control: %s\n",
2831                         yesno(dev_priv->ipc_enabled));
2832         return 0;
2833 }
2834
2835 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2836 {
2837         struct drm_i915_private *dev_priv = inode->i_private;
2838
2839         if (!HAS_IPC(dev_priv))
2840                 return -ENODEV;
2841
2842         return single_open(file, i915_ipc_status_show, dev_priv);
2843 }
2844
2845 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2846                                      size_t len, loff_t *offp)
2847 {
2848         struct seq_file *m = file->private_data;
2849         struct drm_i915_private *dev_priv = m->private;
2850         intel_wakeref_t wakeref;
2851         bool enable;
2852         int ret;
2853
2854         ret = kstrtobool_from_user(ubuf, len, &enable);
2855         if (ret < 0)
2856                 return ret;
2857
2858         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2859                 if (!dev_priv->ipc_enabled && enable)
2860                         DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2861                 dev_priv->wm.distrust_bios_wm = true;
2862                 dev_priv->ipc_enabled = enable;
2863                 intel_enable_ipc(dev_priv);
2864         }
2865
2866         return len;
2867 }
2868
/* debugfs fops for i915_ipc_status: read current state, write to toggle. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
2877
/*
 * i915_ddb_info - dump the DDB (display data buffer) allocation per
 * pipe and plane. Only available on gen9+.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/*
		 * NOTE(review): if the loop above also iterates PLANE_CURSOR,
		 * this line repeats it under a "Cursor" label — verify.
		 */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
2916
/*
 * drrs_status_per_crtc - report DRRS (Display Refresh Rate Switching)
 * state for one CRTC: the VBT-declared support type plus, when DRRS is
 * enabled, the current refresh-rate state and busy frontbuffer bits.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently driven by this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex guards dp/refresh_rate_type/busy bits below. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
2991
2992 static int i915_drrs_status(struct seq_file *m, void *unused)
2993 {
2994         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2995         struct drm_device *dev = &dev_priv->drm;
2996         struct intel_crtc *intel_crtc;
2997         int active_crtc_cnt = 0;
2998
2999         drm_modeset_lock_all(dev);
3000         for_each_intel_crtc(dev, intel_crtc) {
3001                 if (intel_crtc->base.state->active) {
3002                         active_crtc_cnt++;
3003                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3004
3005                         drrs_status_per_crtc(m, dev, intel_crtc);
3006                 }
3007         }
3008         drm_modeset_unlock_all(dev);
3009
3010         if (!active_crtc_cnt)
3011                 seq_puts(m, "No active crtc found\n");
3012
3013         return 0;
3014 }
3015
3016 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3017 {
3018         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3019         struct drm_device *dev = &dev_priv->drm;
3020         struct intel_encoder *intel_encoder;
3021         struct intel_digital_port *intel_dig_port;
3022         struct drm_connector *connector;
3023         struct drm_connector_list_iter conn_iter;
3024
3025         drm_connector_list_iter_begin(dev, &conn_iter);
3026         drm_for_each_connector_iter(connector, &conn_iter) {
3027                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3028                         continue;
3029
3030                 intel_encoder = intel_attached_encoder(connector);
3031                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3032                         continue;
3033
3034                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3035                 if (!intel_dig_port->dp.can_mst)
3036                         continue;
3037
3038                 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3039                            intel_dig_port->base.base.base.id,
3040                            intel_dig_port->base.base.name);
3041                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3042         }
3043         drm_connector_list_iter_end(&conn_iter);
3044
3045         return 0;
3046 }
3047
/*
 * debugfs write handler for i915_dp_test_active: writing "1" arms the DP
 * compliance test handling on every connected (non-MST) DisplayPort
 * connector; any other value disarms it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* copy and NUL-terminate the user buffer in one call */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST "fake" connectors are handled via the primary encoder */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			/* on parse failure, bail out of the loop and report it */
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3106
3107 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3108 {
3109         struct drm_i915_private *dev_priv = m->private;
3110         struct drm_device *dev = &dev_priv->drm;
3111         struct drm_connector *connector;
3112         struct drm_connector_list_iter conn_iter;
3113         struct intel_dp *intel_dp;
3114
3115         drm_connector_list_iter_begin(dev, &conn_iter);
3116         drm_for_each_connector_iter(connector, &conn_iter) {
3117                 struct intel_encoder *encoder;
3118
3119                 if (connector->connector_type !=
3120                     DRM_MODE_CONNECTOR_DisplayPort)
3121                         continue;
3122
3123                 encoder = to_intel_encoder(connector->encoder);
3124                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3125                         continue;
3126
3127                 if (encoder && connector->status == connector_status_connected) {
3128                         intel_dp = enc_to_intel_dp(&encoder->base);
3129                         if (intel_dp->compliance.test_active)
3130                                 seq_puts(m, "1");
3131                         else
3132                                 seq_puts(m, "0");
3133                 } else
3134                         seq_puts(m, "0");
3135         }
3136         drm_connector_list_iter_end(&conn_iter);
3137
3138         return 0;
3139 }
3140
/* debugfs open: wire reads up to the seq_file show callback above */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
3147
/* i915_dp_test_active: readable (armed state) and writable (arm/disarm) */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3156
3157 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3158 {
3159         struct drm_i915_private *dev_priv = m->private;
3160         struct drm_device *dev = &dev_priv->drm;
3161         struct drm_connector *connector;
3162         struct drm_connector_list_iter conn_iter;
3163         struct intel_dp *intel_dp;
3164
3165         drm_connector_list_iter_begin(dev, &conn_iter);
3166         drm_for_each_connector_iter(connector, &conn_iter) {
3167                 struct intel_encoder *encoder;
3168
3169                 if (connector->connector_type !=
3170                     DRM_MODE_CONNECTOR_DisplayPort)
3171                         continue;
3172
3173                 encoder = to_intel_encoder(connector->encoder);
3174                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3175                         continue;
3176
3177                 if (encoder && connector->status == connector_status_connected) {
3178                         intel_dp = enc_to_intel_dp(&encoder->base);
3179                         if (intel_dp->compliance.test_type ==
3180                             DP_TEST_LINK_EDID_READ)
3181                                 seq_printf(m, "%lx",
3182                                            intel_dp->compliance.test_data.edid);
3183                         else if (intel_dp->compliance.test_type ==
3184                                  DP_TEST_LINK_VIDEO_PATTERN) {
3185                                 seq_printf(m, "hdisplay: %d\n",
3186                                            intel_dp->compliance.test_data.hdisplay);
3187                                 seq_printf(m, "vdisplay: %d\n",
3188                                            intel_dp->compliance.test_data.vdisplay);
3189                                 seq_printf(m, "bpc: %u\n",
3190                                            intel_dp->compliance.test_data.bpc);
3191                         }
3192                 } else
3193                         seq_puts(m, "0");
3194         }
3195         drm_connector_list_iter_end(&conn_iter);
3196
3197         return 0;
3198 }
3199 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3200
3201 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3202 {
3203         struct drm_i915_private *dev_priv = m->private;
3204         struct drm_device *dev = &dev_priv->drm;
3205         struct drm_connector *connector;
3206         struct drm_connector_list_iter conn_iter;
3207         struct intel_dp *intel_dp;
3208
3209         drm_connector_list_iter_begin(dev, &conn_iter);
3210         drm_for_each_connector_iter(connector, &conn_iter) {
3211                 struct intel_encoder *encoder;
3212
3213                 if (connector->connector_type !=
3214                     DRM_MODE_CONNECTOR_DisplayPort)
3215                         continue;
3216
3217                 encoder = to_intel_encoder(connector->encoder);
3218                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3219                         continue;
3220
3221                 if (encoder && connector->status == connector_status_connected) {
3222                         intel_dp = enc_to_intel_dp(&encoder->base);
3223                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3224                 } else
3225                         seq_puts(m, "0");
3226         }
3227         drm_connector_list_iter_end(&conn_iter);
3228
3229         return 0;
3230 }
3231 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3232
3233 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3234 {
3235         struct drm_i915_private *dev_priv = m->private;
3236         struct drm_device *dev = &dev_priv->drm;
3237         int level;
3238         int num_levels;
3239
3240         if (IS_CHERRYVIEW(dev_priv))
3241                 num_levels = 3;
3242         else if (IS_VALLEYVIEW(dev_priv))
3243                 num_levels = 1;
3244         else if (IS_G4X(dev_priv))
3245                 num_levels = 3;
3246         else
3247                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3248
3249         drm_modeset_lock_all(dev);
3250
3251         for (level = 0; level < num_levels; level++) {
3252                 unsigned int latency = wm[level];
3253
3254                 /*
3255                  * - WM1+ latency values in 0.5us units
3256                  * - latencies are in us on gen9/vlv/chv
3257                  */
3258                 if (INTEL_GEN(dev_priv) >= 9 ||
3259                     IS_VALLEYVIEW(dev_priv) ||
3260                     IS_CHERRYVIEW(dev_priv) ||
3261                     IS_G4X(dev_priv))
3262                         latency *= 10;
3263                 else if (level > 0)
3264                         latency *= 5;
3265
3266                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3267                            level, wm[level], latency / 10, latency % 10);
3268         }
3269
3270         drm_modeset_unlock_all(dev);
3271 }
3272
3273 static int pri_wm_latency_show(struct seq_file *m, void *data)
3274 {
3275         struct drm_i915_private *dev_priv = m->private;
3276         const u16 *latencies;
3277
3278         if (INTEL_GEN(dev_priv) >= 9)
3279                 latencies = dev_priv->wm.skl_latency;
3280         else
3281                 latencies = dev_priv->wm.pri_latency;
3282
3283         wm_latency_show(m, latencies);
3284
3285         return 0;
3286 }
3287
3288 static int spr_wm_latency_show(struct seq_file *m, void *data)
3289 {
3290         struct drm_i915_private *dev_priv = m->private;
3291         const u16 *latencies;
3292
3293         if (INTEL_GEN(dev_priv) >= 9)
3294                 latencies = dev_priv->wm.skl_latency;
3295         else
3296                 latencies = dev_priv->wm.spr_latency;
3297
3298         wm_latency_show(m, latencies);
3299
3300         return 0;
3301 }
3302
3303 static int cur_wm_latency_show(struct seq_file *m, void *data)
3304 {
3305         struct drm_i915_private *dev_priv = m->private;
3306         const u16 *latencies;
3307
3308         if (INTEL_GEN(dev_priv) >= 9)
3309                 latencies = dev_priv->wm.skl_latency;
3310         else
3311                 latencies = dev_priv->wm.cur_latency;
3312
3313         wm_latency_show(m, latencies);
3314
3315         return 0;
3316 }
3317
/* Primary WM latency file only exists on gen5+ and g4x. */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}
3327
/* Sprite WM latency file is not exposed on GMCH platforms. */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}
3337
/* Cursor WM latency file is not exposed on GMCH platforms. */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
3347
3348 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3349                                 size_t len, loff_t *offp, u16 wm[8])
3350 {
3351         struct seq_file *m = file->private_data;
3352         struct drm_i915_private *dev_priv = m->private;
3353         struct drm_device *dev = &dev_priv->drm;
3354         u16 new[8] = { 0 };
3355         int num_levels;
3356         int level;
3357         int ret;
3358         char tmp[32];
3359
3360         if (IS_CHERRYVIEW(dev_priv))
3361                 num_levels = 3;
3362         else if (IS_VALLEYVIEW(dev_priv))
3363                 num_levels = 1;
3364         else if (IS_G4X(dev_priv))
3365                 num_levels = 3;
3366         else
3367                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3368
3369         if (len >= sizeof(tmp))
3370                 return -EINVAL;
3371
3372         if (copy_from_user(tmp, ubuf, len))
3373                 return -EFAULT;
3374
3375         tmp[len] = '\0';
3376
3377         ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3378                      &new[0], &new[1], &new[2], &new[3],
3379                      &new[4], &new[5], &new[6], &new[7]);
3380         if (ret != num_levels)
3381                 return -EINVAL;
3382
3383         drm_modeset_lock_all(dev);
3384
3385         for (level = 0; level < num_levels; level++)
3386                 wm[level] = new[level];
3387
3388         drm_modeset_unlock_all(dev);
3389
3390         return len;
3391 }
3392
3393
3394 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3395                                     size_t len, loff_t *offp)
3396 {
3397         struct seq_file *m = file->private_data;
3398         struct drm_i915_private *dev_priv = m->private;
3399         u16 *latencies;
3400
3401         if (INTEL_GEN(dev_priv) >= 9)
3402                 latencies = dev_priv->wm.skl_latency;
3403         else
3404                 latencies = dev_priv->wm.pri_latency;
3405
3406         return wm_latency_write(file, ubuf, len, offp, latencies);
3407 }
3408
3409 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3410                                     size_t len, loff_t *offp)
3411 {
3412         struct seq_file *m = file->private_data;
3413         struct drm_i915_private *dev_priv = m->private;
3414         u16 *latencies;
3415
3416         if (INTEL_GEN(dev_priv) >= 9)
3417                 latencies = dev_priv->wm.skl_latency;
3418         else
3419                 latencies = dev_priv->wm.spr_latency;
3420
3421         return wm_latency_write(file, ubuf, len, offp, latencies);
3422 }
3423
3424 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3425                                     size_t len, loff_t *offp)
3426 {
3427         struct seq_file *m = file->private_data;
3428         struct drm_i915_private *dev_priv = m->private;
3429         u16 *latencies;
3430
3431         if (INTEL_GEN(dev_priv) >= 9)
3432                 latencies = dev_priv->wm.skl_latency;
3433         else
3434                 latencies = dev_priv->wm.cur_latency;
3435
3436         return wm_latency_write(file, ubuf, len, offp, latencies);
3437 }
3438
/* i915_pri_wm_latency: read/write primary plane watermark latencies */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3447
/* i915_spr_wm_latency: read/write sprite plane watermark latencies */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3456
/* i915_cur_wm_latency: read/write cursor plane watermark latencies */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3465
3466 static int
3467 i915_wedged_get(void *data, u64 *val)
3468 {
3469         struct drm_i915_private *i915 = data;
3470         int ret = intel_gt_terminally_wedged(&i915->gt);
3471
3472         switch (ret) {
3473         case -EIO:
3474                 *val = 1;
3475                 return 0;
3476         case 0:
3477                 *val = 0;
3478                 return 0;
3479         default:
3480                 return ret;
3481         }
3482 }
3483
/*
 * debugfs i915_wedged write: declare the engines in @val hung and kick
 * off the driver's error handling (with state capture) for them.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3501
/*
 * Set the NOA programming delay, rejecting values so large that the
 * 32-bit CS timestamp arithmetic could never observe them elapsing.
 */
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}
3518
/* Report the currently programmed NOA programming delay. */
static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
3532
/*
 * Bit flags accepted by the i915_drop_caches debugfs file; DROP_ALL is
 * the union of every supported flag.
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
/* Reading i915_drop_caches reports the full mask of supported flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
/*
 * Apply the GT-level DROP_* actions from @val in order: wedge the GT if
 * the engines refuse to idle, retire requests, wait for idle, and
 * finally recover a wedged GT with a reset if requested.
 */
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	/* if the engines won't idle in time, declare the GT wedged */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		/* DROP_IDLE additionally waits for GT power management idle */
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	/* recover a (terminally) wedged GT via the error handler */
	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}
3589
/*
 * debugfs i915_drop_caches write: drop the caches selected by the
 * DROP_* bits in @val — GT state first, then shrinker passes, an RCU
 * barrier and finally draining freed objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	/* run the shrinker passes inside an fs_reclaim annotated section */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
3626
/*
 * Read the current cache sharing policy field from GEN6_MBCUNIT_SNPCR;
 * only available on gen6/7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* hold a runtime pm wakeref for the register read */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3644
/*
 * Write the cache sharing policy field of GEN6_MBCUNIT_SNPCR via
 * read-modify-write; only values 0-3 are accepted, gen6/7 only.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* reject values outside the SNPCR policy field range */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3670
3671 static void
3672 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
3673                           u8 *to_mask)
3674 {
3675         int offset = slice * sseu->ss_stride;
3676
3677         memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
3678 }
3679
/* i915_cache_sharing: read/write the gen6/7 snoop policy (0-3) */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
3683
/*
 * Read the CHV subslice power-gating signal registers and accumulate the
 * currently powered subslices and EUs into @sseu.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV only has slice 0 */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* each PG_ENABLE bit gates a pair of EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
3716
/*
 * Read the gen10 power-gate control ACK registers and accumulate the
 * currently powered slice/subslice/EU configuration into @sseu.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		/* subslice fusing comes from the static topology */
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each set ACK bit accounts for two EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3773
/*
 * Read the gen9 power-gate control ACK registers and accumulate the
 * currently powered slice/subslice/EU configuration into @sseu.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* on gen9_bc take the subslice mask from the static topology */
		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				/* record the live subslice in the packed mask */
				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			/* each set ACK bit accounts for two EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3832
/*
 * Report the BDW SSEU status from GEN8_GT_SLICE_INFO. There is no
 * per-subslice/EU readback here: each enabled slice is credited with the
 * full fused topology, then one EU is subtracted for every subslice
 * fused down to 7 EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
3858
3859 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3860                                  const struct sseu_dev_info *sseu)
3861 {
3862         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3863         const char *type = is_available_info ? "Available" : "Enabled";
3864         int s;
3865
3866         seq_printf(m, "  %s Slice Mask: %04x\n", type,
3867                    sseu->slice_mask);
3868         seq_printf(m, "  %s Slice Total: %u\n", type,
3869                    hweight8(sseu->slice_mask));
3870         seq_printf(m, "  %s Subslice Total: %u\n", type,
3871                    intel_sseu_subslice_total(sseu));
3872         for (s = 0; s < fls(sseu->slice_mask); s++) {
3873                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
3874                            s, intel_sseu_subslices_per_slice(sseu, s));
3875         }
3876         seq_printf(m, "  %s EU Total: %u\n", type,
3877                    sseu->eu_total);
3878         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
3879                    sseu->eu_per_subslice);
3880
3881         if (!is_available_info)
3882                 return;
3883
3884         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3885         if (HAS_POOLED_EU(dev_priv))
3886                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
3887
3888         seq_printf(m, "  Has Slice Power Gating: %s\n",
3889                    yesno(sseu->has_slice_pg));
3890         seq_printf(m, "  Has Subslice Power Gating: %s\n",
3891                    yesno(sseu->has_subslice_pg));
3892         seq_printf(m, "  Has EU Power Gating: %s\n",
3893                    yesno(sseu->has_eu_pg));
3894 }
3895
/*
 * debugfs i915_sseu_status: dump the device's theoretical SSEU topology,
 * then the topology currently powered up, read from hardware under a
 * runtime-PM wakeref.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	/* only exposed on gen8+ */
	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* mirror the device limits so the per-slice masks are sized right */
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		/* platform-specific status readback */
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
3930
/*
 * Opening i915_forcewake_user pins the GT awake and, on gen6+ (where
 * forcewake exists), takes a userspace forcewake reference so registers
 * remain accessible while the file is held open. Both references are
 * dropped in i915_forcewake_release().
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	/* user_wakeref is bookkeeping for how many debugfs holds exist */
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}
3943
3944 static int i915_forcewake_release(struct inode *inode, struct file *file)
3945 {
3946         struct drm_i915_private *i915 = inode->i_private;
3947         struct intel_gt *gt = &i915->gt;
3948
3949         if (INTEL_GEN(i915) >= 6)
3950                 intel_uncore_forcewake_user_put(&i915->uncore);
3951         intel_gt_pm_put(gt);
3952         atomic_dec(&gt->user_wakeref);
3953
3954         return 0;
3955 }
3956
/* i915_forcewake_user: holding the file open keeps the GT/registers awake */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
3962
3963 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
3964 {
3965         struct drm_i915_private *dev_priv = m->private;
3966         struct i915_hotplug *hotplug = &dev_priv->hotplug;
3967
3968         /* Synchronize with everything first in case there's been an HPD
3969          * storm, but we haven't finished handling it in the kernel yet
3970          */
3971         intel_synchronize_irq(dev_priv);
3972         flush_work(&dev_priv->hotplug.dig_port_work);
3973         flush_delayed_work(&dev_priv->hotplug.hotplug_work);
3974
3975         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
3976         seq_printf(m, "Detected: %s\n",
3977                    yesno(delayed_work_pending(&hotplug->reenable_work)));
3978
3979         return 0;
3980 }
3981
3982 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
3983                                         const char __user *ubuf, size_t len,
3984                                         loff_t *offp)
3985 {
3986         struct seq_file *m = file->private_data;
3987         struct drm_i915_private *dev_priv = m->private;
3988         struct i915_hotplug *hotplug = &dev_priv->hotplug;
3989         unsigned int new_threshold;
3990         int i;
3991         char *newline;
3992         char tmp[16];
3993
3994         if (len >= sizeof(tmp))
3995                 return -EINVAL;
3996
3997         if (copy_from_user(tmp, ubuf, len))
3998                 return -EFAULT;
3999
4000         tmp[len] = '\0';
4001
4002         /* Strip newline, if any */
4003         newline = strchr(tmp, '\n');
4004         if (newline)
4005                 *newline = '\0';
4006
4007         if (strcmp(tmp, "reset") == 0)
4008                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4009         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4010                 return -EINVAL;
4011
4012         if (new_threshold > 0)
4013                 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4014                               new_threshold);
4015         else
4016                 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4017
4018         spin_lock_irq(&dev_priv->irq_lock);
4019         hotplug->hpd_storm_threshold = new_threshold;
4020         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4021         for_each_hpd_pin(i)
4022                 hotplug->stats[i].count = 0;
4023         spin_unlock_irq(&dev_priv->irq_lock);
4024
4025         /* Re-enable hpd immediately if we were in an irq storm */
4026         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4027
4028         return len;
4029 }
4030
/* seq_file open wrapper; i_private carries the drm_i915_private pointer */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4035
/* read: show threshold/state, write: set threshold (see handlers above) */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4044
4045 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4046 {
4047         struct drm_i915_private *dev_priv = m->private;
4048
4049         seq_printf(m, "Enabled: %s\n",
4050                    yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4051
4052         return 0;
4053 }
4054
/* seq_file open wrapper; i_private carries the drm_i915_private pointer */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4061
/*
 * Enable/disable HPD short-pulse storm detection. Accepts a boolean, or
 * "reset" to restore the platform default (enabled only when the device
 * lacks DP MST support). Clears per-pin storm stats under irq_lock.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	/* need room for the terminating NUL */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4108
/* read: show enabled state, write: set it (see handlers above) */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4117
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * eDP output that supports it. Each CRTC's lock is taken in turn, and any
 * pending commit on that CRTC is allowed to reach the hardware first so
 * we don't race an in-flight modeset.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* only exposed on gen7+ */
	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret is 0 here, so the out path just moves to the next CRTC */
		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* wait for any pending commit to hit the hardware */
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* only connectors driven by this CRTC */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			/* DRRS is only toggled on eDP outputs */
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4185
4186 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4187
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * pipe (it gets disarmed after the first underrun to avoid an interrupt
 * storm), then resets the FBC underrun tracking. For each CRTC we wait
 * for any pending commit to fully complete before re-arming.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* a falsy write is accepted but does nothing */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			/* wait for both hw programming and the page flip */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4241
/* write-only debugfs entry; see i915_fifo_underrun_reset_write() */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4248
/*
 * Read-only seq_file entries registered via drm_debugfs_create_files().
 * The optional fourth field is per-entry driver data (used only by the
 * error-log variant of the GuC log dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4292
/*
 * Writable debugfs entries, each with its own file_operations; created
 * in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4321
4322 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4323 {
4324         struct drm_minor *minor = dev_priv->drm.primary;
4325         int i;
4326
4327         debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
4328                             to_i915(minor->dev), &i915_forcewake_fops);
4329
4330         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4331                 debugfs_create_file(i915_debugfs_files[i].name,
4332                                     S_IRUGO | S_IWUSR,
4333                                     minor->debugfs_root,
4334                                     to_i915(minor->dev),
4335                                     i915_debugfs_files[i].fops);
4336         }
4337
4338         return drm_debugfs_create_files(i915_debugfs_list,
4339                                         I915_DEBUGFS_ENTRIES,
4340                                         minor->debugfs_root, minor);
4341 }
4342
/* One DPCD register range to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4353
/* DPCD ranges dumped by i915_dpcd_show(); eDP-only entries are skipped
 * for plain DP connectors via the .edp flag. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4366
/*
 * Dump the DPCD ranges from i915_dpcd_debug[] of a connected DP/eDP sink
 * by reading them over the AUX channel.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		/* eDP-only ranges make no sense on an external DP port */
		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/*
			 * NOTE(review): a short read (0 <= err < size) is
			 * printed silently truncated here; consider flagging
			 * it as an error instead.
			 */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4401
4402 static int i915_panel_show(struct seq_file *m, void *data)
4403 {
4404         struct drm_connector *connector = m->private;
4405         struct intel_dp *intel_dp =
4406                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4407
4408         if (connector->status != connector_status_connected)
4409                 return -ENODEV;
4410
4411         seq_printf(m, "Panel power up delay: %d\n",
4412                    intel_dp->panel_power_up_delay);
4413         seq_printf(m, "Panel power down delay: %d\n",
4414                    intel_dp->panel_power_down_delay);
4415         seq_printf(m, "Backlight on delay: %d\n",
4416                    intel_dp->backlight_on_delay);
4417         seq_printf(m, "Backlight off delay: %d\n",
4418                    intel_dp->backlight_off_delay);
4419
4420         return 0;
4421 }
4422 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4423
/*
 * Report the HDCP version(s) supported by a connected sink; -EINVAL if
 * the connector has no HDCP shim (i.e. HDCP is not supported on it).
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4443
/*
 * Report DSC/FEC support and enable state for the connector's current
 * CRTC. Uses a drm_modeset acquire context with explicit deadlock
 * backoff: on -EDEADLK the locks are dropped and the whole sequence is
 * retried (try_again loop) rather than failing the read.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* back off and retry on lock contention */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		/* no data without a connected sink driving a CRTC */
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is only relevant for non-eDP (external DP) links */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4502
4503 static ssize_t i915_dsc_fec_support_write(struct file *file,
4504                                           const char __user *ubuf,
4505                                           size_t len, loff_t *offp)
4506 {
4507         bool dsc_enable = false;
4508         int ret;
4509         struct drm_connector *connector =
4510                 ((struct seq_file *)file->private_data)->private;
4511         struct intel_encoder *encoder = intel_attached_encoder(connector);
4512         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4513
4514         if (len == 0)
4515                 return 0;
4516
4517         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4518                          len);
4519
4520         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4521         if (ret < 0)
4522                 return ret;
4523
4524         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4525                          (dsc_enable) ? "true" : "false");
4526         intel_dp->force_dsc_en = dsc_enable;
4527
4528         *offp += len;
4529         return len;
4530 }
4531
/* seq_file open wrapper; i_private carries the drm_connector pointer */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4538
/* read: DSC/FEC status, write: force DSC enable (see handlers above) */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4547
4548 /**
4549  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4550  * @connector: pointer to a registered drm_connector
4551  *
4552  * Cleanup will be done by drm_connector_unregister() through a call to
4553  * drm_debugfs_connector_remove().
4554  *
4555  * Returns 0 on success, negative error codes on error.
4556  */
4557 int i915_debugfs_connector_add(struct drm_connector *connector)
4558 {
4559         struct dentry *root = connector->debugfs_entry;
4560         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4561
4562         /* The connector must have been registered beforehands. */
4563         if (!root)
4564                 return -ENODEV;
4565
4566         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4567             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4568                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4569                                     connector, &i915_dpcd_fops);
4570
4571         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4572                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4573                                     connector, &i915_panel_fops);
4574                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4575                                     connector, &i915_psr_sink_status_fops);
4576         }
4577
4578         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4579             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4580             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4581                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4582                                     connector, &i915_hdcp_sink_capability_fops);
4583         }
4584
4585         if (INTEL_GEN(dev_priv) >= 10 &&
4586             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4587              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4588                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4589                                     connector, &i915_dsc_fec_support_fops);
4590
4591         return 0;
4592 }