Merge drm/drm-next into drm-intel-next-queued
author     Jani Nikula <jani.nikula@intel.com>
           Wed, 2 May 2018 09:20:32 +0000 (12:20 +0300)
committer  Jani Nikula <jani.nikula@intel.com>
           Wed, 2 May 2018 09:20:32 +0000 (12:20 +0300)
Need commit d224985a5e31 ("sched/wait, drivers/drm: Convert
wait_on_atomic_t() usage to the new wait_var_event() API") in dinq
(drm-intel-next-queued) to be able to fix
https://bugs.freedesktop.org/show_bug.cgi?id=106085.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
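
For context, d224985a5e31 replaces the wait_on_atomic_t()/wake_up_atomic_t()
pair with wait_var_event()/wake_up_var(), which takes the wait condition
directly instead of an action callback. A minimal sketch of the conversion
pattern, assuming an illustrative "obj"/"active" refcount (these names are
not from the commit itself):

	#include <linux/atomic.h>
	#include <linux/sched.h>
	#include <linux/wait_bit.h>	/* wait_var_event(), wake_up_var() */

	struct obj {
		atomic_t active;
	};

	/*
	 * Old style:
	 *	wait_on_atomic_t(&obj->active, atomic_t_wait,
	 *			 TASK_UNINTERRUPTIBLE);
	 * could only wait for the atomic_t to reach zero, via an
	 * action callback.
	 */
	static void obj_wait_idle(struct obj *obj)
	{
		/* New style: state the condition to wait for directly. */
		wait_var_event(&obj->active, !atomic_read(&obj->active));
	}

	static void obj_put(struct obj *obj)
	{
		/*
		 * The waker pairs the final decrement with wake_up_var()
		 * on the same address (old style: wake_up_atomic_t()).
		 */
		if (atomic_dec_and_test(&obj->active))
			wake_up_var(&obj->active);
	}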
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lrc.c

drivers/gpu/drm/drm_atomic.c: Simple merge
drivers/gpu/drm/i915/gvt/cmd_parser.c: Simple merge
drivers/gpu/drm/i915/i915_drv.c: Simple merge
drivers/gpu/drm/i915/intel_display.c: Simple merge
drivers/gpu/drm/i915/intel_fbdev.c: Simple merge

diff --cc drivers/gpu/drm/i915/intel_lrc.c
@@@ -785,82 -741,7 +785,83 @@@ execlists_cancel_port_requests(struct i
                port++;
        }
  
+       execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 +      execlists_user_end(execlists);
 +}
 +
 +static void clear_gtiir(struct intel_engine_cs *engine)
 +{
 +      struct drm_i915_private *dev_priv = engine->i915;
 +      int i;
 +
 +      /*
 +       * Clear any pending interrupt state.
 +       *
 +       * We do it twice out of paranoia that some of the IIR are
 +       * double buffered, and so if we only reset it once there may
 +       * still be an interrupt pending.
 +       */
 +      if (INTEL_GEN(dev_priv) >= 11) {
 +              static const struct {
 +                      u8 bank;
 +                      u8 bit;
 +              } gen11_gtiir[] = {
 +                      [RCS] = {0, GEN11_RCS0},
 +                      [BCS] = {0, GEN11_BCS},
 +                      [_VCS(0)] = {1, GEN11_VCS(0)},
 +                      [_VCS(1)] = {1, GEN11_VCS(1)},
 +                      [_VCS(2)] = {1, GEN11_VCS(2)},
 +                      [_VCS(3)] = {1, GEN11_VCS(3)},
 +                      [_VECS(0)] = {1, GEN11_VECS(0)},
 +                      [_VECS(1)] = {1, GEN11_VECS(1)},
 +              };
 +              unsigned long irqflags;
 +
 +              GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
 +
 +              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +              for (i = 0; i < 2; i++) {
 +                      gen11_reset_one_iir(dev_priv,
 +                                          gen11_gtiir[engine->id].bank,
 +                                          gen11_gtiir[engine->id].bit);
 +              }
 +              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +      } else {
 +              static const u8 gtiir[] = {
 +                      [RCS]  = 0,
 +                      [BCS]  = 0,
 +                      [VCS]  = 1,
 +                      [VCS2] = 1,
 +                      [VECS] = 3,
 +              };
 +
 +              GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
 +
 +              for (i = 0; i < 2; i++) {
 +                      I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
 +                                 engine->irq_keep_mask);
 +                      POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
 +              }
 +              GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
 +                         engine->irq_keep_mask);
 +      }
 +}
 +
 +static void reset_irq(struct intel_engine_cs *engine)
 +{
 +      /* Mark all CS interrupts as complete */
 +      smp_store_mb(engine->execlists.active, 0);
 +      synchronize_hardirq(engine->i915->drm.irq);
 +
 +      clear_gtiir(engine);
 +
 +      /*
 +       * The port is checked prior to scheduling a tasklet, but
 +       * just in case we have suspended the tasklet to do the
 +       * wedging make sure that when it wakes, it decides there
 +       * is no work to do by clearing the irq_posted bit.
 +       */
 +      clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
  }
  
  static void execlists_cancel_requests(struct intel_engine_cs *engine)