Merge tag 'drm-intel-next-2013-11-29' of git://people.freedesktop.org/~danvet/drm...
authorDave Airlie <airlied@redhat.com>
Wed, 18 Dec 2013 00:39:56 +0000 (10:39 +1000)
committerDave Airlie <airlied@redhat.com>
Wed, 18 Dec 2013 00:39:56 +0000 (10:39 +1000)
- some more ppgtt prep patches from Ben
- a few fbc fixes from Ville
- power well rework from Imre
- vlv forcewake improvements from Deepak S, Ville and Jesse
- a few smaller things all over

[airlied: fixup forcewake conflict]
* tag 'drm-intel-next-2013-11-29' of git://people.freedesktop.org/~danvet/drm-intel: (97 commits)
  drm/i915: Fix port name in vlv_wait_port_ready() timeout warning
  drm/i915: Return a drm_mode_status enum in the mode_valid vfuncs
  drm/i915: add intel_display_power_enabled_sw() for use in atomic ctx
  drm/i915: drop DRM_ERROR in intel_fbdev init
  drm/i915/vlv: use parallel context restore when coming out of RC6
  drm/i915/vlv: use a lower RC6 timeout on VLV
  drm/i915/sdvo: Fix up debug output to not split lines
  drm/i915: make sparse happy for the new vlv mmio read function
  drm/i915: drop the right force-wake engine in the vlv mmio funcs
  drm/i915: Fix GT wake FIFO free entries for VLV
  drm/i915: Report all GTFIFODBG errors
  drm/i915: Enabling DebugFS for valleyview forcewake counts
  drm/i915/vlv: Valleyview support for forcewake Individual power wells.
  drm/i915: Add power well arguments to force wake routines.
  drm/i915: Do not attempt to re-enable an unconnected primary plane
  drm/i915: add a debugfs entry for power domain info
  drm/i915: add a default always-on power well
  drm/i915: don't do BDW/HSW specific powerdomains init on other platforms
  drm/i915: protect HSW power well check with IS_HASWELL in redisable_vga
  drm/i915: use IS_HASWELL/BROADWELL instead of HAS_POWER_WELL
  ...

Conflicts:
drivers/gpu/drm/i915/intel_display.c

12 files changed:
1  2 
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c

@@@ -114,7 -114,7 +114,7 @@@ MODULE_PARM_DESC(enable_hangcheck
                "(default: true)");
  
  int i915_enable_ppgtt __read_mostly = -1;
- module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
  MODULE_PARM_DESC(i915_enable_ppgtt,
                "Enable PPGTT (default: true)");
  
@@@ -155,7 -155,6 +155,6 @@@ MODULE_PARM_DESC(prefault_disable
                "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
  
  static struct drm_driver driver;
- extern int intel_agp_enabled;
  
  static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@@ -265,6 -264,7 +264,7 @@@ static const struct intel_device_info i
  static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
  };
@@@ -280,6 -280,7 +280,7 @@@ static const struct intel_device_info i
  #define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1
  
@@@ -292,7 -293,6 +293,6 @@@ static const struct intel_device_info i
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
-       .has_fbc = 1,
  };
  
  static const struct intel_device_info intel_ivybridge_q_info = {
@@@ -307,6 -307,7 +307,7 @@@ static const struct intel_device_info i
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
  };
  
@@@ -315,6 -316,7 +316,7 @@@ static const struct intel_device_info i
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
+       .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
  };
  
@@@ -332,7 -334,6 +334,6 @@@ static const struct intel_device_info i
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
-       .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
  };
  
@@@ -534,10 -535,8 +535,10 @@@ static int i915_drm_freeze(struct drm_d
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw.
                 */
 +              mutex_lock(&dev->mode_config.mutex);
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                        dev_priv->display.crtc_disable(crtc);
 +              mutex_unlock(&dev->mode_config.mutex);
  
                intel_modeset_suspend_hw(dev);
        }
@@@ -761,14 -760,14 +762,14 @@@ int i915_reset(struct drm_device *dev
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
-                       DRM_ERROR("Reset not implemented, but ignoring "
-                                 "error for simulated gpu hangs\n");
+                       DRM_INFO("Reset not implemented, but ignoring "
+                                "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }
  
        if (ret) {
-               DRM_ERROR("Failed to reset chip.\n");
+               DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
-               bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
                dev_priv->ums.mm_suspended = 0;
  
                ret = i915_gem_init_hw(dev);
-               if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
-                       DRM_ERROR("HW contexts didn't survive reset\n");
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
@@@ -830,17 -826,7 +828,7 @@@ static int i915_pci_probe(struct pci_de
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
  
-       /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
-        * implementation for gen3 (and only gen3) that used legacy drm maps
-        * (gasp!) to share buffers between X and the client. Hence we need to
-        * keep around the fake agp stuff for gen3, even when kms is enabled. */
-       if (intel_info->gen != 3) {
-               driver.driver_features &=
-                       ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
-       } else if (!intel_agp_enabled) {
-               DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
-               return -ENODEV;
-       }
+       driver.driver_features &= ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
  
        return drm_get_pci_dev(pdev, ent, &driver);
  }
@@@ -1023,14 -1009,24 +1011,24 @@@ static int __init i915_init(void
                driver.driver_features &= ~DRIVER_MODESET;
  #endif
  
-       if (!(driver.driver_features & DRIVER_MODESET))
+       if (!(driver.driver_features & DRIVER_MODESET)) {
                driver.get_vblank_timestamp = NULL;
+ #ifndef CONFIG_DRM_I915_UMS
+               /* Silently fail loading to not upset userspace. */
+               return 0;
+ #endif
+       }
  
        return drm_pci_init(&driver, &i915_pci_driver);
  }
  
  static void __exit i915_exit(void)
  {
+ #ifndef CONFIG_DRM_I915_UMS
+       if (!(driver.driver_features & DRIVER_MODESET))
+               return; /* Never loaded a driver. */
+ #endif
        drm_pci_exit(&driver, &i915_pci_driver);
  }
  
@@@ -89,6 -89,18 +89,18 @@@ enum port 
  };
  #define port_name(p) ((p) + 'A')
  
+ #define I915_NUM_PHYS_VLV 1
+ enum dpio_channel {
+       DPIO_CH0,
+       DPIO_CH1
+ };
+ enum dpio_phy {
+       DPIO_PHY0,
+       DPIO_PHY1
+ };
  enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A,
        POWER_DOMAIN_PIPE_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
        POWER_DOMAIN_VGA,
+       POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_INIT,
  
        POWER_DOMAIN_NUM,
@@@ -351,6 -364,7 +364,7 @@@ struct drm_i915_error_state 
        enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
  };
  
+ struct intel_connector;
  struct intel_crtc_config;
  struct intel_crtc;
  struct intel_limit;
@@@ -413,11 -427,20 +427,20 @@@ struct drm_i915_display_funcs 
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
+       int (*setup_backlight)(struct intel_connector *connector);
+       uint32_t (*get_backlight)(struct intel_connector *connector);
+       void (*set_backlight)(struct intel_connector *connector,
+                             uint32_t level);
+       void (*disable_backlight)(struct intel_connector *connector);
+       void (*enable_backlight)(struct intel_connector *connector);
  };
  
  struct intel_uncore_funcs {
-       void (*force_wake_get)(struct drm_i915_private *dev_priv);
-       void (*force_wake_put)(struct drm_i915_private *dev_priv);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv,
+                                                       int fw_engine);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv,
+                                                       int fw_engine);
  
        uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@@ -442,6 -465,9 +465,9 @@@ struct intel_uncore 
        unsigned fifo_count;
        unsigned forcewake_count;
  
+       unsigned fw_rendercount;
+       unsigned fw_mediacount;
        struct delayed_work force_wake_work;
  };
  
@@@ -708,7 -734,6 +734,6 @@@ enum intel_sbi_destination 
  #define QUIRK_PIPEA_FORCE (1<<0)
  #define QUIRK_LVDS_SSC_DISABLE (1<<1)
  #define QUIRK_INVERT_BRIGHTNESS (1<<2)
- #define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
  
  struct intel_fbdev;
  struct intel_fbc_work;
@@@ -761,8 -786,6 +786,6 @@@ struct i915_suspend_saved_registers 
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
        u32 saveBLC_HIST_CTL_B;
-       u32 saveBLC_PWM_CTL_B;
-       u32 saveBLC_PWM_CTL2_B;
        u32 saveBLC_CPU_PWM_CTL;
        u32 saveBLC_CPU_PWM_CTL2;
        u32 saveFPB0;
@@@ -932,21 -955,29 +955,29 @@@ struct intel_ilk_power_mgmt 
  
  /* Power well structure for haswell */
  struct i915_power_well {
+       const char *name;
+       bool always_on;
        /* power well enable/disable usage count */
        int count;
+       unsigned long domains;
+       void *data;
+       void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+                   bool enable);
+       bool (*is_enabled)(struct drm_device *dev,
+                          struct i915_power_well *power_well);
  };
  
- #define I915_MAX_POWER_WELLS 1
  struct i915_power_domains {
        /*
         * Power wells needed for initialization at driver init and suspend
         * time are on. They are kept on until after the first modeset.
         */
        bool init_power_on;
+       int power_well_count;
  
        struct mutex lock;
-       struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+       int domain_use_count[POWER_DOMAIN_NUM];
+       struct i915_power_well *power_wells;
  };
  
  struct i915_dri1_state {
@@@ -1077,34 -1108,30 +1108,30 @@@ struct i915_gpu_error 
        unsigned long missed_irq_rings;
  
        /**
-        * State variable and reset counter controlling the reset flow
+        * State variable controlling the reset flow and count
         *
-        * Upper bits are for the reset counter.  This counter is used by the
-        * wait_seqno code to race-free noticed that a reset event happened and
-        * that it needs to restart the entire ioctl (since most likely the
-        * seqno it waited for won't ever signal anytime soon).
+        * This is a counter which gets incremented when reset is triggered,
+        * and again when reset has been handled. So odd values (lowest bit set)
+        * means that reset is in progress and even values that
+        * (reset_counter >> 1):th reset was successfully completed.
+        *
+        * If reset is not completed succesfully, the I915_WEDGE bit is
+        * set meaning that hardware is terminally sour and there is no
+        * recovery. All waiters on the reset_queue will be woken when
+        * that happens.
+        *
+        * This counter is used by the wait_seqno code to notice that reset
+        * event happened and it needs to restart the entire ioctl (since most
+        * likely the seqno it waited for won't ever signal anytime soon).
         *
         * This is important for lock-free wait paths, where no contended lock
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
-        *
-        * Lowest bit controls the reset state machine: Set means a reset is in
-        * progress. This state will (presuming we don't have any bugs) decay
-        * into either unset (successful reset) or the special WEDGED value (hw
-        * terminally sour). All waiters on the reset_queue will be woken when
-        * that happens.
         */
        atomic_t reset_counter;
  
-       /**
-        * Special values/flags for reset_counter
-        *
-        * Note that the code relies on
-        *      I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
-        * being true.
-        */
  #define I915_RESET_IN_PROGRESS_FLAG   1
- #define I915_WEDGED                   0xffffffff
+ #define I915_WEDGED                   (1 << 31)
  
        /**
         * Waitqueue to signal when the reset has completed. Used by clients
@@@ -1368,13 -1395,8 +1395,8 @@@ typedef struct drm_i915_private 
        struct intel_overlay *overlay;
        unsigned int sprite_scaling_enabled;
  
-       /* backlight */
-       struct {
-               int level;
-               bool enabled;
-               spinlock_t lock; /* bl registers and the above bl fields */
-               struct backlight_device *device;
-       } backlight;
+       /* backlight registers and fields in struct intel_panel */
+       spinlock_t backlight_lock;
  
        /* LVDS info */
        bool no_aux_handshake;
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;
+       int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
  
        /* Reclocking support */
        bool render_reclock_avail;
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
  
-       bool hw_contexts_disabled;
        uint32_t hw_context_size;
        struct list_head context_list;
  
@@@ -1755,8 -1777,13 +1777,13 @@@ struct drm_i915_file_private 
  #define IS_MOBILE(dev)                (INTEL_INFO(dev)->is_mobile)
  #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0C00)
- #define IS_ULT(dev)           (IS_HASWELL(dev) && \
+ #define IS_BDW_ULT(dev)               (IS_BROADWELL(dev) && \
+                                (((dev)->pdev->device & 0xf) == 0x2  || \
+                                ((dev)->pdev->device & 0xf) == 0x6 || \
+                                ((dev)->pdev->device & 0xf) == 0xe))
+ #define IS_HSW_ULT(dev)               (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+ #define IS_ULT(dev)           (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
  #define IS_HSW_GT3(dev)               (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
  #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
  #define HAS_IPS(dev)          (IS_ULT(dev) || IS_BROADWELL(dev))
  
  #define HAS_DDI(dev)          (INTEL_INFO(dev)->has_ddi)
- #define HAS_POWER_WELL(dev)   (IS_HASWELL(dev) || IS_BROADWELL(dev))
  #define HAS_FPGA_DBG_UNCLAIMED(dev)   (INTEL_INFO(dev)->has_fpga_dbg)
  #define HAS_PSR(dev)          (IS_HASWELL(dev) || IS_BROADWELL(dev))
 +#define HAS_PC8(dev)          (IS_HASWELL(dev)) /* XXX HSW:ULX */
  
  #define INTEL_PCH_DEVICE_ID_MASK              0xff00
  #define INTEL_PCH_IBX_DEVICE_ID_TYPE          0x3b00
@@@ -1908,7 -1933,6 +1934,6 @@@ extern void intel_pm_init(struct drm_de
  extern void intel_uncore_sanitize(struct drm_device *dev);
  extern void intel_uncore_early_sanitize(struct drm_device *dev);
  extern void intel_uncore_init(struct drm_device *dev);
- extern void intel_uncore_clear_errors(struct drm_device *dev);
  extern void intel_uncore_check_errors(struct drm_device *dev);
  extern void intel_uncore_fini(struct drm_device *dev);
  
@@@ -2060,12 -2084,17 +2085,17 @@@ int __must_check i915_gem_check_wedge(s
  static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
  {
        return unlikely(atomic_read(&error->reset_counter)
-                       & I915_RESET_IN_PROGRESS_FLAG);
+                       & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
  }
  
  static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
  {
-       return atomic_read(&error->reset_counter) == I915_WEDGED;
+       return atomic_read(&error->reset_counter) & I915_WEDGED;
+ }
+ static inline u32 i915_reset_count(struct i915_gpu_error *error)
+ {
+       return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
  }
  
  void i915_gem_reset(struct drm_device *dev);
@@@ -2177,7 -2206,7 +2207,7 @@@ i915_gem_obj_ggtt_pin(struct drm_i915_g
  }
  
  /* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
int __must_check i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct intel_ring_buffer *ring,
@@@ -2395,6 -2424,8 +2425,8 @@@ extern int intel_enable_rc6(const struc
  extern bool i915_semaphore_is_enabled(struct drm_device *dev);
  int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
+ int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
  
  /* overlay */
  extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@@ -2410,8 -2441,8 +2442,8 @@@ extern void intel_display_print_error_s
   * must be set to prevent GT core from power down and stale values being
   * returned.
   */
- void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
- void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
  
  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
  int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@@ -2426,6 -2457,8 +2458,8 @@@ u32 vlv_cck_read(struct drm_i915_privat
  void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
  void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+ void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
  void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
  u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@@ -2435,8 -2468,27 +2469,27 @@@ u32 intel_sbi_read(struct drm_i915_priv
  void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);
  
- int vlv_gpu_freq(int ddr_freq, int val);
- int vlv_freq_opcode(int ddr_freq, int val);
+ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+ void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+ #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+       (((reg) >= 0x2000 && (reg) < 0x4000) ||\
+       ((reg) >= 0x5000 && (reg) < 0x8000) ||\
+       ((reg) >= 0xB000 && (reg) < 0x12000) ||\
+       ((reg) >= 0x2E000 && (reg) < 0x30000))
+ #define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+       (((reg) >= 0x12000 && (reg) < 0x14000) ||\
+       ((reg) >= 0x22000 && (reg) < 0x24000) ||\
+       ((reg) >= 0x30000 && (reg) < 0x40000))
+ #define FORCEWAKE_RENDER      (1 << 0)
+ #define FORCEWAKE_MEDIA               (1 << 1)
+ #define FORCEWAKE_ALL         (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
  
  #define I915_READ8(reg)               dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
  #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@@ -4442,9 -4442,10 +4442,9 @@@ i915_gem_init_hw(struct drm_device *dev
        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  
 -      if (IS_HSW_GT3(dev))
 -              I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
 -      else
 -              I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
 +      if (IS_HASWELL(dev))
 +              I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
 +                         LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  
        if (HAS_PCH_NOP(dev)) {
                u32 temp = I915_READ(GEN7_MSG_CTL);
         * XXX: There was some w/a described somewhere suggesting loading
         * contexts before PPGTT.
         */
-       i915_gem_context_init(dev);
+       ret = i915_gem_context_init(dev);
+       if (ret) {
+               i915_gem_cleanup_ringbuffer(dev);
+               DRM_ERROR("Context initialization failed %d\n", ret);
+               return ret;
+       }
        if (dev_priv->mm.aliasing_ppgtt) {
                ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
                if (ret) {
@@@ -33,9 -33,6 +33,9 @@@
  #include "intel_drv.h"
  #include <linux/dma_remapping.h>
  
 +#define  __EXEC_OBJECT_HAS_PIN (1<<31)
 +#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 +
  struct eb_vmas {
        struct list_head vmas;
        int and;
@@@ -46,7 -43,7 +46,7 @@@
  };
  
  static struct eb_vmas *
- eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+ eb_create(struct drm_i915_gem_execbuffer2 *args)
  {
        struct eb_vmas *eb = NULL;
  
@@@ -190,28 -187,7 +190,28 @@@ static struct i915_vma *eb_get_vma(stru
        }
  }
  
 -static void eb_destroy(struct eb_vmas *eb) {
 +static void
 +i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 +{
 +      struct drm_i915_gem_exec_object2 *entry;
 +      struct drm_i915_gem_object *obj = vma->obj;
 +
 +      if (!drm_mm_node_allocated(&vma->node))
 +              return;
 +
 +      entry = vma->exec_entry;
 +
 +      if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 +              i915_gem_object_unpin_fence(obj);
 +
 +      if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 +              i915_gem_object_unpin(obj);
 +
 +      entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 +}
 +
 +static void eb_destroy(struct eb_vmas *eb)
 +{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;
  
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
 +              i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
@@@ -332,7 -307,7 +332,7 @@@ i915_gem_execbuffer_relocate_entry(stru
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;
  
-       target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+       target_offset = target_vma->node.start;
  
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
@@@ -479,8 -454,7 +479,7 @@@ i915_gem_execbuffer_relocate_vma_slow(s
  }
  
  static int
- i915_gem_execbuffer_relocate(struct eb_vmas *eb,
-                            struct i915_address_space *vm)
+ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
  {
        struct i915_vma *vma;
        int ret = 0;
        return ret;
  }
  
 -#define  __EXEC_OBJECT_HAS_PIN (1<<31)
 -#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 -
  static int
  need_reloc_mappable(struct i915_vma *vma)
  {
@@@ -574,6 -551,26 +573,6 @@@ i915_gem_execbuffer_reserve_vma(struct 
        return 0;
  }
  
 -static void
 -i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 -{
 -      struct drm_i915_gem_exec_object2 *entry;
 -      struct drm_i915_gem_object *obj = vma->obj;
 -
 -      if (!drm_mm_node_allocated(&vma->node))
 -              return;
 -
 -      entry = vma->exec_entry;
 -
 -      if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 -              i915_gem_object_unpin_fence(obj);
 -
 -      if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 -              i915_gem_object_unpin(obj);
 -
 -      entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 -}
 -
  static int
  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
                                goto err;
                }
  
 -err:          /* Decrement pin count for bound objects */
 -              list_for_each_entry(vma, vmas, exec_list)
 -                      i915_gem_execbuffer_unreserve_vma(vma);
 -
 +err:
                if (ret != -ENOSPC || retry++)
                        return ret;
  
 +              /* Decrement pin count for bound objects */
 +              list_for_each_entry(vma, vmas, exec_list)
 +                      i915_gem_execbuffer_unreserve_vma(vma);
 +
                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
@@@ -711,7 -707,6 +710,7 @@@ i915_gem_execbuffer_relocate_slow(struc
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
 +              i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
  
@@@ -1106,7 -1101,7 +1105,7 @@@ i915_gem_do_execbuffer(struct drm_devic
                goto pre_mutex_err;
        }
  
-       eb = eb_create(args, vm);
+       eb = eb_create(args);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
  
        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
-               ret = i915_gem_execbuffer_relocate(eb, vm);
+               ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@@ -57,9 -57,7 +57,9 @@@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t
  #define HSW_WB_LLC_AGE3                       HSW_CACHEABILITY_CONTROL(0x2)
  #define HSW_WB_LLC_AGE0                       HSW_CACHEABILITY_CONTROL(0x3)
  #define HSW_WB_ELLC_LLC_AGE0          HSW_CACHEABILITY_CONTROL(0xb)
 +#define HSW_WB_ELLC_LLC_AGE3          HSW_CACHEABILITY_CONTROL(0x8)
  #define HSW_WT_ELLC_LLC_AGE0          HSW_CACHEABILITY_CONTROL(0x6)
 +#define HSW_WT_ELLC_LLC_AGE3          HSW_CACHEABILITY_CONTROL(0x7)
  
  #define GEN8_PTES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
  #define GEN8_PDES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@@ -187,10 -185,10 +187,10 @@@ static gen6_gtt_pte_t iris_pte_encode(d
        case I915_CACHE_NONE:
                break;
        case I915_CACHE_WT:
 -              pte |= HSW_WT_ELLC_LLC_AGE0;
 +              pte |= HSW_WT_ELLC_LLC_AGE3;
                break;
        default:
 -              pte |= HSW_WB_ELLC_LLC_AGE0;
 +              pte |= HSW_WB_ELLC_LLC_AGE3;
                break;
        }
  
@@@ -240,10 -238,16 +240,16 @@@ static int gen8_ppgtt_enable(struct drm
                for_each_ring(ring, dev_priv, j) {
                        ret = gen8_write_pdp(ring, i, addr);
                        if (ret)
-                               return ret;
+                               goto err_out;
                }
        }
        return 0;
+ err_out:
+       for_each_ring(ring, dev_priv, j)
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+       return ret;
  }
  
  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@@ -318,6 -322,8 +324,8 @@@ static void gen8_ppgtt_cleanup(struct i
                container_of(vm, struct i915_hw_ppgtt, base);
        int i, j;
  
+       drm_mm_takedown(&vm->mm);
        for (i = 0; i < ppgtt->num_pd_pages ; i++) {
                if (ppgtt->pd_dma_addr[i]) {
                        pci_unmap_page(ppgtt->base.dev->pdev,
                kfree(ppgtt->gen8_pt_dma_addr[i]);
        }
  
-       __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-       __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+       __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
  }
  
  /**
@@@ -381,6 -387,8 +389,8 @@@ static int gen8_ppgtt_init(struct i915_
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
  
        BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
  
@@@ -632,6 -640,8 +642,8 @@@ static int gen6_ppgtt_init(struct i915_
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+       ppgtt->base.start = 0;
+       ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
@@@ -1126,7 -1136,6 +1138,6 @@@ void i915_gem_setup_global_gtt(struct d
                if (ret)
                        DRM_DEBUG_KMS("Reservation failed\n");
                obj->has_global_gtt_mapping = 1;
-               list_add(&vma->vma_link, &obj->vma_list);
        }
  
        dev_priv->gtt.base.start = start;
@@@ -1241,6 -1250,11 +1252,11 @@@ static inline unsigned int gen8_get_tot
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+       if (bdw_gmch_ctl > 4) {
+               WARN_ON(!i915_preliminary_hw_support);
+               return 4<<20;
+       }
        return bdw_gmch_ctl << 20;
  }
  
@@@ -1397,6 -1411,8 +1413,8 @@@ static void gen6_gmch_remove(struct i91
  {
  
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+       drm_mm_takedown(&vm->mm);
        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
  }
@@@ -173,7 -173,7 +173,7 @@@ static void intel_prepare_ddi_buffers(s
                ddi_translations = ddi_translations_dp;
                break;
        case PORT_D:
 -              if (intel_dpd_is_edp(dev))
 +              if (intel_dp_is_edp(dev, PORT_D))
                        ddi_translations = ddi_translations_edp;
                else
                        ddi_translations = ddi_translations_dp;
@@@ -713,8 -713,6 +713,6 @@@ bool intel_ddi_pll_mode_set(struct drm_
        uint32_t reg, val;
        int clock = intel_crtc->config.port_clock;
  
-       /* TODO: reuse PLLs when possible (compare values) */
        intel_ddi_put_crtc_pll(crtc);
  
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
        } else if (type == INTEL_OUTPUT_HDMI) {
                unsigned p, n2, r2;
  
-               if (plls->wrpll1_refcount == 0) {
+               intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+               val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+                     WRPLL_DIVIDER_POST(p);
+               if (val == I915_READ(WRPLL_CTL1)) {
+                       DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+                                     pipe_name(pipe));
+                       reg = WRPLL_CTL1;
+               } else if (val == I915_READ(WRPLL_CTL2)) {
+                       DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+                                     pipe_name(pipe));
+                       reg = WRPLL_CTL2;
+               } else if (plls->wrpll1_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
                                      pipe_name(pipe));
-                       plls->wrpll1_refcount++;
                        reg = WRPLL_CTL1;
-                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
                } else if (plls->wrpll2_refcount == 0) {
                        DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
                                      pipe_name(pipe));
-                       plls->wrpll2_refcount++;
                        reg = WRPLL_CTL2;
-                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
                } else {
                        DRM_ERROR("No WRPLLs available!\n");
                        return false;
                }
  
-               WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
-                    "WRPLL already enabled\n");
-               intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-               val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-                     WRPLL_DIVIDER_POST(p);
+               if (reg == WRPLL_CTL1) {
+                       plls->wrpll1_refcount++;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+               } else {
+                       plls->wrpll2_refcount++;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+               }
  
        } else if (type == INTEL_OUTPUT_ANALOG) {
                if (plls->spll_refcount == 0) {
@@@ -1158,10 -1165,9 +1165,10 @@@ static void intel_ddi_post_disable(stru
        if (wait)
                intel_wait_ddi_buf_idle(dev_priv, port);
  
 -      if (type == INTEL_OUTPUT_EDP) {
 +      if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                ironlake_edp_panel_vdd_on(intel_dp);
 +              intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
                ironlake_edp_panel_off(intel_dp);
        }
  
@@@ -1407,26 -1413,6 +1414,26 @@@ void intel_ddi_get_config(struct intel_
        default:
                break;
        }
 +
 +      if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
 +          pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
 +              /*
 +               * This is a big fat ugly hack.
 +               *
 +               * Some machines in UEFI boot mode provide us a VBT that has 18
 +               * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
 +               * unknown we fail to light up. Yet the same BIOS boots up with
 +               * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
 +               * max, not what it tells us to use.
 +               *
 +               * Note: This will still be broken if the eDP panel is not lit
 +               * up by the BIOS, and thus we can't get the mode at module
 +               * load.
 +               */
 +              DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
 +                            pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
 +              dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
 +      }
  }
  
  static void intel_ddi_destroy(struct drm_encoder *encoder)
@@@ -748,10 -748,10 +748,10 @@@ enum transcoder intel_pipe_to_cpu_trans
        return intel_crtc->config.cpu_transcoder;
  }
  
- static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 frame, frame_reg = PIPEFRAME(pipe);
+       u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
  
        frame = I915_READ(frame_reg);
  
@@@ -772,8 -772,8 +772,8 @@@ void intel_wait_for_vblank(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = PIPESTAT(pipe);
  
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ironlake_wait_for_vblank(dev, pipe);
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+               g4x_wait_for_vblank(dev, pipe);
                return;
        }
  
@@@ -1361,6 -1361,7 +1361,7 @@@ static void intel_init_dpio(struct drm_
        if (!IS_VALLEYVIEW(dev))
                return;
  
+       DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
@@@ -1494,18 -1495,25 +1495,25 @@@ static void vlv_disable_pll(struct drm_
        POSTING_READ(DPLL(pipe));
  }
  
- void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+               struct intel_digital_port *dport)
  {
        u32 port_mask;
  
-       if (!port)
+       switch (dport->port) {
+       case PORT_B:
                port_mask = DPLL_PORTB_READY_MASK;
-       else
+               break;
+       case PORT_C:
                port_mask = DPLL_PORTC_READY_MASK;
+               break;
+       default:
+               BUG();
+       }
  
        if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
                WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
-                    'B' + port, I915_READ(DPLL(0)));
+                    port_name(dport->port), I915_READ(DPLL(0)));
  }
  
  /**
@@@ -2233,7 -2241,12 +2241,12 @@@ void intel_display_handle_reset(struct 
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
                mutex_lock(&crtc->mutex);
-               if (intel_crtc->active)
+               /*
+                * FIXME: Once we have proper support for primary planes (and
+                * disabling them without disabling the entire crtc) allow again
+                * a NULL crtc->fb.
+                */
+               if (intel_crtc->active && crtc->fb)
                        dev_priv->display.update_plane(crtc, crtc->fb,
                                                       crtc->x, crtc->y);
                mutex_unlock(&crtc->mutex);
@@@ -3910,6 -3923,174 +3923,174 @@@ static void i9xx_pfit_enable(struct int
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
  }
  
+ int valleyview_get_vco(struct drm_i915_private *dev_priv)
+ {
+       int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+       /* Obtain SKU information */
+       mutex_lock(&dev_priv->dpio_lock);
+       hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+               CCK_FUSE_HPLL_FREQ_MASK;
+       mutex_unlock(&dev_priv->dpio_lock);
+       return vco_freq[hpll_freq];
+ }
+ /* Adjust CDclk dividers to allow high res or save power if possible */
+ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val, cmd;
+       if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+               cmd = 2;
+       else if (cdclk == 266)
+               cmd = 1;
+       else
+               cmd = 0;
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+       val &= ~DSPFREQGUAR_MASK;
+       val |= (cmd << DSPFREQGUAR_SHIFT);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+       if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+                     DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+                    50)) {
+               DRM_ERROR("timed out waiting for CDclk change\n");
+       }
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (cdclk == 400) {
+               u32 divider, vco;
+               vco = valleyview_get_vco(dev_priv);
+               divider = ((vco << 1) / cdclk) - 1;
+               mutex_lock(&dev_priv->dpio_lock);
+               /* adjust cdclk divider */
+               val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+               val &= ~0xf;
+               val |= divider;
+               vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+               mutex_unlock(&dev_priv->dpio_lock);
+       }
+       mutex_lock(&dev_priv->dpio_lock);
+       /* adjust self-refresh exit latency value */
+       val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+       val &= ~0x7f;
+       /*
+        * For high bandwidth configs, we set a higher latency in the bunit
+        * so that the core display fetch happens in time to avoid underruns.
+        */
+       if (cdclk == 400)
+               val |= 4500 / 250; /* 4.5 usec */
+       else
+               val |= 3000 / 250; /* 3.0 usec */
+       vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+       mutex_unlock(&dev_priv->dpio_lock);
+       /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+       intel_i2c_reset(dev);
+ }
+ static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+ {
+       int cur_cdclk, vco;
+       int divider;
+       vco = valleyview_get_vco(dev_priv);
+       mutex_lock(&dev_priv->dpio_lock);
+       divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+       mutex_unlock(&dev_priv->dpio_lock);
+       divider &= 0xf;
+       cur_cdclk = (vco << 1) / (divider + 1);
+       return cur_cdclk;
+ }
+ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+                                int max_pixclk)
+ {
+       int cur_cdclk;
+       cur_cdclk = valleyview_cur_cdclk(dev_priv);
+       /*
+        * Really only a few cases to deal with, as only 4 CDclks are supported:
+        *   200MHz
+        *   267MHz
+        *   320MHz
+        *   400MHz
+        * So we check to see whether we're above 90% of the lower bin and
+        * adjust if needed.
+        */
+       if (max_pixclk > 288000) {
+               return 400;
+       } else if (max_pixclk > 240000) {
+               return 320;
+       } else
+               return 266;
+       /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+ }
+ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+                                unsigned modeset_pipes,
+                                struct intel_crtc_config *pipe_config)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_crtc *intel_crtc;
+       int max_pixclk = 0;
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (modeset_pipes & (1 << intel_crtc->pipe))
+                       max_pixclk = max(max_pixclk,
+                                        pipe_config->adjusted_mode.crtc_clock);
+               else if (intel_crtc->base.enabled)
+                       max_pixclk = max(max_pixclk,
+                                        intel_crtc->config.adjusted_mode.crtc_clock);
+       }
+       return max_pixclk;
+ }
+ static void valleyview_modeset_global_pipes(struct drm_device *dev,
+                                           unsigned *prepare_pipes,
+                                           unsigned modeset_pipes,
+                                           struct intel_crtc_config *pipe_config)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc;
+       int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+                                              pipe_config);
+       int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+       if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+               return;
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+                           base.head)
+               if (intel_crtc->base.enabled)
+                       *prepare_pipes |= (1 << intel_crtc->pipe);
+ }
+ static void valleyview_modeset_global_resources(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+       int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+       int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+       if (req_cdclk != cur_cdclk)
+               valleyview_set_cdclk(dev, req_cdclk);
+ }
  static void valleyview_crtc_enable(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
@@@ -4634,24 -4815,24 +4815,24 @@@ static void vlv_pllb_recal_opamp(struc
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x8cffffff;
        reg_val = 0x8c000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  }
  
  static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@@ -4720,15 -4901,15 +4901,15 @@@ static void vlv_update_pll(struct intel
                vlv_pllb_recal_opamp(dev_priv, pipe);
  
        /* Set up Tx target for periodic Rcomp update */
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
  
        /* Disable target IRef on PLL */
-       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+       reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
-       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
  
        /* Disable fast lock */
-       vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+       vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
  
        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  
        mdiv |= DPIO_ENABLE_CALIBRATION;
-       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
  
        /* Set HBR and RBR LPF coefficients */
        if (crtc->config.port_clock == 162000 ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
-               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
  
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
                /* Use SSC source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
-                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }
  
-       coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+       coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
-       vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
  
-       vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
  
        /* Enable DPIO clock input */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
@@@ -5261,7 -5442,7 +5442,7 @@@ static void vlv_crtc_clock_get(struct i
        int refclk = 100000;
  
        mutex_lock(&dev_priv->dpio_lock);
-       mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+       mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->dpio_lock);
  
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@@ -5815,7 -5996,7 +5996,7 @@@ static void intel_set_pipe_csc(struct d
                uint16_t postoff = 0;
  
                if (intel_crtc->config.limited_color_range)
 -                      postoff = (16 * (1 << 13) / 255) & 0x1fff;
 +                      postoff = (16 * (1 << 12) / 255) & 0x1fff;
  
                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
                I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@@ -6402,7 -6583,7 +6583,7 @@@ static void hsw_restore_lcpll(struct dr
  
        /* Make sure we're not on PC8 state before disabling PC8, otherwise
         * we'll hang the machine! */
-       gen6_gt_force_wake_get(dev_priv);
 -      dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
++      gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }
  
-       gen6_gt_force_wake_put(dev_priv);
 -      dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
++      gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void hsw_enable_pc8_work(struct work_struct *__work)
@@@ -6518,9 -6699,6 +6699,9 @@@ static void __hsw_disable_package_c8(st
  
  void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
  {
 +      if (!HAS_PC8(dev_priv->dev))
 +              return;
 +
        mutex_lock(&dev_priv->pc8.lock);
        __hsw_enable_package_c8(dev_priv);
        mutex_unlock(&dev_priv->pc8.lock);
  
  void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
  {
 +      if (!HAS_PC8(dev_priv->dev))
 +              return;
 +
        mutex_lock(&dev_priv->pc8.lock);
        __hsw_disable_package_c8(dev_priv);
        mutex_unlock(&dev_priv->pc8.lock);
@@@ -6568,9 -6743,6 +6749,9 @@@ static void hsw_update_package_c8(struc
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool allow;
  
 +      if (!HAS_PC8(dev_priv->dev))
 +              return;
 +
        if (!i915_enable_pc8)
                return;
  
@@@ -6594,9 -6766,6 +6775,9 @@@ done
  
  static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
  {
 +      if (!HAS_PC8(dev_priv->dev))
 +              return;
 +
        mutex_lock(&dev_priv->pc8.lock);
        if (!dev_priv->pc8.gpu_idle) {
                dev_priv->pc8.gpu_idle = true;
  
  static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
  {
 +      if (!HAS_PC8(dev_priv->dev))
 +              return;
 +
        mutex_lock(&dev_priv->pc8.lock);
        if (dev_priv->pc8.gpu_idle) {
                dev_priv->pc8.gpu_idle = false;
@@@ -7203,9 -7369,7 +7384,9 @@@ static void i9xx_update_cursor(struct d
                intel_crtc->cursor_visible = visible;
        }
        /* and commit changes on next vblank */
 +      POSTING_READ(CURCNTR(pipe));
        I915_WRITE(CURBASE(pipe), base);
 +      POSTING_READ(CURBASE(pipe));
  }
  
  static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
                intel_crtc->cursor_visible = visible;
        }
        /* and commit changes on next vblank */
 +      POSTING_READ(CURCNTR_IVB(pipe));
        I915_WRITE(CURBASE_IVB(pipe), base);
 +      POSTING_READ(CURBASE_IVB(pipe));
  }
  
  /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@@ -8354,8 -8516,7 +8535,8 @@@ static int intel_gen7_queue_flip(struc
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                        DERRMR_PIPEB_PRI_FLIP_DONE |
                                        DERRMR_PIPEC_PRI_FLIP_DONE));
 -              intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
 +              intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
 +                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit(ring, DERRMR);
                intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
        }
@@@ -9272,7 -9433,8 +9453,7 @@@ check_crtc_state(struct drm_device *dev
                        enum pipe pipe;
                        if (encoder->base.crtc != &crtc->base)
                                continue;
 -                      if (encoder->get_config &&
 -                          encoder->get_hw_state(encoder, &pipe))
 +                      if (encoder->get_hw_state(encoder, &pipe))
                                encoder->get_config(encoder, &pipe_config);
                }
  
@@@ -9402,6 -9564,21 +9583,21 @@@ static int __intel_set_mode(struct drm_
                                       "[modeset]");
        }
  
+       /*
+        * See if the config requires any additional preparation, e.g.
+        * to adjust global state with pipes off.  We need to do this
+        * here so we can get the modeset_pipe updated config for the new
+        * mode set on this crtc.  For other crtcs we need to use the
+        * adjusted_mode bits in the crtc directly.
+        */
+       if (IS_VALLEYVIEW(dev)) {
+               valleyview_modeset_global_pipes(dev, &prepare_pipes,
+                                               modeset_pipes, pipe_config);
+               /* may have added more to prepare_pipes than we should */
+               prepare_pipes &= ~disable_pipes;
+       }
        for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
                intel_crtc_disable(&intel_crtc->base);
  
@@@ -10050,7 -10227,7 +10246,7 @@@ static void intel_setup_outputs(struct 
                        intel_ddi_init(dev, PORT_D);
        } else if (HAS_PCH_SPLIT(dev)) {
                int found;
 -              dpd_is_edp = intel_dpd_is_edp(dev);
 +              dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
  
                if (has_edp_a(dev))
                        intel_dp_init(dev, DP_A, PORT_A);
                        intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
                                        PORT_C);
                        if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
 -                              intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
 -                                            PORT_C);
 +                              intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
                }
  
                intel_dsi_init(dev);
@@@ -10412,8 -10590,11 +10608,11 @@@ static void intel_init_display(struct d
                }
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
-       } else if (IS_VALLEYVIEW(dev))
+       } else if (IS_VALLEYVIEW(dev)) {
+               dev_priv->display.modeset_global_resources =
+                       valleyview_modeset_global_resources;
                dev_priv->display.write_eld = ironlake_write_eld;
+       }
  
        /* Default just returns -ENODEV to indicate unsupported */
        dev_priv->display.queue_flip = intel_default_queue_flip;
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
        }
+       intel_panel_init_backlight_funcs(dev);
  }
  
  /*
@@@ -10476,17 -10659,6 +10677,6 @@@ static void quirk_invert_brightness(str
        DRM_INFO("applying inverted panel brightness quirk\n");
  }
  
- /*
-  * Some machines (Dell XPS13) suffer broken backlight controls if
-  * BLM_PCH_PWM_ENABLE is set.
-  */
- static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
-       DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
- }
  struct intel_quirk {
        int device;
        int subsystem_vendor;
@@@ -10546,11 -10718,6 +10736,6 @@@ static struct intel_quirk intel_quirks[
         * seem to use inverted backlight PWM.
         */
        { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
-       /* Dell XPS13 HD Sandy Bridge */
-       { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
-       /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
-       { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
  };
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -10870,7 -11037,7 +11055,7 @@@ void i915_redisable_vga(struct drm_devi
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
-       if (HAS_POWER_WELL(dev) &&
+       if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
            (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
                return;
  
@@@ -10931,7 -11098,8 +11116,7 @@@ static void intel_modeset_readout_hw_st
                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
 -                      if (encoder->get_config)
 -                              encoder->get_config(encoder, &crtc->config);
 +                      encoder->get_config(encoder, &crtc->config);
                } else {
                        encoder->base.crtc = NULL;
                }
@@@ -11091,12 -11259,11 +11276,11 @@@ void intel_modeset_cleanup(struct drm_d
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
  
-       /* destroy backlight, if any, before the connectors */
-       intel_panel_destroy_backlight(dev);
-       /* destroy the sysfs files before encoders/connectors */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       /* destroy the backlight and sysfs files before encoders/connectors */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               intel_panel_destroy_backlight(connector);
                drm_sysfs_connector_remove(connector);
+       }
  
        drm_mode_config_cleanup(dev);
  
@@@ -11150,6 -11317,7 +11334,7 @@@ struct intel_display_error_state 
        } cursor[I915_MAX_PIPES];
  
        struct intel_pipe_error_state {
+               bool power_domain_on;
                u32 source;
        } pipe[I915_MAX_PIPES];
  
        } plane[I915_MAX_PIPES];
  
        struct intel_transcoder_error_state {
+               bool power_domain_on;
                enum transcoder cpu_transcoder;
  
                u32 conf;
@@@ -11197,11 -11366,13 +11383,13 @@@ intel_display_capture_error_state(struc
        if (error == NULL)
                return NULL;
  
-       if (HAS_POWER_WELL(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
  
        for_each_pipe(i) {
-               if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+               error->pipe[i].power_domain_on =
+                       intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+               if (!error->pipe[i].power_domain_on)
                        continue;
  
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
        for (i = 0; i < error->num_transcoders; i++) {
                enum transcoder cpu_transcoder = transcoders[i];
  
-               if (!intel_display_power_enabled(dev,
-                               POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+               error->transcoder[i].power_domain_on =
+                       intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+               if (!error->transcoder[i].power_domain_on)
                        continue;
  
                error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@@ -11268,11 -11440,13 +11457,13 @@@ intel_display_print_error_state(struct 
                return;
  
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
-       if (HAS_POWER_WELL(dev))
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
+               err_printf(m, "  Power: %s\n",
+                          error->pipe[i].power_domain_on ? "on" : "off");
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
  
                err_printf(m, "Plane [%d]:\n", i);
        for (i = 0; i < error->num_transcoders; i++) {
                err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  Power: %s\n",
+                          error->transcoder[i].power_domain_on ? "on" : "off");
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
@@@ -142,7 -142,7 +142,7 @@@ intel_dp_max_data_rate(int max_link_clo
        return (max_link_clock * max_lanes * 8) / 10;
  }
  
- static int
+ static enum drm_mode_status
  intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
  {
@@@ -404,7 -404,7 +404,7 @@@ intel_dp_aux_ch(struct intel_dp *intel_
        int i, ret, recv_bytes;
        uint32_t status;
        int try, precharge, clock = 0;
-       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+       bool has_aux_irq = true;
        uint32_t timeout;
  
        /* dp aux is extremely sensitive to irq latency, hence request the
@@@ -1774,7 -1774,7 +1774,7 @@@ static void intel_disable_dp(struct int
         * ensure that we have vdd while we switch off the panel. */
        ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
 -      intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 +      intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        ironlake_edp_panel_off(intel_dp);
  
        /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
@@@ -1845,23 -1845,23 +1845,23 @@@ static void vlv_pre_enable_dp(struct in
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
        struct edp_power_seq power_seq;
        u32 val;
  
        mutex_lock(&dev_priv->dpio_lock);
  
-       val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
  
        mutex_unlock(&dev_priv->dpio_lock);
  
  
        intel_enable_dp(encoder);
  
-       vlv_wait_port_ready(dev_priv, port);
+       vlv_wait_port_ready(dev_priv, dport);
  }
  
  static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
  
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);
  
        /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
  }
  
@@@ -2050,7 -2050,7 +2050,7 @@@ static uint32_t intel_vlv_signal_levels
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
-       int port = vlv_dport_to_channel(dport);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
  
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        }
  
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
                         uniqtranscale_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);
  
        return 0;
@@@ -3326,19 -3326,11 +3326,19 @@@ intel_trans_dp_port_sel(struct drm_crt
  }
  
  /* check the VBT to see whether the eDP is on DP-D port */
 -bool intel_dpd_is_edp(struct drm_device *dev)
 +bool intel_dp_is_edp(struct drm_device *dev, enum port port)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        union child_device_config *p_child;
        int i;
 +      static const short port_mapping[] = {
 +              [PORT_B] = PORT_IDPB,
 +              [PORT_C] = PORT_IDPC,
 +              [PORT_D] = PORT_IDPD,
 +      };
 +
 +      if (port == PORT_A)
 +              return true;
  
        if (!dev_priv->vbt.child_dev_num)
                return false;
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
                p_child = dev_priv->vbt.child_dev + i;
  
 -              if (p_child->common.dvo_port == PORT_IDPD &&
 +              if (p_child->common.dvo_port == port_mapping[port] &&
                    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
                    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
                        return true;
@@@ -3624,10 -3616,26 +3624,10 @@@ intel_dp_init_connector(struct intel_di
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;
  
 -      type = DRM_MODE_CONNECTOR_DisplayPort;
 -      /*
 -       * FIXME : We need to initialize built-in panels before external panels.
 -       * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
 -       */
 -      switch (port) {
 -      case PORT_A:
 +      if (intel_dp_is_edp(dev, port))
                type = DRM_MODE_CONNECTOR_eDP;
 -              break;
 -      case PORT_C:
 -              if (IS_VALLEYVIEW(dev))
 -                      type = DRM_MODE_CONNECTOR_eDP;
 -              break;
 -      case PORT_D:
 -              if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
 -                      type = DRM_MODE_CONNECTOR_eDP;
 -              break;
 -      default:        /* silence GCC warning */
 -              break;
 -      }
 +      else
 +              type = DRM_MODE_CONNECTOR_DisplayPort;
  
        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
@@@ -156,6 -156,17 +156,17 @@@ struct intel_encoder 
  struct intel_panel {
        struct drm_display_mode *fixed_mode;
        int fitting_mode;
+       /* backlight */
+       struct {
+               bool present;
+               u32 level;
+               u32 max;
+               bool enabled;
+               bool combination_mode;  /* gen 2/4 only */
+               bool active_low_pwm;
+               struct backlight_device *device;
+       } backlight;
  };
  
  struct intel_connector {
@@@ -490,9 -501,9 +501,9 @@@ vlv_dport_to_channel(struct intel_digit
  {
        switch (dport->port) {
        case PORT_B:
-               return 0;
+               return DPIO_CH0;
        case PORT_C:
-               return 1;
+               return DPIO_CH1;
        default:
                BUG();
        }
@@@ -638,7 -649,8 +649,8 @@@ enum transcoder intel_pipe_to_cpu_trans
  void intel_wait_for_vblank(struct drm_device *dev, int pipe);
  void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
  int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
- void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+                        struct intel_digital_port *dport);
  bool intel_get_load_detect_pipe(struct drm_connector *connector,
                                struct drm_display_mode *mode,
                                struct intel_load_detect_pipe *old);
@@@ -694,7 -706,7 +706,7 @@@ void i915_disable_vga_mem(struct drm_de
  void hsw_enable_ips(struct intel_crtc *crtc);
  void hsw_disable_ips(struct intel_crtc *crtc);
  void intel_display_set_init_power(struct drm_device *dev, bool enable);
+ int valleyview_get_vco(struct drm_i915_private *dev_priv);
  
  /* intel_dp.c */
  void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@@ -708,7 -720,7 +720,7 @@@ void intel_dp_encoder_destroy(struct dr
  void intel_dp_check_link_status(struct intel_dp *intel_dp);
  bool intel_dp_compute_config(struct intel_encoder *encoder,
                             struct intel_crtc_config *pipe_config);
 -bool intel_dpd_is_edp(struct drm_device *dev);
 +bool intel_dp_is_edp(struct drm_device *dev, enum port port);
  void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
  void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
  void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@@ -808,7 -820,8 +820,8 @@@ void intel_panel_set_backlight(struct i
  int intel_panel_setup_backlight(struct drm_connector *connector);
  void intel_panel_enable_backlight(struct intel_connector *connector);
  void intel_panel_disable_backlight(struct intel_connector *connector);
- void intel_panel_destroy_backlight(struct drm_device *dev);
+ void intel_panel_destroy_backlight(struct drm_connector *connector);
+ void intel_panel_init_backlight_funcs(struct drm_device *dev);
  enum drm_connector_status intel_panel_detect(struct drm_device *dev);
  
  
@@@ -829,6 -842,8 +842,8 @@@ int intel_power_domains_init(struct drm
  void intel_power_domains_remove(struct drm_device *dev);
  bool intel_display_power_enabled(struct drm_device *dev,
                                 enum intel_display_power_domain domain);
+ bool intel_display_power_enabled_sw(struct drm_device *dev,
+                                   enum intel_display_power_domain domain);
  void intel_display_power_get(struct drm_device *dev,
                             enum intel_display_power_domain domain);
  void intel_display_power_put(struct drm_device *dev,
@@@ -396,13 -396,10 +396,10 @@@ int intel_opregion_notify_adapter(struc
  static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_encoder *encoder;
        struct drm_connector *connector;
-       struct intel_connector *intel_connector = NULL;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+       struct intel_connector *intel_connector;
+       struct intel_panel *panel;
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-       u32 ret = 0;
-       bool found = false;
  
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
  
                return ASLC_BACKLIGHT_FAILED;
  
        mutex_lock(&dev->mode_config.mutex);
        /*
-        * Could match the OpRegion connector here instead, but we'd also need
-        * to verify the connector could handle a backlight call.
+        * Update backlight on all connectors that support backlight (usually
+        * only one).
         */
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-               if (encoder->crtc == crtc) {
-                       found = true;
-                       break;
-               }
-       if (!found) {
-               ret = ASLC_BACKLIGHT_FAILED;
-               goto out;
-       }
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               if (connector->encoder == encoder)
-                       intel_connector = to_intel_connector(connector);
-       if (!intel_connector) {
-               ret = ASLC_BACKLIGHT_FAILED;
-               goto out;
-       }
        DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-       intel_panel_set_backlight(intel_connector, bclp, 255);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               intel_connector = to_intel_connector(connector);
+               panel = &intel_connector->panel;
+               if (panel->backlight.present)
+                       intel_panel_set_backlight(intel_connector, bclp, 255);
+       }
        iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
  
- out:
        mutex_unlock(&dev->mode_config.mutex);
  
-       return ret;
+       return 0;
  }
  
  static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@@ -638,7 -621,7 +621,7 @@@ static void intel_didl_outputs(struct d
        u32 temp;
        int i = 0;
  
 -      handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
 +      handle = ACPI_HANDLE(&dev->pdev->dev);
        if (!handle || acpi_bus_get_device(handle, &acpi_dev))
                return;
  
@@@ -191,7 -191,11 +191,11 @@@ static void sandybridge_blit_fbc_update
        u32 blt_ecoskpd;
  
        /* Make sure blitter notifies FBC of writes */
-       gen6_gt_force_wake_get(dev_priv);
+       /* Blitter is part of Media powerwell on VLV. No impact of
+        * his param in other platforms for now */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
  }
  
  static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-       dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+       dpfc_ctl |= DPFC_CTL_FENCE_EN;
+       if (IS_GEN5(dev))
+               dpfc_ctl |= obj->fence_reg;
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
  
        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
@@@ -295,7 -302,7 +302,7 @@@ static void gen7_enable_fbc(struct drm_
  
        sandybridge_blit_fbc_update(dev);
  
-       DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+       DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
  }
  
  bool intel_fbc_enabled(struct drm_device *dev)
@@@ -1180,7 -1187,7 +1187,7 @@@ static bool g4x_compute_wm0(struct drm_
  
        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
 -      htotal = adjusted_mode->htotal;
 +      htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
  
@@@ -1267,7 -1274,7 +1274,7 @@@ static bool g4x_compute_srwm(struct drm
        crtc = intel_get_crtc_for_plane(dev, plane);
        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
 -      htotal = adjusted_mode->htotal;
 +      htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
  
@@@ -1498,7 -1505,7 +1505,7 @@@ static void i965_update_wm(struct drm_c
                const struct drm_display_mode *adjusted_mode =
                        &to_intel_crtc(crtc)->config.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
 -              int htotal = adjusted_mode->htotal;
 +              int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
                int pixel_size = crtc->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
@@@ -1624,8 -1631,8 +1631,8 @@@ static void i9xx_update_wm(struct drm_c
                const struct drm_display_mode *adjusted_mode =
                        &to_intel_crtc(enabled)->config.adjusted_mode;
                int clock = adjusted_mode->crtc_clock;
 -              int htotal = adjusted_mode->htotal;
 -              int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 +              int htotal = adjusted_mode->crtc_htotal;
 +              int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
                int pixel_size = enabled->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;
@@@ -1776,7 -1783,7 +1783,7 @@@ static bool ironlake_compute_srwm(struc
        crtc = intel_get_crtc_for_plane(dev, plane);
        adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
        clock = adjusted_mode->crtc_clock;
 -      htotal = adjusted_mode->htotal;
 +      htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
  
@@@ -2469,9 -2476,8 +2476,9 @@@ hsw_compute_linetime_wm(struct drm_devi
        /* The WM are computed with base on how long it takes to fill a single
         * row at the given clock rate, multiplied by 8.
         * */
 -      linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
 -      ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
 +      linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 +                                   mode->crtc_clock);
 +      ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
                                         intel_ddi_get_cdclk_freq(dev_priv));
  
        return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@@ -3430,26 -3436,19 +3437,19 @@@ static void ironlake_disable_drps(struc
   * ourselves, instead of doing a rmw cycle (which might result in us clearing
   * all limits and the gpu stuck at whatever frequency it is at atm).
   */
- static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
  {
        u32 limits;
  
-       limits = 0;
-       if (*val >= dev_priv->rps.max_delay)
-               *val = dev_priv->rps.max_delay;
-       limits |= dev_priv->rps.max_delay << 24;
        /* Only set the down limit when we've reached the lowest level to avoid
         * getting more interrupts, otherwise leave this clear. This prevents a
         * race in the hw when coming out of rc6: There's a tiny window where
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt. */
-       if (*val <= dev_priv->rps.min_delay) {
-               *val = dev_priv->rps.min_delay;
+       limits = dev_priv->rps.max_delay << 24;
+       if (val <= dev_priv->rps.min_delay)
                limits |= dev_priv->rps.min_delay << 16;
-       }
  
        return limits;
  }
@@@ -3549,7 -3548,6 +3549,6 @@@ static void gen6_set_rps_thresholds(str
  void gen6_set_rps(struct drm_device *dev, u8 val)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 limits = gen6_rps_limits(dev_priv, &val);
  
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        /* Make sure we continue to get interrupts
         * until we hit the minimum or maximum frequencies.
         */
-       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+                  gen6_rps_limits(dev_priv, val));
  
        POSTING_READ(GEN6_RPNSWREQ);
  
@@@ -3607,48 -3606,18 +3607,18 @@@ void gen6_rps_boost(struct drm_i915_pri
        mutex_unlock(&dev_priv->rps.hw_lock);
  }
  
- /*
-  * Wait until the previous freq change has completed,
-  * or the timeout elapsed, and then update our notion
-  * of the current GPU frequency.
-  */
- static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
- {
-       u32 pval;
-       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-       if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
-               DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-       pval >>= 8;
-       if (pval != dev_priv->rps.cur_delay)
-               DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
-                                vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
-                                dev_priv->rps.cur_delay,
-                                vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-       dev_priv->rps.cur_delay = pval;
- }
  void valleyview_set_rps(struct drm_device *dev, u8 val)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       gen6_rps_limits(dev_priv, &val);
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
  
-       vlv_update_rps_cur_delay(dev_priv);
        DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.cur_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay,
-                        vlv_gpu_freq(dev_priv->mem_freq, val), val);
+                        vlv_gpu_freq(dev_priv, val), val);
  
        if (val == dev_priv->rps.cur_delay)
                return;
  
        dev_priv->rps.cur_delay = val;
  
-       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+       trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
  }
  
  static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@@ -3775,7 -3744,7 +3745,7 @@@ static void gen8_enable_rps(struct drm_
  
        /* 1c & 1d: Get forcewake during program sequence. Although the driver
         * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        /* 2a: Disable RC states. */
        I915_WRITE(GEN6_RC_CONTROL, 0);
  
        gen6_enable_rps_interrupts(dev);
  
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  static void gen6_enable_rps(struct drm_device *dev)
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
  
-       gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
  
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 -      if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
 +      if (IS_IVYBRIDGE(dev))
                I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
        else
                I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
                        DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
        }
  
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void gen6_update_ring_freq(struct drm_device *dev)
@@@ -4116,7 -4085,8 +4086,8 @@@ static void valleyview_enable_rps(struc
  
        valleyview_setup_pctx(dev);
  
-       gen6_gt_force_wake_get(dev_priv);
+       /* If VLV, Forcewake all wells, else re-direct to regular path */
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
        I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
        for_each_ring(ring, dev_priv, i)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
  
-       I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
  
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-               rc6_mode = GEN7_RC_CTL_TO_MODE;
+               rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
  
        intel_print_rc6_info(dev, rc6_mode);
  
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
  
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-       switch ((val >> 6) & 3) {
-       case 0:
-       case 1:
-               dev_priv->mem_freq = 800;
-               break;
-       case 2:
-               dev_priv->mem_freq = 1066;
-               break;
-       case 3:
-               dev_priv->mem_freq = 1333;
-               break;
-       }
-       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
        DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
  
        dev_priv->rps.cur_delay = (val >> 8) & 0xff;
        DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.cur_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay);
  
        dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
        dev_priv->rps.hw_max = dev_priv->rps.max_delay;
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.max_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
                         dev_priv->rps.max_delay);
  
        dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.rpe_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
  
        dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.min_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
                         dev_priv->rps.min_delay);
  
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-                        vlv_gpu_freq(dev_priv->mem_freq,
-                                     dev_priv->rps.rpe_delay),
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
  
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
  
        gen6_enable_rps_interrupts(dev);
  
-       gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
  }
  
  void ironlake_teardown_rc6(struct drm_device *dev)
@@@ -5463,6 -5415,26 +5416,26 @@@ static void ivybridge_init_clock_gating
  static void valleyview_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       switch ((val >> 6) & 3) {
+       case 0:
+               dev_priv->mem_freq = 800;
+               break;
+       case 1:
+               dev_priv->mem_freq = 1066;
+               break;
+       case 2:
+               dev_priv->mem_freq = 1333;
+               break;
+       case 3:
+               dev_priv->mem_freq = 1333;
+               break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  
@@@ -5642,49 -5614,78 +5615,78 @@@ void intel_suspend_hw(struct drm_devic
                lpt_suspend_hw(dev);
  }
  
- static bool is_always_on_power_domain(struct drm_device *dev,
-                                     enum intel_display_power_domain domain)
- {
-       unsigned long always_on_domains;
-       BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+ #define for_each_power_well(i, power_well, domain_mask, power_domains)        \
+       for (i = 0;                                                     \
+            i < (power_domains)->power_well_count &&                   \
+                ((power_well) = &(power_domains)->power_wells[i]);     \
+            i++)                                                       \
+               if ((power_well)->domains & (domain_mask))
  
-       if (IS_BROADWELL(dev)) {
-               always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
-       } else if (IS_HASWELL(dev)) {
-               always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
-       } else {
-               WARN_ON(1);
-               return true;
-       }
-       return BIT(domain) & always_on_domains;
- }
+ #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+       for (i = (power_domains)->power_well_count - 1;                  \
+            i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+            i--)                                                        \
+               if ((power_well)->domains & (domain_mask))
  
  /**
   * We should only use the power well if we explicitly asked the hardware to
   * enable it, so check if it's enabled and also check if we've requested it to
   * be enabled.
   */
+ static bool hsw_power_well_enabled(struct drm_device *dev,
+                                  struct i915_power_well *power_well)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
+                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+ }
+ bool intel_display_power_enabled_sw(struct drm_device *dev,
+                                   enum intel_display_power_domain domain)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains;
+       power_domains = &dev_priv->power_domains;
+       return power_domains->domain_use_count[domain];
+ }
  bool intel_display_power_enabled(struct drm_device *dev,
                                 enum intel_display_power_domain domain)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       bool is_enabled;
+       int i;
  
-       if (!HAS_POWER_WELL(dev))
-               return true;
+       power_domains = &dev_priv->power_domains;
  
-       if (is_always_on_power_domain(dev, domain))
-               return true;
+       is_enabled = true;
  
-       return I915_READ(HSW_PWR_WELL_DRIVER) ==
-                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+       mutex_lock(&power_domains->lock);
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               if (power_well->always_on)
+                       continue;
+               if (!power_well->is_enabled(dev, power_well)) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+       mutex_unlock(&power_domains->lock);
+       return is_enabled;
  }
  
- static void __intel_set_power_well(struct drm_device *dev, bool enable)
+ static void hsw_set_power_well(struct drm_device *dev,
+                              struct i915_power_well *power_well, bool enable)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
+       unsigned long irqflags;
        uint32_t tmp;
  
        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
                                      HSW_PWR_WELL_STATE_ENABLED), 20))
                                DRM_ERROR("Timeout enabling power well\n");
                }
+               if (IS_BROADWELL(dev)) {
+                       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+                                  dev_priv->de_irq_mask[PIPE_B]);
+                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+                                  ~dev_priv->de_irq_mask[PIPE_B] |
+                                  GEN8_PIPE_VBLANK);
+                       I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+                                  dev_priv->de_irq_mask[PIPE_C]);
+                       I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+                                  ~dev_priv->de_irq_mask[PIPE_C] |
+                                  GEN8_PIPE_VBLANK);
+                       POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+                       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+               }
        } else {
                if (enable_requested) {
-                       unsigned long irqflags;
                        enum pipe p;
  
                        I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
  static void __intel_power_well_get(struct drm_device *dev,
                                   struct i915_power_well *power_well)
  {
-       if (!power_well->count++)
-               __intel_set_power_well(dev, true);
+       if (!power_well->count++ && power_well->set)
+               power_well->set(dev, power_well, true);
  }
  
  static void __intel_power_well_put(struct drm_device *dev,
                                   struct i915_power_well *power_well)
  {
        WARN_ON(!power_well->count);
-       if (!--power_well->count && i915_disable_power_well)
-               __intel_set_power_well(dev, false);
+       if (!--power_well->count && power_well->set && i915_disable_power_well)
+               power_well->set(dev, power_well, false);
  }
  
  void intel_display_power_get(struct drm_device *dev,
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
-       if (!HAS_POWER_WELL(dev))
-               return;
-       if (is_always_on_power_domain(dev, domain))
-               return;
+       struct i915_power_well *power_well;
+       int i;
  
        power_domains = &dev_priv->power_domains;
  
        mutex_lock(&power_domains->lock);
-       __intel_power_well_get(dev, &power_domains->power_wells[0]);
+       for_each_power_well(i, power_well, BIT(domain), power_domains)
+               __intel_power_well_get(dev, power_well);
+       power_domains->domain_use_count[domain]++;
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5767,17 -5785,19 +5786,19 @@@ void intel_display_power_put(struct drm
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
-       if (!HAS_POWER_WELL(dev))
-               return;
-       if (is_always_on_power_domain(dev, domain))
-               return;
+       struct i915_power_well *power_well;
+       int i;
  
        power_domains = &dev_priv->power_domains;
  
        mutex_lock(&power_domains->lock);
-       __intel_power_well_put(dev, &power_domains->power_wells[0]);
+       WARN_ON(!power_domains->domain_use_count[domain]);
+       power_domains->domain_use_count[domain]--;
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+               __intel_power_well_put(dev, power_well);
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5793,10 -5813,7 +5814,7 @@@ void i915_request_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-       mutex_lock(&hsw_pwr->lock);
-       __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
-       mutex_unlock(&hsw_pwr->lock);
+       intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_request_power_well);
  
@@@ -5810,24 -5827,71 +5828,71 @@@ void i915_release_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
-       mutex_lock(&hsw_pwr->lock);
-       __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
-       mutex_unlock(&hsw_pwr->lock);
+       intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_release_power_well);
  
+ static struct i915_power_well i9xx_always_on_power_well[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = POWER_DOMAIN_MASK,
+       },
+ };
+ static struct i915_power_well hsw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+       },
+       {
+               .name = "display",
+               .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+               .is_enabled = hsw_power_well_enabled,
+               .set = hsw_set_power_well,
+       },
+ };
+ static struct i915_power_well bdw_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+       },
+       {
+               .name = "display",
+               .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+               .is_enabled = hsw_power_well_enabled,
+               .set = hsw_set_power_well,
+       },
+ };
+ #define set_power_wells(power_domains, __power_wells) ({              \
+       (power_domains)->power_wells = (__power_wells);                 \
+       (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
+ })
  int intel_power_domains_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
  
        mutex_init(&power_domains->lock);
-       hsw_pwr = power_domains;
  
-       power_well = &power_domains->power_wells[0];
-       power_well->count = 0;
+       /*
+        * The enabling order will be from lower to higher indexed wells,
+        * the disabling order is reversed.
+        */
+       if (IS_HASWELL(dev)) {
+               set_power_wells(power_domains, hsw_power_wells);
+               hsw_pwr = power_domains;
+       } else if (IS_BROADWELL(dev)) {
+               set_power_wells(power_domains, bdw_power_wells);
+               hsw_pwr = power_domains;
+       } else {
+               set_power_wells(power_domains, i9xx_always_on_power_well);
+       }
  
        return 0;
  }
@@@ -5842,15 -5906,13 +5907,13 @@@ static void intel_power_domains_resume(
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
-       if (!HAS_POWER_WELL(dev))
-               return;
+       int i;
  
        mutex_lock(&power_domains->lock);
-       power_well = &power_domains->power_wells[0];
-       __intel_set_power_well(dev, power_well->count > 0);
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->set)
+                       power_well->set(dev, power_well, power_well->count > 0);
+       }
        mutex_unlock(&power_domains->lock);
  }
  
@@@ -5864,13 -5926,13 +5927,13 @@@ void intel_power_domains_init_hw(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       if (!HAS_POWER_WELL(dev))
-               return;
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev, true);
        intel_power_domains_resume(dev);
  
+       if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+               return;
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
        if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@@ -6075,59 -6137,48 +6138,48 @@@ int sandybridge_pcode_write(struct drm_
        return 0;
  }
  
- int vlv_gpu_freq(int ddr_freq, int val)
+ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
  {
-       int mult, base;
+       int div;
  
-       switch (ddr_freq) {
+       /* 4 x czclk */
+       switch (dev_priv->mem_freq) {
        case 800:
-               mult = 20;
-               base = 120;
+               div = 10;
                break;
        case 1066:
-               mult = 22;
-               base = 133;
+               div = 12;
                break;
        case 1333:
-               mult = 21;
-               base = 125;
+               div = 16;
                break;
        default:
                return -1;
        }
  
-       return ((val - 0xbd) * mult) + base;
+       return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
  }
  
- int vlv_freq_opcode(int ddr_freq, int val)
+ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
  {
-       int mult, base;
+       int mul;
  
-       switch (ddr_freq) {
+       /* 4 x czclk */
+       switch (dev_priv->mem_freq) {
        case 800:
-               mult = 20;
-               base = 120;
+               mul = 10;
                break;
        case 1066:
-               mult = 22;
-               base = 133;
+               mul = 12;
                break;
        case 1333:
-               mult = 21;
-               base = 125;
+               mul = 16;
                break;
        default:
                return -1;
        }
  
-       val /= mult;
-       val -= base / mult;
-       val += 0xbd;
-       if (val > 0xea)
-               val = 0xea;
-       return val;
+       return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
  }
  
  void intel_pm_init(struct drm_device *dev)
@@@ -64,7 -64,8 +64,8 @@@ static void __gen6_gt_force_wake_reset(
        __raw_posting_read(dev_priv, ECOBUS);
  }
  
- static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
  {
        if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@@ -89,7 -90,8 +90,8 @@@ static void __gen6_gt_force_wake_mt_res
        __raw_posting_read(dev_priv, ECOBUS);
  }
  
- static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
  {
        u32 forcewake_ack;
  
@@@ -121,12 -123,12 +123,12 @@@ static void gen6_gt_check_fifodbg(struc
        u32 gtfifodbg;
  
        gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
-       if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-            "MMIO read or write has been dropped %x\n", gtfifodbg))
-               __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+       if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+               __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
  }
  
- static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
  {
        __raw_i915_write32(dev_priv, FORCEWAKE, 0);
        /* something from same cacheline, but !FORCEWAKE */
        gen6_gt_check_fifodbg(dev_priv);
  }
  
- static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+ static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+                                                       int fw_engine)
  {
        __raw_i915_write32(dev_priv, FORCEWAKE_MT,
                           _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@@ -149,10 -152,10 +152,10 @@@ static int __gen6_gt_wait_for_fifo(stru
  
        if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
                int loop = 500;
-               u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+               u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
                        udelay(10);
-                       fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+                       fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
                }
                if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
                        ++ret;
@@@ -171,38 -174,112 +174,112 @@@ static void vlv_force_wake_reset(struc
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
  }
  
- static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
  {
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_VLV) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
  
-       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+               __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
  
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_VLV) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for Render to ack.\n");
+       }
  
-       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
-                            FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_VLV) &
+                                               FORCEWAKE_KERNEL) == 0,
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                                  _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+               if (wait_for_atomic((__raw_i915_read32(dev_priv,
+                                               FORCEWAKE_ACK_MEDIA_VLV) &
+                                               FORCEWAKE_KERNEL),
+                                       FORCEWAKE_ACK_TIMEOUT_MS))
+                       DRM_ERROR("Timed out: waiting for media to ack.\n");
+       }
  
        /* WaRsForcewakeWaitTC0:vlv */
        __gen6_gt_wait_for_thread_c0(dev_priv);
  }
  
- static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+                                       int fw_engine)
  {
-       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       /* Check for Render Engine */
+       if (FORCEWAKE_RENDER & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                                       _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       /* Check for Media Engine */
+       if (FORCEWAKE_MEDIA & fw_engine)
+               __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                               _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
+ }
+ void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
+ {
+       unsigned long irqflags;
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (FORCEWAKE_RENDER & fw_engine) {
+               if (dev_priv->uncore.fw_rendercount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               if (dev_priv->uncore.fw_mediacount++ == 0)
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ }
+ void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+                                               int fw_engine)
+ {
+       unsigned long irqflags;
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (FORCEWAKE_RENDER & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+               if (--dev_priv->uncore.fw_rendercount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_RENDER);
+       }
+       if (FORCEWAKE_MEDIA & fw_engine) {
+               WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+               if (--dev_priv->uncore.fw_mediacount == 0)
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv,
+                                                       FORCEWAKE_MEDIA);
+       }
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  static void gen6_force_wake_work(struct work_struct *work)
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0)
-               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
 +static void intel_uncore_forcewake_reset(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (IS_VALLEYVIEW(dev)) {
 +              vlv_force_wake_reset(dev_priv);
 +      } else if (INTEL_INFO(dev)->gen >= 6) {
 +              __gen6_gt_force_wake_reset(dev_priv);
 +              if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 +                      __gen6_gt_force_wake_mt_reset(dev_priv);
 +      }
 +}
 +
  void intel_uncore_early_sanitize(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                dev_priv->ellc_size = 128;
                DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
 -}
  
 -static void intel_uncore_forcewake_reset(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (IS_VALLEYVIEW(dev)) {
 -              vlv_force_wake_reset(dev_priv);
 -      } else if (INTEL_INFO(dev)->gen >= 6) {
 -              __gen6_gt_force_wake_reset(dev_priv);
 -              if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 -                      __gen6_gt_force_wake_mt_reset(dev_priv);
 -      }
 +      intel_uncore_forcewake_reset(dev);
  }
  
  void intel_uncore_sanitize(struct drm_device *dev)
   * be called at the beginning of the sequence followed by a call to
   * gen6_gt_force_wake_put() at the end of the sequence.
   */
- void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
  {
        unsigned long irqflags;
  
        if (!dev_priv->uncore.funcs.force_wake_get)
                return;
  
+       /* Redirect to VLV specific routine */
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               return vlv_force_wake_get(dev_priv, fw_engine);
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
  /*
   * see gen6_gt_force_wake_get()
   */
- void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
  {
        unsigned long irqflags;
  
        if (!dev_priv->uncore.funcs.force_wake_put)
                return;
  
+       /* Redirect to VLV specific routine */
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               return vlv_force_wake_put(dev_priv, fw_engine);
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (--dev_priv->uncore.forcewake_count == 0) {
                dev_priv->uncore.forcewake_count++;
@@@ -379,16 -463,51 +465,51 @@@ gen6_read##x(struct drm_i915_private *d
        REG_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                       FORCEWAKE_ALL); \
                val = __raw_i915_read##x(dev_priv, reg); \
                if (dev_priv->uncore.forcewake_count == 0) \
-                       dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                       FORCEWAKE_ALL); \
+       } else { \
+               val = __raw_i915_read##x(dev_priv, reg); \
+       } \
+       REG_READ_FOOTER; \
+ }
+ #define __vlv_read(x) \
+ static u##x \
+ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       unsigned fwengine = 0; \
+       unsigned *fwcount; \
+       REG_READ_HEADER(x); \
+       if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
+               fwengine = FORCEWAKE_RENDER;            \
+               fwcount = &dev_priv->uncore.fw_rendercount;    \
+       }                                               \
+       else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
+               fwengine = FORCEWAKE_MEDIA;             \
+               fwcount = &dev_priv->uncore.fw_mediacount;     \
+       }  \
+       if (fwengine != 0) {            \
+               if ((*fwcount)++ == 0) \
+                       (dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+                                                               fwengine); \
+               val = __raw_i915_read##x(dev_priv, reg); \
+               if (--(*fwcount) == 0) \
+                       (dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+                                                       fwengine); \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
        REG_READ_FOOTER; \
  }
  
+ __vlv_read(8)
+ __vlv_read(16)
+ __vlv_read(32)
+ __vlv_read(64)
  __gen6_read(8)
  __gen6_read(16)
  __gen6_read(32)
@@@ -402,6 -521,7 +523,7 @@@ __gen4_read(16
  __gen4_read(32)
  __gen4_read(64)
  
+ #undef __vlv_read
  #undef __gen6_read
  #undef __gen5_read
  #undef __gen4_read
@@@ -489,11 -609,13 +611,13 @@@ gen8_write##x(struct drm_i915_private *
        bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
        REG_WRITE_HEADER; \
        if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+                                                       FORCEWAKE_ALL); \
        } \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (__needs_put) { \
-               dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+                                                       FORCEWAKE_ALL); \
        } \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
  }
@@@ -534,8 -656,8 +658,8 @@@ void intel_uncore_init(struct drm_devic
                          gen6_force_wake_work);
  
        if (IS_VALLEYVIEW(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
-               dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+               dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
        } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
                dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
                 * forcewake being disabled.
                 */
                mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv);
+               __gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv);
+               __gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
  
                if (ecobus & FORCEWAKE_MT_ENABLE) {
                        dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
                        dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
                }
-               dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-               dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-               dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-               dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               if (IS_VALLEYVIEW(dev)) {
+                       dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
+               } else {
+                       dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+                       dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+                       dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+                       dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               }
                break;
        case 5:
                dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
@@@ -687,6 -817,43 +819,43 @@@ int i915_reg_read_ioctl(struct drm_devi
        return 0;
  }
  
+ /*
+  * i915_get_reset_stats_ioctl - report GPU reset/hang statistics to userspace
+  * @dev: DRM device
+  * @data: ioctl payload, a struct drm_i915_reset_stats in/out argument
+  * @file: DRM file private of the calling process
+  *
+  * Fills in the global device reset count (privileged callers only) and the
+  * per-context hang statistics (batches active/pending when hangs occurred)
+  * for the context identified by args->ctx_id.
+  *
+  * Returns 0 on success, -EINVAL for unknown flags/padding, -EPERM for an
+  * unprivileged query of the default context, or a negative error from
+  * mutex_lock_interruptible()/i915_gem_context_get_hang_stats().
+  */
+ int i915_get_reset_stats_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_reset_stats *args = data;
+       struct i915_ctx_hang_stats *hs;
+       int ret;
+       /* Reject non-zero flags/pad so they stay available for extension. */
+       if (args->flags || args->pad)
+               return -EINVAL;
+       /* The default context is shared; its stats are privileged info. */
+       if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       /* Look up the caller's hang stats; IS_ERR on bad ctx_id. */
+       hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+       if (IS_ERR(hs)) {
+               mutex_unlock(&dev->struct_mutex);
+               return PTR_ERR(hs);
+       }
+       /* Global reset count leaks cross-process info; admin only. */
+       if (capable(CAP_SYS_ADMIN))
+               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       else
+               args->reset_count = 0;
+       args->batch_active = hs->batch_active;
+       args->batch_pending = hs->batch_pending;
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
  static int i965_reset_complete(struct drm_device *dev)
  {
        u8 gdrst;
@@@ -770,12 -937,12 +939,12 @@@ static int gen6_do_reset(struct drm_dev
  
        /* If reset with a user forcewake, try to restore, otherwise turn it off */
        if (dev_priv->uncore.forcewake_count)
-               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+               dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
        else
-               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+               dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
  
        /* Restore fifo count */
-       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
        return ret;
  /*
   * intel_gpu_reset - dispatch a full GPU reset by hardware generation.
   * NOTE(review): this diff hunk elides context lines — the gen4 case and
   * the default return are presumably present in the full file; confirm
   * against the merged tree before assuming this switch is exhaustive.
   */
  int intel_gpu_reset(struct drm_device *dev)
  {
        switch (INTEL_INFO(dev)->gen) {
+       /* Gen8 (Broadwell) shares the gen6/7 reset path below. */
+       case 8:
        case 7:
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        }
  }
  
- void intel_uncore_clear_errors(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       /* XXX needs spinlock around caller's grouping */
-       if (HAS_FPGA_DBG_UNCLAIMED(dev))
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
  void intel_uncore_check_errors(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;