#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
+#define DRIVER_DATE "20140620"
enum pipe {
INVALID_PIPE = -1,
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int dpll_offsets[I915_MAX_PIPES];
- int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
bool is_initialized;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_engine_cs *last_ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
struct i915_fbc {
unsigned long size;
+ unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
- struct drm_mm_node *compressed_fb;
+ struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct intel_fbc_work {
struct i915_psr {
bool sink_support;
bool source_ok;
+ bool setup_done;
+ bool enabled;
+ bool active;
+ struct delayed_work work;
};
enum intel_pch {
wait_queue_head_t wq;
};
+struct i915_frontbuffer_tracking {
+ struct mutex lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
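/*
 * Usage sketch (illustration only, not part of the patch; the helper name
 * is hypothetical): gpu activity ORs the affected frontbuffer bits into
 * busy_bits under the lock, so a later flush can skip frontbuffers that
 * still have rendering outstanding.
 */
static void example_frontbuffer_busy(struct i915_frontbuffer_tracking *fbt,
				     unsigned frontbuffer_bits)
{
	mutex_lock(&fbt->lock);
	fbt->busy_bits |= frontbuffer_bits;
	mutex_unlock(&fbt->lock);
}
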
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
+
+ struct i915_frontbuffer_tracking fb_tracking;
+
u16 orig_clock;
bool mchbar_need_disable;
void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+ (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+ (1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
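/*
 * Worked example (illustration only, derived from the macros above): with
 * 4 bits per pipe, pipe B (pipe == 1) uses bits 4..7:
 *   INTEL_FRONTBUFFER_PRIMARY(1)  == 1 << 4   (0x10)
 *   INTEL_FRONTBUFFER_CURSOR(1)   == 1 << 5   (0x20)
 *   INTEL_FRONTBUFFER_SPRITE(1)   == 1 << 6   (0x40)
 *   INTEL_FRONTBUFFER_OVERLAY(1)  == 1 << 7   (0x80)
 *   INTEL_FRONTBUFFER_ALL_MASK(1) == 0xf << 4 (0xf0)
 */
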
struct drm_i915_gem_object {
struct drm_gem_object base;
unsigned int pin_mappable:1;
unsigned int pin_display:1;
+ /*
+ * Is the object to be mapped as read-only to the GPU?
+ * Only honoured if the hardware has the relevant pte bit.
+ */
+ unsigned long gt_ro:1;
+
/*
* Is the GPU currently using a fence to access this buffer?
*/
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
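/*
 * Usage sketch (illustration only, not part of the patch; the wrapper and
 * variable names are hypothetical): when the primary plane of a pipe flips
 * from old_obj to new_obj, the corresponding frontbuffer bit migrates from
 * the old object to the new one.
 */
static void example_track_primary_flip(struct drm_i915_gem_object *old_obj,
				       struct drm_i915_gem_object *new_obj,
				       enum pipe pipe)
{
	i915_gem_track_fb(old_obj, new_obj, INTEL_FRONTBUFFER_PRIMARY(pipe));
}
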
/**
* Request queue structure.
*
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
- (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
- && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ int use_mmio_flip;
};
extern struct i915_params i915 __read_mostly;
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
u32 stolen_offset,
u32 gtt_offset,
u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,