return 0;
}
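+/*
+ * Total DDB size available for allocation; pre-gen11 hardware
+ * reserves 4 blocks for the bypass path.
+ */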
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
+{
+ int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+ return ddb_size;
+}
+
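+/* Size of one DBuf slice, i.e. the DDB split evenly across all slices */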
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
+{
+ return intel_dbuf_size(dev_priv) /
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
+
/*
* Calculate initial DBuf slice offset, based on slice size
 * and mask (i.e. if slice size is 1024 and second slice is enabled
return offset;
}
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
-{
- u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
-
- if (INTEL_GEN(dev_priv) < 11)
- return ddb_size - 4; /* 4 blocks for bypass path allocation */
-
- return ddb_size;
-}
-
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
u32 slice_mask = 0;
- u16 ddb_size = intel_get_ddb_size(dev_priv);
+ u16 ddb_size = intel_dbuf_size(dev_priv);
u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u16 slice_size = ddb_size / num_supported_slices;
u16 start_slice;
return 0;
}
- ddb_size = intel_get_ddb_size(dev_priv);
-
- slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ ddb_size = intel_dbuf_size(dev_priv);
+ slice_size = intel_dbuf_slice_size(dev_priv);
/*
* If the state doesn't change the active CRTC's or there is no
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,