if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
- /* Use DSM base address instead for stolen memory */
- dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
- if (IS_DG1(uncore->i915)) {
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
+ if (HAS_BAR2_SMEM_STOLEN(i915) || IS_DG1(i915)) {
lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- if (WARN_ON(lmem_size < dsm_base))
- return ERR_PTR(-ENODEV);
} else {
resource_size_t lmem_range;
lmem_size *= SZ_1G;
}
- dsm_size = lmem_size - dsm_base;
- if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ if (HAS_BAR2_SMEM_STOLEN(i915)) {
+ /*
+ * On MTL the DSM size is read from the GGC register.
+ * MTL also uses offsets relative to DSMBASE in its PTEs,
+ * so i915 uses dsm_base = 0 to set up the stolen region.
+ */
+ ret = mtl_get_gms_size(uncore);
+ if (ret < 0) {
+ drm_err(&i915->drm, "invalid MTL GGC register setting\n");
+ return ERR_PTR(ret);
+ }
+
+ dsm_base = 0;
+ dsm_size = (resource_size_t)(ret * SZ_1M);
+
+ GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
+ GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
+ } else {
+ /* Use DSM base address instead for stolen memory */
+ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ if (WARN_ON(lmem_size < dsm_base))
+ return ERR_PTR(-ENODEV);
+ dsm_size = lmem_size - dsm_base;
+ }
+
+ io_size = dsm_size;
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < dsm_size) {
io_start = 0;
io_size = 0;
+ } else if (HAS_BAR2_SMEM_STOLEN(i915)) {
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
} else {
io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
- io_size = dsm_size;
}
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
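For readers following the hunk above: the MTL path sizes stolen memory by decoding the GMS field of the graphics control (GGC) register via mtl_get_gms_size(). The helper itself is not shown in this excerpt; below is a minimal sketch of such a decode, assuming hypothetical MTL_GGC/MTL_GGMS_MASK/MTL_GMS_MASK register definitions and the conventional gen9+ GMS encoding (32 MiB steps for low values, 4 MiB steps for the 0xf0 range). It is illustrative only, not the patch's implementation.

/*
 * Illustrative sketch only: translate the GMS field of the GGC register
 * into a data-stolen-memory size in MiB. The register name and masks
 * below (MTL_GGC, MTL_GGMS_MASK, MTL_GMS_MASK) are placeholders.
 */
static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, MTL_GGC);

	/* GGMS (GTT stolen) is expected to encode a fixed 8 MiB */
	if ((ggc & MTL_GGMS_MASK) != MTL_GGMS_MASK)
		return -EIO;

	gms = REG_FIELD_GET(MTL_GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;		/* 0, 32, 64, 96, 128 MiB */
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;	/* 4 MiB granularity */
	default:
		return -EIO;
	}
}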
#include <linux/pm_qos.h>
- #include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
- #include "display/intel_bios.h"
- #include "display/intel_cdclk.h"
#include "display/intel_display.h"
- #include "display/intel_display_power.h"
- #include "display/intel_dmc.h"
- #include "display/intel_dpll_mgr.h"
- #include "display/intel_dsb.h"
- #include "display/intel_fbc.h"
- #include "display/intel_frontbuffer.h"
- #include "display/intel_global_state.h"
- #include "display/intel_gmbus.h"
- #include "display/intel_opregion.h"
+ #include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
- #include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
- /* i915_gem.c */
- void i915_gem_init_early(struct drm_i915_private *dev_priv);
- void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-
- static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
- {
- /*
- * A single pass should suffice to release all the freed objects (along
- * most call paths) , but be a little more paranoid in that freeing
- * the objects does take a little amount of time, during which the rcu
- * callbacks could have added new objects into the freed list, and
- * armed the work again.
- */
- while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
- }
-
- static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
- {
- /*
- * Similar to objects above (see i915_gem_drain_freed-objects), in
- * general we have workers that are armed by RCU and then rearm
- * themselves in their callbacks. To be paranoid, we need to
- * drain the workqueue a second time after waiting for the RCU
- * grace period so that we catch work queued via RCU from the first
- * pass. As neither drain_workqueue() nor flush_workqueue() report
- * a result, we make an assumption that we only don't require more
- * than 3 passes to catch all _recursive_ RCU delayed work.
- *
- */
- int pass = 3;
- do {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- } while (--pass);
- drain_workqueue(i915->wq);
- }
-
- struct i915_vma * __must_check
- i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- struct i915_gem_ww_ctx *ww,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
- struct i915_vma * __must_check
- i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
- int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- unsigned long flags);
- #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
- #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
- #define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
- #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
- #define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
-
- void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-
- int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-
- int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
- void i915_gem_driver_register(struct drm_i915_private *i915);
- void i915_gem_driver_unregister(struct drm_i915_private *i915);
- void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
- void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-
- int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-
+ #define HAS_BAR2_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
+ GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
return err;
}
+ /*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a little amount of time, during which the rcu callbacks could have added
+ * new objects into the freed list, and armed the work again.
+ */
+ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+ {
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+ }
+
+ /*
+ * Similar to the object freeing above (see i915_gem_drain_freed_objects()), in
+ * general we have workers that are armed by RCU and then rearm themselves in
+ * their callbacks. To be paranoid, we need to drain the workqueue a second time
+ * after waiting for the RCU grace period so that we catch work queued via RCU
+ * from the first pass. As neither drain_workqueue() nor flush_workqueue()
+ * reports a result, we assume that no more than 3 passes are needed to catch
+ * all _recursive_ RCU delayed work.
+ */
+ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+ {
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ }
+
+ drain_workqueue(i915->wq);
+ }
+
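Since the drain helpers above are no longer static inlines in i915_drv.h, their callers need extern prototypes from a header. A minimal sketch, assuming the declarations live in a GEM header such as i915_gem.h (the exact header is an assumption, not shown in this diff):

/* Presumed out-of-line declarations for the un-inlined drain helpers. */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
void i915_gem_drain_workqueue(struct drm_i915_private *i915);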
int i915_gem_init(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
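The new gt and i locals added to i915_gem_init() are not used in the lines shown here; they suggest the initialization now walks every GT rather than only the primary one. A hedged sketch of the kind of loop those locals would serve, using the driver's existing for_each_gt() iterator (the loop body is illustrative, not taken from this diff):

	/* Illustrative only: per-GT iteration enabled by the new locals. */
	for_each_gt(gt, dev_priv, i) {
		/* e.g. fetch per-GT firmware before hardware init */
		intel_uc_fetch_firmwares(&gt->uc);
	}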