From: Tvrtko Ursulin
Date: Mon, 3 Oct 2022 16:04:02 +0000 (+0100)
Subject: Merge drm/drm-next into drm-intel-gt-next
X-Git-Tag: microblaze-v6.6~31^2~19^2~483
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=97acb6a8fcc4e5c2cdc2693a35acdc5a7461aaa3;p=linux-2.6-microblaze.git

Merge drm/drm-next into drm-intel-gt-next

Daniele needs 84d4333c1e28 ("misc/mei: Add NULL check to component match
callback functions") in order to merge the DG2 HuC patches.

Signed-off-by: Tvrtko Ursulin
---

97acb6a8fcc4e5c2cdc2693a35acdc5a7461aaa3
diff --cc drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index f512316de396,acc561c0f0aa..910086974454
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@@ -876,8 -810,15 +878,11 @@@ i915_gem_stolen_lmem_setup(struct drm_i
  	if (WARN_ON_ONCE(instance))
  		return ERR_PTR(-ENODEV);

+ 	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ 		return ERR_PTR(-ENXIO);
+
- 	/* Use DSM base address instead for stolen memory */
- 	dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
- 	if (IS_DG1(uncore->i915)) {
+ 	if (HAS_BAR2_SMEM_STOLEN(i915) || IS_DG1(i915)) {
- 		lmem_size = pci_resource_len(pdev, 2);
+ 		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- 		if (WARN_ON(lmem_size < dsm_base))
- 			return ERR_PTR(-ENODEV);
  	} else {
  		resource_size_t lmem_range;
@@@ -886,39 -827,13 +891,39 @@@
  		lmem_size *= SZ_1G;
  	}

- 	dsm_size = lmem_size - dsm_base;
- 	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ 	if (HAS_BAR2_SMEM_STOLEN(i915)) {
+ 		/*
+ 		 * MTL dsm size is in GGC register.
+ 		 * Also MTL uses offset to DSMBASE in ptes, so i915
+ 		 * uses dsm_base = 0 to setup stolen region.
+ 		 */
+ 		ret = mtl_get_gms_size(uncore);
+ 		if (ret < 0) {
+ 			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
+ 			return ERR_PTR(ret);
+ 		}
+
+ 		dsm_base = 0;
+ 		dsm_size = (resource_size_t)(ret * SZ_1M);
+
- 		GEM_BUG_ON(pci_resource_len(pdev, 2) != SZ_256M);
++		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
+ 		GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
+ 	} else {
+ 		/* Use DSM base address instead for stolen memory */
+ 		dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ 		if (WARN_ON(lmem_size < dsm_base))
+ 			return ERR_PTR(-ENODEV);
+ 		dsm_size = lmem_size - dsm_base;
+ 	}
+
+ 	io_size = dsm_size;
- 	if (pci_resource_len(pdev, 2) < dsm_size) {
++	if (pci_resource_len(pdev, GEN12_LMEM_BAR) < dsm_size) {
  		io_start = 0;
  		io_size = 0;
+ 	} else if (HAS_BAR2_SMEM_STOLEN(i915)) {
- 		io_start = pci_resource_start(pdev, 2) + SZ_8M;
++		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
  	} else {
- 		io_start = pci_resource_start(pdev, 2) + dsm_base;
+ 		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
- 		io_size = dsm_size;
  	}

  	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
diff --cc drivers/gpu/drm/i915/gt/intel_ggtt.c
index 51669b4aae74,30cf5c3369d9..b31fe0fb013f
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@@ -929,8 -931,11 +931,11 @@@ static int gen8_gmch_probe(struct i915_
  	unsigned int size;
  	u16 snb_gmch_ctl;

- 	if (!HAS_LMEM(i915)) {
+ 	if (!HAS_LMEM(i915) && !HAS_BAR2_SMEM_STOLEN(i915)) {
- 		ggtt->gmadr = pci_resource(pdev, 2);
+ 		if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ 			return -ENXIO;
+
+ 		ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
  		ggtt->mappable_end = resource_size(&ggtt->gmadr);
  	}
diff --cc drivers/gpu/drm/i915/i915_drv.h
index ec3b2ebae25b,bdc81db76dbd..ca0609dc5fb0
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@@ -34,23 -34,13 +34,12 @@@
  #include
- #include
  #include

- #include "display/intel_bios.h"
- #include "display/intel_cdclk.h"
  #include "display/intel_display.h"
- #include "display/intel_display_power.h"
- #include "display/intel_dmc.h"
- #include "display/intel_dpll_mgr.h"
- #include "display/intel_dsb.h"
- #include "display/intel_fbc.h"
- #include "display/intel_frontbuffer.h"
- #include "display/intel_global_state.h"
- #include "display/intel_gmbus.h"
- #include "display/intel_opregion.h"
+ #include "display/intel_display_core.h"

  #include "gem/i915_gem_context_types.h"
 -#include "gem/i915_gem_lmem.h"
  #include "gem/i915_gem_shrinker.h"
  #include "gem/i915_gem_stolen.h"
@@@ -1427,82 -976,6 +975,9 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  #define HAS_ONE_EU_PER_FUSE_BIT(i915)	(INTEL_INFO(i915)->has_one_eu_per_fuse_bit)

+#define HAS_BAR2_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
+				    GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+
- /* i915_gem.c */
- void i915_gem_init_early(struct drm_i915_private *dev_priv);
- void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-
- static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
- {
- 	/*
- 	 * A single pass should suffice to release all the freed objects (along
- 	 * most call paths) , but be a little more paranoid in that freeing
- 	 * the objects does take a little amount of time, during which the rcu
- 	 * callbacks could have added new objects into the freed list, and
- 	 * armed the work again.
- 	 */
- 	while (atomic_read(&i915->mm.free_count)) {
- 		flush_work(&i915->mm.free_work);
- 		flush_delayed_work(&i915->bdev.wq);
- 		rcu_barrier();
- 	}
- }
-
- static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
- {
- 	/*
- 	 * Similar to objects above (see i915_gem_drain_freed-objects), in
- 	 * general we have workers that are armed by RCU and then rearm
- 	 * themselves in their callbacks. To be paranoid, we need to
- 	 * drain the workqueue a second time after waiting for the RCU
- 	 * grace period so that we catch work queued via RCU from the first
- 	 * pass. As neither drain_workqueue() nor flush_workqueue() report
- 	 * a result, we make an assumption that we only don't require more
- 	 * than 3 passes to catch all _recursive_ RCU delayed work.
- 	 *
- 	 */
- 	int pass = 3;
- 	do {
- 		flush_workqueue(i915->wq);
- 		rcu_barrier();
- 		i915_gem_drain_freed_objects(i915);
- 	} while (--pass);
- 	drain_workqueue(i915->wq);
- }
-
- struct i915_vma * __must_check
- i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- 			    struct i915_gem_ww_ctx *ww,
- 			    const struct i915_gtt_view *view,
- 			    u64 size, u64 alignment, u64 flags);
-
- struct i915_vma * __must_check
- i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- 			 const struct i915_gtt_view *view,
- 			 u64 size, u64 alignment, u64 flags);
-
- int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- 			   unsigned long flags);
- #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
- #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
- #define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
- #define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
- #define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
-
- void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-
- int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-
- int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
- void i915_gem_driver_register(struct drm_i915_private *i915);
- void i915_gem_driver_unregister(struct drm_i915_private *i915);
- void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
- void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-
- int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-
  /* intel_device_info.c */
  static inline struct intel_device_info *
  mkwrite_device_info(struct drm_i915_private *dev_priv)
diff --cc drivers/gpu/drm/i915/i915_gem.c
index b37daf9d4bd0,f18cc6270b2b..55d605c0c55d
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@@ -1089,10 -1089,45 +1089,47 @@@ out
  	return err;
  }

+ /*
+  * A single pass should suffice to release all the freed objects (along most
+  * call paths), but be a little more paranoid in that freeing the objects does
+  * take a little amount of time, during which the rcu callbacks could have added
+  * new objects into the freed list, and armed the work again.
+  */
+ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+ {
+ 	while (atomic_read(&i915->mm.free_count)) {
+ 		flush_work(&i915->mm.free_work);
+ 		flush_delayed_work(&i915->bdev.wq);
+ 		rcu_barrier();
+ 	}
+ }
+
+ /*
+  * Similar to objects above (see i915_gem_drain_freed-objects), in general we
+  * have workers that are armed by RCU and then rearm themselves in their
+  * callbacks. To be paranoid, we need to drain the workqueue a second time after
+  * waiting for the RCU grace period so that we catch work queued via RCU from
+  * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
+  * result, we make an assumption that we only don't require more than 3 passes
+  * to catch all _recursive_ RCU delayed work.
+  */
+ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+ {
+ 	int i;
+
+ 	for (i = 0; i < 3; i++) {
+ 		flush_workqueue(i915->wq);
+ 		rcu_barrier();
+ 		i915_gem_drain_freed_objects(i915);
+ 	}
+
+ 	drain_workqueue(i915->wq);
+ }
+
  int i915_gem_init(struct drm_i915_private *dev_priv)
  {
+ 	struct intel_gt *gt;
+ 	unsigned int i;
  	int ret;

  	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
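
For reference, the stolen-region sizing that the resolved i915_gem_stolen_lmem_setup() above converges on can be restated as the standalone C sketch below. This is only an illustration of the merge result, not driver code: compute_stolen_layout(), its parameters (has_bar2_smem_stolen, lmem_size, bar_start, bar_len, dsmbase, gms_size_mb) and struct stolen_layout are invented stand-ins for the real register reads, PCI BAR helpers and intel_memory_region plumbing.

#include <stdbool.h>
#include <stdint.h>

#define SZ_1M (1ULL << 20)
#define SZ_8M (8ULL << 20)

struct stolen_layout {
	uint64_t dsm_base;	/* stolen offset within LMEM / BAR2 */
	uint64_t dsm_size;	/* stolen size */
	uint64_t io_start;	/* CPU-visible start, 0 if unmappable */
	uint64_t io_size;	/* CPU-visible size, 0 if unmappable */
};

/*
 * Simplified restatement of the merged logic:
 *  - BAR2-backed stolen (MTL-style): size comes from the GGC/GMS value in MiB,
 *    dsm_base is 0 because the PTEs hold offsets from DSMBASE, and the
 *    CPU-visible window starts 8 MiB into the BAR.
 *  - Other GEN12 LMEM platforms: dsm_base comes from DSMBASE and stolen is the
 *    remainder of LMEM above it.
 */
static struct stolen_layout
compute_stolen_layout(bool has_bar2_smem_stolen, uint64_t lmem_size,
		      uint64_t bar_start, uint64_t bar_len,
		      uint64_t dsmbase, uint64_t gms_size_mb)
{
	struct stolen_layout s = { 0 };

	if (has_bar2_smem_stolen) {
		s.dsm_base = 0;
		s.dsm_size = gms_size_mb * SZ_1M;
	} else {
		s.dsm_base = dsmbase;
		s.dsm_size = lmem_size - dsmbase;
	}

	s.io_size = s.dsm_size;
	if (bar_len < s.dsm_size) {
		/* Stolen cannot be reached through the BAR at all. */
		s.io_start = 0;
		s.io_size = 0;
	} else if (has_bar2_smem_stolen) {
		s.io_start = bar_start + SZ_8M;
	} else {
		s.io_start = bar_start + s.dsm_base;
	}

	return s;
}

The point the sketch tries to keep visible is the split the merge settles on: platforms with BAR2-backed stolen memory appear to derive the size from GGC and map the CPU window 8 MiB into the BAR, while DG1 and the other GEN12 LMEM parts place stolen at DSMBASE and use the rest of LMEM.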