Merge tag 'drm-msm-next-2021-06-23b' of https://gitlab.freedesktop.org/drm/msm into...
authorDave Airlie <airlied@redhat.com>
Wed, 23 Jun 2021 21:15:17 +0000 (07:15 +1000)
committerDave Airlie <airlied@redhat.com>
Wed, 23 Jun 2021 21:21:16 +0000 (07:21 +1000)
* devcoredump support for display errors
* dpu: irq cleanup/refactor
* dpu: dt bindings conversion to yaml
* dsi: dt bindings conversion to yaml
* mdp5: alpha/blend_mode/zpos support
* a6xx: cached coherent buffer support
* a660 support
* gpu iova fault improvements:
   - info about which block triggered the fault, etc
   - generation of gpu devcoredump on fault
* assortment of other cleanups and fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGs4=qsGBBbyn-4JWqW4-YUSTKh67X3DsPQ=T2D9aXKqNA@mail.gmail.com
1  2 
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_uapi.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/msm_gem.c

@@@ -1,6 -1,7 +1,7 @@@
  /*
   * Copyright (C) 2014 Red Hat
   * Copyright (C) 2014 Intel Corp.
+  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
   *
   * Permission is hereby granted, free of charge, to any person obtaining a
   * copy of this software and associated documentation files (the "Software"),
@@@ -385,8 -386,7 +386,8 @@@ static int drm_atomic_crtc_check(const 
  
        /* The state->enable vs. state->mode_blob checks can be WARN_ON,
         * as this is a kernel-internal detail that userspace should never
 -       * be able to trigger. */
 +       * be able to trigger.
 +       */
        if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
            WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
                DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
@@@ -1303,8 -1303,8 +1304,8 @@@ int drm_atomic_check_only(struct drm_at
        struct drm_crtc_state *new_crtc_state;
        struct drm_connector *conn;
        struct drm_connector_state *conn_state;
 -      unsigned requested_crtc = 0;
 -      unsigned affected_crtc = 0;
 +      unsigned int requested_crtc = 0;
 +      unsigned int affected_crtc = 0;
        int i, ret = 0;
  
        DRM_DEBUG_ATOMIC("checking %p\n", state);
@@@ -1609,9 -1609,20 +1610,20 @@@ commit
  }
  EXPORT_SYMBOL(__drm_atomic_helper_set_config);
  
- void drm_atomic_print_state(const struct drm_atomic_state *state)
+ /**
+  * drm_atomic_print_new_state - prints drm atomic state
+  * @state: atomic configuration to check
+  * @p: drm printer
+  *
+  * This function prints the drm atomic state snapshot using the drm printer
+  * which is passed to it. This snapshot can be used for debugging purposes.
+  *
+  * Note that this function looks into the new state objects and hence it's not
+  * safe to be used after the call to drm_atomic_helper_commit_hw_done().
+  */
+ void drm_atomic_print_new_state(const struct drm_atomic_state *state,
+               struct drm_printer *p)
  {
-       struct drm_printer p = drm_info_printer(state->dev->dev);
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        struct drm_crtc *crtc;
        struct drm_connector_state *connector_state;
        int i;
  
+       if (!p) {
+               DRM_ERROR("invalid drm printer\n");
+               return;
+       }
        DRM_DEBUG_ATOMIC("checking %p\n", state);
  
        for_each_new_plane_in_state(state, plane, plane_state, i)
-               drm_atomic_plane_print_state(&p, plane_state);
+               drm_atomic_plane_print_state(p, plane_state);
  
        for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-               drm_atomic_crtc_print_state(&p, crtc_state);
+               drm_atomic_crtc_print_state(p, crtc_state);
  
        for_each_new_connector_in_state(state, connector, connector_state, i)
-               drm_atomic_connector_print_state(&p, connector_state);
+               drm_atomic_connector_print_state(p, connector_state);
  }
+ EXPORT_SYMBOL(drm_atomic_print_new_state);
  
  static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
                             bool take_locks)
@@@ -2,6 -2,7 +2,7 @@@
   * Copyright (C) 2014 Red Hat
   * Copyright (C) 2014 Intel Corp.
   * Copyright (C) 2018 Intel Corp.
+  * Copyright (c) 2020, The Linux Foundation. All rights reserved.
   *
   * Permission is hereby granted, free of charge, to any person obtaining a
   * copy of this software and associated documentation files (the "Software"),
@@@ -78,8 -79,8 +79,8 @@@ int drm_atomic_set_mode_for_crtc(struc
                drm_mode_convert_to_umode(&umode, mode);
                state->mode_blob =
                        drm_property_create_blob(state->crtc->dev,
 -                                               sizeof(umode),
 -                                               &umode);
 +                                               sizeof(umode),
 +                                               &umode);
                if (IS_ERR(state->mode_blob))
                        return PTR_ERR(state->mode_blob);
  
@@@ -114,7 -115,7 +115,7 @@@ EXPORT_SYMBOL(drm_atomic_set_mode_for_c
   * Zero on success, error code on failure. Cannot return -EDEADLK.
   */
  int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 -                                      struct drm_property_blob *blob)
 +                                    struct drm_property_blob *blob)
  {
        struct drm_crtc *crtc = state->crtc;
  
@@@ -1321,6 -1322,7 +1322,7 @@@ int drm_mode_atomic_ioctl(struct drm_de
        struct drm_out_fence_state *fence_state;
        int ret = 0;
        unsigned int i, j, num_fences;
+       struct drm_printer p = drm_info_printer(dev->dev);
  
        /* disallow for drivers not supporting atomic: */
        if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@@ -1453,7 -1455,7 +1455,7 @@@ retry
                ret = drm_atomic_nonblocking_commit(state);
        } else {
                if (drm_debug_enabled(DRM_UT_STATE))
-                       drm_atomic_print_state(state);
+                       drm_atomic_print_new_state(state, &p);
  
                ret = drm_atomic_commit(state);
        }
@@@ -19,6 -19,7 +19,7 @@@
  #include "msm_drv.h"
  #include "msm_mmu.h"
  #include "msm_gem.h"
+ #include "disp/msm_disp_snapshot.h"
  
  #include "dpu_kms.h"
  #include "dpu_core_irq.h"
@@@ -798,6 -799,51 +799,51 @@@ static void dpu_irq_uninstall(struct ms
        dpu_core_irq_uninstall(dpu_kms);
  }
  
+ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
+ {
+       int i;
+       struct dpu_kms *dpu_kms;
+       struct dpu_mdss_cfg *cat;
+       struct dpu_hw_mdp *top;
+       dpu_kms = to_dpu_kms(kms);
+       cat = dpu_kms->catalog;
+       top = dpu_kms->hw_mdp;
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       /* dump CTL sub-blocks HW regs info */
+       for (i = 0; i < cat->ctl_count; i++)
+               msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
+                               dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);
+       /* dump DSPP sub-blocks HW regs info */
+       for (i = 0; i < cat->dspp_count; i++)
+               msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
+                               dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);
+       /* dump INTF sub-blocks HW regs info */
+       for (i = 0; i < cat->intf_count; i++)
+               msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
+                               dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);
+       /* dump PP sub-blocks HW regs info */
+       for (i = 0; i < cat->pingpong_count; i++)
+               msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
+                               dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);
+       /* dump SSPP sub-blocks HW regs info */
+       for (i = 0; i < cat->sspp_count; i++)
+               msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
+                               dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+       msm_disp_snapshot_add_block(disp_state, top->hw.length,
+                       dpu_kms->mmio + top->hw.blk_off, "top");
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
  static const struct msm_kms_funcs kms_funcs = {
        .hw_init         = dpu_kms_hw_init,
        .irq_preinstall  = dpu_irq_preinstall,
        .round_pixclk    = dpu_kms_round_pixclk,
        .destroy         = dpu_kms_destroy,
        .set_encoder_mode = _dpu_kms_set_encoder_mode,
+       .snapshot        = dpu_kms_mdp_snapshot,
  #ifdef CONFIG_DEBUG_FS
        .debugfs_init    = dpu_kms_debugfs_init,
  #endif
@@@ -1020,6 -1067,11 +1067,6 @@@ static int dpu_kms_hw_init(struct msm_k
                        dpu_kms->catalog->caps->max_mixer_width * 2;
        dev->mode_config.max_height = 4096;
  
 -      /*
 -       * Support format modifiers for compression etc.
 -       */
 -      dev->mode_config.allow_fb_modifiers = true;
 -
        dev->max_vblank_count = 0xffffffff;
        /* Disable vblank irqs aggressively for power-saving */
        dev->vblank_disable_immediate = true;
@@@ -1089,21 -1141,21 +1136,21 @@@ static int dpu_bind(struct device *dev
        if (!dpu_kms)
                return -ENOMEM;
  
-       dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
-       if (IS_ERR(dpu_kms->opp_table))
-               return PTR_ERR(dpu_kms->opp_table);
+       ret = devm_pm_opp_set_clkname(dev, "core");
+       if (ret)
+               return ret;
        /* OPP table is optional */
-       ret = dev_pm_opp_of_add_table(dev);
+       ret = devm_pm_opp_of_add_table(dev);
        if (ret && ret != -ENODEV) {
                dev_err(dev, "invalid OPP table in device tree\n");
-               goto put_clkname;
+               return ret;
        }
  
        mp = &dpu_kms->mp;
        ret = msm_dss_parse_clock(pdev, mp);
        if (ret) {
                DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
-               goto err;
+               return ret;
        }
  
        platform_set_drvdata(pdev, dpu_kms);
        ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
        if (ret) {
                DPU_ERROR("failed to init kms, ret=%d\n", ret);
-               goto err;
+               return ret;
        }
        dpu_kms->dev = ddev;
        dpu_kms->pdev = pdev;
        dpu_kms->rpm_enabled = true;
  
        priv->kms = &dpu_kms->base;
-       return ret;
- err:
-       dev_pm_opp_of_remove_table(dev);
- put_clkname:
-       dev_pm_opp_put_clkname(dpu_kms->opp_table);
        return ret;
  }
  
@@@ -1140,9 -1188,6 +1183,6 @@@ static void dpu_unbind(struct device *d
  
        if (dpu_kms->rpm_enabled)
                pm_runtime_disable(&pdev->dev);
-       dev_pm_opp_of_remove_table(dev);
-       dev_pm_opp_put_clkname(dpu_kms->opp_table);
  }
  
  static const struct component_ops dpu_ops = {
@@@ -77,8 -77,6 +77,6 @@@ struct dp_ctrl_private 
        struct dp_parser *parser;
        struct dp_catalog *catalog;
  
-       struct opp_table *opp_table;
        struct completion idle_comp;
        struct completion video_comp;
  };
@@@ -1103,7 -1101,7 +1101,7 @@@ static int dp_ctrl_link_train_1(struct 
        tries = 0;
        old_v_level = ctrl->link->phy_params.v_level;
        for (tries = 0; tries < maximum_retries; tries++) {
 -              drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
 +              drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
  
                ret = dp_ctrl_read_link_status(ctrl, link_status);
                if (ret)
@@@ -1184,7 -1182,7 +1182,7 @@@ static int dp_ctrl_link_lane_down_shift
  static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
  {
        dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
 -      drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
 +      drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
  }
  
  static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
        dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
  
        for (tries = 0; tries <= maximum_retries; tries++) {
 -              drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
 +              drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
  
                ret = dp_ctrl_read_link_status(ctrl, link_status);
                if (ret)
        return ret;
  }
  
+ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+ {
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+       int ret;
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+       /* set dongle to D3 (power off) mode */
+       dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+       if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) {
+               ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+               if (ret) {
+                       DRM_ERROR("Failed to disable pclk. ret=%d\n", ret);
+                       return ret;
+               }
+       }
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+       if (ret) {
+               DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+               return ret;
+       }
+       phy_power_off(phy);
+       /* aux channel down, reinit phy */
+       phy_exit(phy);
+       phy_init(phy);
+       DRM_DEBUG_DP("DP off link/stream done\n");
+       return ret;
+ }
+ void dp_ctrl_off_phy(struct dp_ctrl *dp_ctrl)
+ {
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+       dp_catalog_ctrl_reset(ctrl->catalog);
+       phy_exit(phy);
+       DRM_DEBUG_DP("DP off phy done\n");
+ }
  int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
  {
        struct dp_ctrl_private *ctrl;
@@@ -1886,20 -1941,17 +1941,17 @@@ struct dp_ctrl *dp_ctrl_get(struct devi
                return ERR_PTR(-ENOMEM);
        }
  
-       ctrl->opp_table = dev_pm_opp_set_clkname(dev, "ctrl_link");
-       if (IS_ERR(ctrl->opp_table)) {
+       ret = devm_pm_opp_set_clkname(dev, "ctrl_link");
+       if (ret) {
                dev_err(dev, "invalid DP OPP table in device tree\n");
-               /* caller do PTR_ERR(ctrl->opp_table) */
-               return (struct dp_ctrl *)ctrl->opp_table;
+               /* caller does PTR_ERR(opp_table) */
+               return (struct dp_ctrl *)ERR_PTR(ret);
        }
  
        /* OPP table is optional */
-       ret = dev_pm_opp_of_add_table(dev);
-       if (ret) {
+       ret = devm_pm_opp_of_add_table(dev);
+       if (ret)
                dev_err(dev, "failed to add DP OPP table\n");
-               dev_pm_opp_put_clkname(ctrl->opp_table);
-               ctrl->opp_table = NULL;
-       }
  
        init_completion(&ctrl->idle_comp);
        init_completion(&ctrl->video_comp);
  
        return &ctrl->dp_ctrl;
  }
- void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
- {
-       struct dp_ctrl_private *ctrl;
-       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-       if (ctrl->opp_table) {
-               dev_pm_opp_of_remove_table(ctrl->dev);
-               dev_pm_opp_put_clkname(ctrl->opp_table);
-               ctrl->opp_table = NULL;
-       }
- }
@@@ -211,6 -211,13 +211,13 @@@ void msm_gem_put_pages(struct drm_gem_o
        msm_gem_unlock(obj);
  }
  
+ static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+ {
+       if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+               return pgprot_writecombine(prot);
+       return prot;
+ }
  int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
  {
  
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;
-       if (msm_obj->flags & MSM_BO_WC) {
-               vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-       } else if (msm_obj->flags & MSM_BO_UNCACHED) {
-               vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-       } else {
-               /*
-                * Shunt off cached objs to shmem file so they have their own
-                * address_space (so unmap_mapping_range does what we want,
-                * in particular in the case of mmap'd dmabufs)
-                */
-               vma->vm_pgoff = 0;
-               vma_set_file(vma, obj->filp);
-               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       }
+       vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
  
        return 0;
  }
@@@ -372,7 -364,7 +364,7 @@@ static void del_vma(struct msm_gem_vma 
        kfree(vma);
  }
  
- /**
+ /*
   * If close is true, this also closes the VMA (releasing the allocated
   * iova range) in addition to removing the iommu mapping.  In the eviction
   * case (!close), we keep the iova allocated, but only remove the iommu
@@@ -451,6 -443,9 +443,9 @@@ static int msm_gem_pin_iova(struct drm_
        if (msm_obj->flags & MSM_BO_MAP_PRIV)
                prot |= IOMMU_PRIV;
  
+       if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+               prot |= IOMMU_CACHE;
        GEM_WARN_ON(!msm_gem_is_locked(obj));
  
        if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@@ -653,7 -648,7 +648,7 @@@ static void *get_vaddr(struct drm_gem_o
                        goto fail;
                }
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+                               VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
                if (msm_obj->vaddr == NULL) {
                        ret = -ENOMEM;
                        goto fail;
@@@ -773,7 -768,7 +768,7 @@@ void msm_gem_purge(struct drm_gem_objec
                        0, (loff_t)-1);
  }
  
- /**
+ /*
   * Unpin the backing pages and make them available to be swapped out.
   */
  void msm_gem_evict(struct drm_gem_object *obj)
@@@ -817,9 -812,9 +812,9 @@@ int msm_gem_sync_object(struct drm_gem_
        struct dma_fence *fence;
        int i, ret;
  
 -      fobj = dma_resv_get_list(obj->resv);
 +      fobj = dma_resv_shared_list(obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
 -              fence = dma_resv_get_excl(obj->resv);
 +              fence = dma_resv_excl_fence(obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
@@@ -915,7 -910,8 +910,7 @@@ int msm_gem_cpu_prep(struct drm_gem_obj
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;
  
 -      ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 -                                                true,  remain);
 +      ret = dma_resv_wait_timeout(obj->resv, write, true,  remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
@@@ -1024,7 -1020,7 +1019,7 @@@ void msm_gem_describe(struct drm_gem_ob
        }
  
        rcu_read_lock();
 -      fobj = rcu_dereference(robj->fence);
 +      fobj = dma_resv_shared_list(robj);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;
  
                }
        }
  
 -      fence = rcu_dereference(robj->fence_excl);
 +      fence = dma_resv_excl_fence(robj);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
@@@ -1163,6 -1159,7 +1158,7 @@@ static int msm_gem_new_impl(struct drm_
                uint32_t size, uint32_t flags,
                struct drm_gem_object **obj)
  {
+       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
  
        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
+       case MSM_BO_CACHED_COHERENT:
+               if (priv->has_cached_coherent)
+                       break;
+               /* fallthrough */
        default:
                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));