Merge tag 'drm-misc-next-2021-06-09' of git://anongit.freedesktop.org/drm/drm-misc...
author    Dave Airlie <airlied@redhat.com>
          Thu, 10 Jun 2021 01:03:50 +0000 (11:03 +1000)
committer Dave Airlie <airlied@redhat.com>
          Thu, 10 Jun 2021 01:28:09 +0000 (11:28 +1000)
drm-misc-next for 5.14:

UAPI Changes:

 * drm/panfrost: Export AFBC_FEATURES register to userspace

Cross-subsystem Changes:

 * dma-buf: Fix debug printing; Rename dma_resv_*() functions + changes
   in callers; Cleanups
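
   The rename series drops the _rcu suffixes from several dma_resv helpers.
   A minimal hedged sketch of a caller written against the renamed helpers
   (dma_resv_test_signaled()/dma_resv_wait_timeout(), formerly the *_rcu()
   variants; names and bool parameters as understood from this series, so
   verify against the tree):

       static int example_wait_idle(struct dma_resv *resv)
       {
               long ret;

               /* cheap check first: are all fences already signaled? */
               if (dma_resv_test_signaled(resv, true /* test_all */))
                       return 0;

               /* otherwise wait interruptibly, with a timeout */
               ret = dma_resv_wait_timeout(resv, true /* wait_all */,
                                           true /* intr */,
                                           msecs_to_jiffies(100));
               if (ret == 0)
                       return -ETIMEDOUT;
               return ret < 0 ? ret : 0;
       }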

Core Changes:

 * Add prefetching memcpy for WC

 * Avoid circular dependency on CONFIG_FB

 * Cleanups

 * Documentation fixes throughout DRM

 * ttm: Make struct ttm_resource the base of all managers + changes
   in all users of TTM; Add a generic memcpy for page-based iomem; Remove
   use of VM_MIXEDMAP; Cleanups
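
   As a rough illustration of the new scheme (hypothetical driver names, not
   code from this series): a manager's private node embeds struct ttm_resource
   as its base, the .alloc callback hands the base pointer back through a
   struct ttm_resource **, and container_of() recovers the private node later.

       struct my_mgr_node {
               struct ttm_resource base;       /* TTM only sees this part */
               struct drm_mm_node mm_node;     /* driver-private bookkeeping */
       };

       static inline struct my_mgr_node *to_my_node(struct ttm_resource *res)
       {
               return container_of(res, struct my_mgr_node, base);
       }

       static int my_mgr_alloc(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
       {
               struct my_mgr_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

               if (!node)
                       return -ENOMEM;

               ttm_resource_init(tbo, place, &node->base);
               /* ... reserve address space into node->mm_node here ... */
               *res = &node->base;
               return 0;
       }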

Driver Changes:

 * drm/bridge: Add TI SN65DSI83 and SN65DSI84 + DT bindings

 * drm/hyperv: Add DRM driver for HyperV graphics output

 * drm/msm: Fix module dependencies

 * drm/panel: KD35T133: Support rotation

 * drm/pl111: Fix module dependencies

 * drm/qxl: Fixes

 * drm/stm: Cleanups

 * drm/sun4i: Be explicit about format modifiers

 * drm/vc4: Use struct gpio_desc; Cleanups

 * drm/vgem: Cleanups

 * drm/vmwgfx: Use ttm_bo_move_null() if there's nothing to copy

 * fbdev/mach64: Cleanups

 * fbdev/mb862xx: Use DEVICE_ATTR_RO

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YMBw3DF2b9udByfT@linux-uq9g
15 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/radeon/radeon_cs.c

@@@ -221,10 -232,9 +233,10 @@@ int amdgpu_gtt_mgr_recover(struct ttm_r
        struct drm_mm_node *mm_node;
        int r = 0;
  
 +      adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
-               node = container_of(mm_node, struct amdgpu_gtt_node, node);
+               node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
                r = amdgpu_ttm_recover_gart(node->tbo);
                if (r)
                        break;
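
The lookup above reflects the reworked layout: the drm_mm_node is no longer a
direct member of the GTT node but lives in the embedded range-manager base.
A hedged sketch of the shape implied by the container_of() arguments (field
layout is an assumption, not copied from amdgpu_gtt_mgr.c):

        struct example_gtt_node {
                struct ttm_buffer_object *tbo;
                /* base.mm_nodes[] is flexible; allocate instances with
                 * struct_size(node, base.mm_nodes, 1) */
                struct ttm_range_mgr_node base;
        };

        static void example_walk(struct drm_mm *mm)
        {
                struct drm_mm_node *mm_node;

                drm_mm_for_each_node(mm_node, mm) {
                        struct example_gtt_node *node =
                                container_of(mm_node, struct example_gtt_node,
                                             base.mm_nodes[0]);
                        /* e.g. rebind node->tbo's GART mapping */
                }
        }
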
diff --combined drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index d607f31,0000000..f6aff7c
mode 100644,000000..100644
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@@ -1,190 -1,0 +1,195 @@@
-                                 struct ttm_resource *mem)
 +// SPDX-License-Identifier: GPL-2.0 OR MIT
 +/*
 + * Copyright 2016-2021 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: Christian König, Felix Kuehling
 + */
 +
 +#include "amdgpu.h"
 +
 +static inline struct amdgpu_preempt_mgr *
 +to_preempt_mgr(struct ttm_resource_manager *man)
 +{
 +      return container_of(man, struct amdgpu_preempt_mgr, manager);
 +}
 +
 +/**
 + * DOC: mem_info_preempt_used
 + *
 + * The amdgpu driver provides a sysfs API for reporting current total amount of
 + * used preemptible memory.
 + * The file mem_info_preempt_used is used for this, and returns the current
 + * used size of the preemptible block, in bytes
 + */
 +static ssize_t mem_info_preempt_used_show(struct device *dev,
 +                                        struct device_attribute *attr,
 +                                        char *buf)
 +{
 +      struct drm_device *ddev = dev_get_drvdata(dev);
 +      struct amdgpu_device *adev = drm_to_adev(ddev);
 +      struct ttm_resource_manager *man;
 +
 +      man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
 +      return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
 +}
 +
 +static DEVICE_ATTR_RO(mem_info_preempt_used);
 +
 +/**
 + * amdgpu_preempt_mgr_new - allocate a new node
 + *
 + * @man: TTM memory type manager
 + * @tbo: TTM BO we need this range for
 + * @place: placement flags and restrictions
 + * @mem: the resulting mem object
 + *
 + * Dummy, just count the space used without allocating resources or any limit.
 + */
 +static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
 +                                struct ttm_buffer_object *tbo,
 +                                const struct ttm_place *place,
-       atomic64_add(mem->num_pages, &mgr->used);
++                                struct ttm_resource **res)
 +{
 +      struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 +
-       mem->mm_node = NULL;
-       mem->start = AMDGPU_BO_INVALID_OFFSET;
++      *res = kzalloc(sizeof(**res), GFP_KERNEL);
++      if (!*res)
++              return -ENOMEM;
 +
-                                  struct ttm_resource *mem)
++      ttm_resource_init(tbo, place, *res);
++      (*res)->start = AMDGPU_BO_INVALID_OFFSET;
++
++      atomic64_add((*res)->num_pages, &mgr->used);
 +      return 0;
 +}
 +
 +/**
 + * amdgpu_preempt_mgr_del - free ranges
 + *
 + * @man: TTM memory type manager
 + * @mem: TTM memory object
 + *
 + * Free the allocated GTT again.
 + */
 +static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
-       atomic64_sub(mem->num_pages, &mgr->used);
++                                 struct ttm_resource *res)
 +{
 +      struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 +
++      atomic64_sub(res->num_pages, &mgr->used);
++      kfree(res);
 +}
 +
 +/**
 + * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
 + *
 + * @man: TTM memory type manager
 + *
 + * Return how many bytes are used in the GTT domain
 + */
 +uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
 +{
 +      struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 +      s64 result = atomic64_read(&mgr->used);
 +
 +      return (result > 0 ? result : 0) * PAGE_SIZE;
 +}
 +
 +/**
 + * amdgpu_preempt_mgr_debug - dump VRAM table
 + *
 + * @man: TTM memory type manager
 + * @printer: DRM printer to use
 + *
 + * Dump the table content using printk.
 + */
 +static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
 +                                   struct drm_printer *printer)
 +{
 +      struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
 +
 +      drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
 +                 man->size, (u64)atomic64_read(&mgr->used));
 +}
 +
 +static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
 +      .alloc = amdgpu_preempt_mgr_new,
 +      .free = amdgpu_preempt_mgr_del,
 +      .debug = amdgpu_preempt_mgr_debug
 +};
 +
 +/**
 + * amdgpu_preempt_mgr_init - init PREEMPT manager and DRM MM
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Allocate and initialize the GTT manager.
 + */
 +int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
 +      struct ttm_resource_manager *man = &mgr->manager;
 +      int ret;
 +
 +      man->use_tt = true;
 +      man->func = &amdgpu_preempt_mgr_func;
 +
 +      ttm_resource_manager_init(man, (1 << 30));
 +
 +      atomic64_set(&mgr->used, 0);
 +
 +      ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
 +      if (ret) {
 +              DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
 +              return ret;
 +      }
 +
 +      ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
 +                             &mgr->manager);
 +      ttm_resource_manager_set_used(man, true);
 +      return 0;
 +}
 +
 +/**
 + * amdgpu_preempt_mgr_fini - free and destroy GTT manager
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Destroy and free the GTT manager, returns -EBUSY if ranges are still
 + * allocated inside it.
 + */
 +void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
 +      struct ttm_resource_manager *man = &mgr->manager;
 +      int ret;
 +
 +      ttm_resource_manager_set_used(man, false);
 +
 +      ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
 +      if (ret)
 +              return;
 +
 +      device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
 +
 +      ttm_resource_manager_cleanup(man);
 +      ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
 +}
@@@ -460,11 -460,10 +462,11 @@@ static int amdgpu_bo_move(struct ttm_bu
  {
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
-       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = bo->resource;
        int r;
  
 -      if (new_mem->mem_type == TTM_PL_TT) {
 +      if (new_mem->mem_type == TTM_PL_TT ||
 +          new_mem->mem_type == AMDGPU_PL_PREEMPT) {
                r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
                if (r)
                        return r;
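
A hedged before/after of the bo->mem to bo->resource switch seen in this hunk
(illustrative only; the real accesses are the ones in the surrounding diff):

        static bool example_bo_is_gtt(struct ttm_buffer_object *bo)
        {
                /* old: the resource was embedded, read as bo->mem.mem_type */
                /* new: follow the pointer installed by the resource manager */
                return bo->resource->mem_type == TTM_PL_TT;
        }
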
@@@ -965,39 -962,38 +968,39 @@@ int amdgpu_ttm_alloc_gart(struct ttm_bu
  
        addr = amdgpu_gmc_agp_addr(bo);
        if (addr != AMDGPU_BO_INVALID_OFFSET) {
-               bo->mem.start = addr >> PAGE_SHIFT;
-       } else {
+               bo->resource->start = addr >> PAGE_SHIFT;
+               return 0;
+       }
  
-               /* allocate GART space */
-               placement.num_placement = 1;
-               placement.placement = &placements;
-               placement.num_busy_placement = 1;
-               placement.busy_placement = &placements;
-               placements.fpfn = 0;
-               placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-               placements.mem_type = TTM_PL_TT;
-               placements.flags = bo->mem.placement;
-               r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
-               if (unlikely(r))
-                       return r;
+       /* allocate GART space */
+       placement.num_placement = 1;
+       placement.placement = &placements;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &placements;
+       placements.fpfn = 0;
+       placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+       placements.mem_type = TTM_PL_TT;
+       placements.flags = bo->resource->placement;
+       r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+       if (unlikely(r))
+               return r;
  
-               /* compute PTE flags for this buffer object */
-               flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+       /* compute PTE flags for this buffer object */
+       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
  
-               /* Bind pages */
-               gtt->offset = (u64)tmp.start << PAGE_SHIFT;
-               r = amdgpu_ttm_gart_bind(adev, bo, flags);
-               if (unlikely(r)) {
-                       ttm_resource_free(bo, &tmp);
-                       return r;
-               }
-               amdgpu_gart_invalidate_tlb(adev);
-               ttm_resource_free(bo, &bo->mem);
-               bo->mem = tmp;
+       /* Bind pages */
+       gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+       r = amdgpu_ttm_gart_bind(adev, bo, flags);
+       if (unlikely(r)) {
+               ttm_resource_free(bo, &tmp);
+               return r;
        }
  
++      amdgpu_gart_invalidate_tlb(adev);
+       ttm_resource_free(bo, &bo->resource);
+       ttm_bo_assign_mem(bo, tmp);
        return 0;
  }
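
Condensed sketch of the reworked flow above (error handling trimmed; assumes
the 5.14-era TTM helpers): ttm_bo_mem_space() now heap-allocates the temporary
resource, which is handed to the BO with ttm_bo_assign_mem() rather than being
copied by value into bo->mem.

        static int example_alloc_gart(struct ttm_buffer_object *bo,
                                      struct ttm_placement *placement,
                                      struct ttm_operation_ctx *ctx)
        {
                struct ttm_resource *tmp;
                int r;

                r = ttm_bo_mem_space(bo, placement, &tmp, ctx); /* allocates *tmp */
                if (r)
                        return r;

                /* ... compute PTE flags and bind the pages at tmp->start ... */

                ttm_resource_free(bo, &bo->resource);   /* drop the old resource */
                ttm_bo_assign_mem(bo, tmp);             /* BO now owns the new one */
                return 0;
        }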
  
@@@ -1354,16 -1353,7 +1361,16 @@@ static bool amdgpu_ttm_bo_eviction_valu
                }
        }
  
-       switch (bo->mem.mem_type) {
+       switch (bo->resource->mem_type) {
 +      case AMDGPU_PL_PREEMPT:
 +              /* Preemptible BOs don't own system resources managed by the
 +               * driver (pages, VRAM, GART space). They point to resources
 +               * owned by someone else (e.g. pageable memory in user mode
 +               * or a DMABuf). They are used in a preemptible context so we
 +               * can guarantee no deadlocks and good QoS in case of MMU
 +               * notifiers or DMABuf move notifiers from the resource owner.
 +               */
 +              return false;
        case TTM_PL_TT:
                if (amdgpu_bo_is_amdgpu_bo(bo) &&
                    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@@ -1942,12 -1924,7 +1949,12 @@@ int amdgpu_fill_buffer(struct amdgpu_b
                return -EINVAL;
        }
  
-       if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
++      if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
 +              DRM_ERROR("Trying to clear preemptible memory.\n");
 +              return -EINVAL;
 +      }
 +
-       if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+       if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                r = amdgpu_ttm_alloc_gart(&bo->tbo);
                if (r)
                        return r;
@@@ -658,10 -657,11 +658,11 @@@ void amdgpu_vm_move_to_lru_tail(struct 
                if (!bo->parent)
                        continue;
  
-               ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+               ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
                                        &vm->lru_bulk_move);
 -              if (bo->shadow)
 -                      ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
 -                                              bo->shadow->tbo.resource,
 +              if (shadow)
-                       ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
++                      ttm_bo_move_to_lru_tail(&shadow->tbo,
++                                              shadow->tbo.resource,
                                                &vm->lru_bulk_move);
        }
        spin_unlock(&adev->mman.bdev.lru_lock);
@@@ -1858,12 -1818,11 +1859,12 @@@ int amdgpu_vm_bo_update(struct amdgpu_d
                        struct drm_gem_object *gobj = dma_buf->priv;
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
  
-                       if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+                       if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
                                bo = gem_to_amdgpu_bo(gobj);
                }
-               mem = &bo->tbo.mem;
+               mem = bo->tbo.resource;
 -              if (mem->mem_type == TTM_PL_TT)
 +              if (mem->mem_type == TTM_PL_TT ||
 +                  mem->mem_type == AMDGPU_PL_PREEMPT)
                        pages_addr = bo->tbo.ttm->dma_address;
        }
  