Merge tag 'drm-misc-fixes-2020-08-04' of git://anongit.freedesktop.org/drm/drm-misc...
author Dave Airlie <airlied@redhat.com>
Tue, 11 Aug 2020 01:59:19 +0000 (11:59 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 11 Aug 2020 02:00:30 +0000 (12:00 +1000)
 * backmerge from drm-fixes at v5.8-rc7
 * add orientation quirk for ASUS T103HAF
 * drm/omap: force runtime PM suspend on system suspend
 * drm/tidss: fix modeset init for DPI panels
 * re-add docs for drm_gem_flink_ioctl()
 * ttm: fix page-offset calculation within TTM

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804125510.GA29670@linux-uq9g
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/tidss/tidss_kms.c
drivers/gpu/drm/ttm/ttm_bo_vm.c

@@@ -235,7 -235,7 +235,7 @@@ drm_gem_object_handle_put_unlocked(stru
        mutex_unlock(&dev->object_name_lock);
  
        if (final)
 -              drm_gem_object_put_unlocked(obj);
 +              drm_gem_object_put(obj);
  }
  
  /*
@@@ -331,7 -331,7 +331,7 @@@ int drm_gem_dumb_map_offset(struct drm_
  
        *offset = drm_vma_node_offset_addr(&obj->vma_node);
  out:
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
  
        return ret;
  }
@@@ -548,10 -548,6 +548,10 @@@ static void drm_gem_check_release_pagev
   * set during initialization. If you have special zone constraints, set them
   * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
   * to keep pages in the required zone during swap-in.
 + *
 + * This function is only valid on objects initialized with
 + * drm_gem_object_init(), but not for those initialized with
 + * drm_gem_private_object_init() only.
   */
  struct page **drm_gem_get_pages(struct drm_gem_object *obj)
  {
        struct pagevec pvec;
        int i, npages;
  
 +
 +      if (WARN_ON(!obj->filp))
 +              return ERR_PTR(-EINVAL);
 +
        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;
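
As an aside, a minimal sketch of the two initialization paths the new WARN_ON distinguishes; the example_bo type and functions below are hypothetical and not part of this merge. Only objects set up through drm_gem_object_init() get a shmem mapping in obj->filp, so only those may call drm_gem_get_pages(); a private object now fails cleanly with -EINVAL instead of dereferencing a NULL filp.

    #include <drm/drm_gem.h>

    struct example_bo {
            struct drm_gem_object base;     /* hypothetical driver object */
    };

    static int example_bo_init_shmem(struct drm_device *dev,
                                     struct example_bo *bo, size_t size)
    {
            /* shmem-backed: obj->filp is set, drm_gem_get_pages() is valid */
            return drm_gem_object_init(dev, &bo->base, size);
    }

    static void example_bo_init_private(struct drm_device *dev,
                                        struct example_bo *bo, size_t size)
    {
            /* no shmem backing: obj->filp stays NULL, so drm_gem_get_pages()
             * on this object now returns ERR_PTR(-EINVAL) */
            drm_gem_private_object_init(dev, &bo->base, size);
    }
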
  
@@@ -717,8 -709,6 +717,8 @@@ int drm_gem_objects_lookup(struct drm_f
        if (!objs)
                return -ENOMEM;
  
 +      *objs_out = objs;
 +
        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
        }
  
        ret = objects_lookup(filp, handles, count, objs);
 -      *objs_out = objs;
 -
  out:
        kvfree(handles);
        return ret;
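
The visible effect of moving the *objs_out assignment up is that the caller always receives the array it is expected to kvfree(), even when a later step fails before any handles are looked up. One plausible caller pattern, sketched with a hypothetical args struct and not taken from this merge:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <drm/drm_gem.h>

    struct example_submit_args {
            u64 bo_handles;                 /* user pointer to u32 handles */
            u32 bo_count;
    };

    static int example_lookup_bos(struct drm_file *file,
                                  struct example_submit_args *args,
                                  struct drm_gem_object ***bos_out)
    {
            struct drm_gem_object **bos = NULL;
            int ret;

            ret = drm_gem_objects_lookup(file,
                                         u64_to_user_ptr(args->bo_handles),
                                         args->bo_count, &bos);
            if (ret) {
                    /* bos now points at the allocated array even on failure
                     * (or is still NULL if the allocation itself failed);
                     * kvfree(NULL) is a no-op, so this never leaks it. A
                     * production caller may also need to put any object
                     * references already taken before the failure; that is
                     * omitted here for brevity. */
                    kvfree(bos);
                    return ret;
            }

            *bos_out = bos;
            return 0;
    }
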
@@@ -793,7 -785,7 +793,7 @@@ long drm_gem_dma_resv_wait(struct drm_f
        else if (ret > 0)
                ret = 0;
  
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
  
        return ret;
  }
@@@ -868,7 -860,7 +868,7 @@@ drm_gem_flink_ioctl(struct drm_device *
  
  err:
        mutex_unlock(&dev->object_name_lock);
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
        return ret;
  }
  
   * @file_priv: drm file-private structure
   *
   * Open an object using the global name, returning a handle and the size.
+  *
+  * This handle (of course) holds a reference to the object, so the object
+  * will not go away until the handle is deleted.
   */
  int
  drm_gem_open_ioctl(struct drm_device *dev, void *data,
        args->size = obj->size;
  
  err:
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
        return ret;
  }
  
@@@ -971,6 -966,7 +974,6 @@@ EXPORT_SYMBOL(drm_gem_object_release)
   * @kref: kref of the object to free
   *
   * Called after the last reference to the object has been lost.
 - * Must be called holding &drm_device.struct_mutex.
   *
   * Frees the object
   */
@@@ -981,15 -977,50 +984,15 @@@ drm_gem_object_free(struct kref *kref
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;
  
 -      if (obj->funcs) {
 +      if (obj->funcs)
                obj->funcs->free(obj);
 -      } else if (dev->driver->gem_free_object_unlocked) {
 +      else if (dev->driver->gem_free_object_unlocked)
                dev->driver->gem_free_object_unlocked(obj);
 -      } else if (dev->driver->gem_free_object) {
 -              WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 -
 -              dev->driver->gem_free_object(obj);
 -      }
  }
  EXPORT_SYMBOL(drm_gem_object_free);
  
  /**
 - * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 - * @obj: GEM buffer object
 - *
 - * This releases a reference to @obj. Callers must not hold the
 - * &drm_device.struct_mutex lock when calling this function.
 - *
 - * See also __drm_gem_object_put().
 - */
 -void
 -drm_gem_object_put_unlocked(struct drm_gem_object *obj)
 -{
 -      struct drm_device *dev;
 -
 -      if (!obj)
 -              return;
 -
 -      dev = obj->dev;
 -
 -      if (dev->driver->gem_free_object) {
 -              might_lock(&dev->struct_mutex);
 -              if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
 -                              &dev->struct_mutex))
 -                      mutex_unlock(&dev->struct_mutex);
 -      } else {
 -              kref_put(&obj->refcount, drm_gem_object_free);
 -      }
 -}
 -EXPORT_SYMBOL(drm_gem_object_put_unlocked);
 -
 -/**
 - * drm_gem_object_put - release a GEM buffer object reference
 + * drm_gem_object_put_locked - release a GEM buffer object reference
   * @obj: GEM buffer object
   *
   * This releases a reference to @obj. Callers must hold the
   * driver doesn't use &drm_device.struct_mutex for anything.
   *
   * For drivers not encumbered with legacy locking use
 - * drm_gem_object_put_unlocked() instead.
 + * drm_gem_object_put() instead.
   */
  void
 -drm_gem_object_put(struct drm_gem_object *obj)
 +drm_gem_object_put_locked(struct drm_gem_object *obj)
  {
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
                kref_put(&obj->refcount, drm_gem_object_free);
        }
  }
 -EXPORT_SYMBOL(drm_gem_object_put);
 +EXPORT_SYMBOL(drm_gem_object_put_locked);
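
For drivers adapting to the rename, the common unlocked path is simply drm_gem_object_put(); only legacy drivers that still serialize frees with &drm_device.struct_mutex keep using the _locked variant. A minimal sketch, with a placeholder handle-lookup caller that is not part of this merge:

    #include <drm/drm_gem.h>

    static int example_touch_bo(struct drm_file *file, u32 handle)
    {
            struct drm_gem_object *obj;

            obj = drm_gem_object_lookup(file, handle);      /* takes a ref */
            if (!obj)
                    return -ENOENT;

            /* ... operate on obj ... */

            drm_gem_object_put(obj);   /* was drm_gem_object_put_unlocked() */
            return 0;
    }
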
  
  /**
   * drm_gem_vm_open - vma->ops->open implementation for GEM
@@@ -1036,7 -1067,7 +1039,7 @@@ void drm_gem_vm_close(struct vm_area_st
  {
        struct drm_gem_object *obj = vma->vm_private_data;
  
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
  }
  EXPORT_SYMBOL(drm_gem_vm_close);
  
@@@ -1085,7 -1116,7 +1088,7 @@@ int drm_gem_mmap_obj(struct drm_gem_obj
        if (obj->funcs && obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret) {
 -                      drm_gem_object_put_unlocked(obj);
 +                      drm_gem_object_put(obj);
                        return ret;
                }
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
                else if (dev->driver->gem_vm_ops)
                        vma->vm_ops = dev->driver->gem_vm_ops;
                else {
 -                      drm_gem_object_put_unlocked(obj);
 +                      drm_gem_object_put(obj);
                        return -EINVAL;
                }
  
@@@ -1161,13 -1192,13 +1164,13 @@@ int drm_gem_mmap(struct file *filp, str
                return -EINVAL;
  
        if (!drm_vma_node_is_allowed(node, priv)) {
 -              drm_gem_object_put_unlocked(obj);
 +              drm_gem_object_put(obj);
                return -EACCES;
        }
  
        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
 -                      drm_gem_object_put_unlocked(obj);
 +                      drm_gem_object_put(obj);
                        return -EINVAL;
                }
  
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);
  
 -      drm_gem_object_put_unlocked(obj);
 +      drm_gem_object_put(obj);
  
        return ret;
  }
@@@ -1197,6 -1228,8 +1200,6 @@@ void drm_gem_print_info(struct drm_prin
  
        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
 -      else if (obj->dev->driver->gem_print_info)
 -              obj->dev->driver->gem_print_info(p, indent, obj);
  }
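
With the driver-wide gem_print_info fallback gone, the per-object funcs table is the only hook left. A sketch of what such a hook might look like; the example_* names and the trivial free path are hypothetical, not from this merge:

    #include <linux/slab.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_print.h>

    static void example_gem_free(struct drm_gem_object *obj)
    {
            drm_gem_object_release(obj);
            kfree(obj);
    }

    static void example_gem_print_info(struct drm_printer *p,
                                       unsigned int indent,
                                       const struct drm_gem_object *obj)
    {
            drm_printf_indent(p, indent, "example size=%zu\n", obj->size);
    }

    static const struct drm_gem_object_funcs example_gem_funcs = {
            .free = example_gem_free,
            .print_info = example_gem_print_info,
    };

A driver points obj->funcs at such a table when initializing the object, and drm_gem_print_info() then calls .print_info directly.
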
  
  int drm_gem_pin(struct drm_gem_object *obj)
@@@ -548,7 -548,6 +548,7 @@@ static int venc_bridge_attach(struct dr
  
  static enum drm_mode_status
  venc_bridge_mode_valid(struct drm_bridge *bridge,
 +                     const struct drm_display_info *info,
                       const struct drm_display_mode *mode)
  {
        switch (venc_get_videomode(mode)) {
@@@ -903,6 -902,7 +903,7 @@@ static int venc_runtime_resume(struct d
  static const struct dev_pm_ops venc_pm_ops = {
        .runtime_suspend = venc_runtime_suspend,
        .runtime_resume = venc_runtime_resume,
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
  };
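
The omapdrm hunk above reuses the driver's runtime-PM callbacks for system sleep. As a general illustration of that pattern (a sketch with hypothetical callbacks, not the venc code, which assigns .runtime_suspend/.runtime_resume directly):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int example_runtime_suspend(struct device *dev)
    {
            /* clock gating, context save, ... */
            return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
            /* clock ungating, context restore, ... */
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend,
                               example_runtime_resume, NULL)
            /* Route system suspend/resume through the runtime-PM callbacks,
             * late in the suspend order, so a runtime-active device is
             * actually powered down across system sleep. */
            SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                         pm_runtime_force_resume)
    };
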
  
  static const struct of_device_id venc_of_match[] = {
@@@ -1,6 -1,6 +1,6 @@@
  // SPDX-License-Identifier: GPL-2.0
  /*
 - * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 + * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
   * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
   */
  
@@@ -154,7 -154,7 +154,7 @@@ static int tidss_dispc_modeset_init(str
                                break;
                        case DISPC_VP_DPI:
                                enc_type = DRM_MODE_ENCODER_DPI;
-                               conn_type = DRM_MODE_CONNECTOR_LVDS;
+                               conn_type = DRM_MODE_CONNECTOR_DPI;
                                break;
                        default:
                                WARN_ON(1);
  int tidss_modeset_init(struct tidss_device *tidss)
  {
        struct drm_device *ddev = &tidss->ddev;
 -      unsigned int i;
        int ret;
  
        dev_dbg(tidss->dev, "%s\n", __func__);
        if (ret)
                return ret;
  
 -      /* Start with vertical blanking interrupt reporting disabled. */
 -      for (i = 0; i < tidss->num_crtcs; ++i)
 -              drm_crtc_vblank_reset(tidss->crtcs[i]);
 -
        drm_mode_config_reset(ddev);
  
        dev_dbg(tidss->dev, "%s done\n", __func__);
@@@ -351,11 -351,6 +351,11 @@@ vm_fault_t ttm_bo_vm_fault_reserved(str
  
                };
  
 +              if (ttm_tt_create(bo, true)) {
 +                      ret = VM_FAULT_OOM;
 +                      goto out_io_unlock;
 +              }
 +
                ttm = bo->ttm;
                if (ttm_tt_populate(bo->ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
@@@ -510,8 -505,10 +510,10 @@@ static int ttm_bo_vm_access_kmap(struc
  int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                     void *buf, int len, int write)
  {
-       unsigned long offset = (addr) - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
+       unsigned long offset = (addr) - vma->vm_start +
+               ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+                << PAGE_SHIFT);
        int ret;
  
        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)