#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_inactive(struct msm_gem_object *msm_obj);
+static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->pages;
* pages are clean because display controller,
* GPU, etc. are not coherent:
*/
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
}
}
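
For context, the sync_for_device()/sync_for_cpu() helpers used above live elsewhere in msm_gem.c and are not part of this hunk; they are (roughly) thin wrappers over the streaming DMA-mapping API, which performs the cache maintenance for these non-coherent devices:

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* clean CPU caches so the device sees up-to-date contents */
	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	/* hand ownership back to the CPU before the pages are freed */
	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}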
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- msm_gem_lock(obj);
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
-
if (!IS_ERR(p)) {
msm_obj->pin_count++;
- update_inactive(msm_obj);
+ update_lru(obj);
}
+ return p;
+}
+
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+{
+ struct page **p;
+
+ msm_gem_lock(obj);
+ p = msm_gem_pin_pages_locked(obj);
msm_gem_unlock(obj);
+
return p;
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
}
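
A usage sketch of the renamed pair (example_use_pages() is hypothetical, not part of this patch):

static int example_use_pages(struct drm_gem_object *obj)
{
	struct page **p;

	/* takes obj->resv, pins the backing pages, drops the lock */
	p = msm_gem_pin_pages(obj);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* ... access the pages; they cannot be purged meanwhile ... */

	/* drop the pin so the object becomes reclaimable again */
	msm_gem_unpin_pages(obj);

	return 0;
}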
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ if (msm_obj->flags & MSM_BO_WC)
return pgprot_writecombine(prot);
return prot;
}
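
For reference, the same helper is applied on the kernel-side mapping path; the vmap call elsewhere in msm_gem.c looks approximately like:

	msm_obj->vaddr = vmap(msm_obj->pages, obj->size >> PAGE_SHIFT,
			      VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));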
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
out_unlock:
msm_gem_unlock(obj);
out:
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
}
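
The vmf_insert_pfn() switch above pairs with the VM_PFNMAP change in msm_gem_mmap() further down; a condensed, hedged sketch of the fault path around it (locking and error paths elided):

static vm_fault_t msm_gem_fault_sketch(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct page **pages;
	pgoff_t pgoff;

	/* vmf->pgoff carries the fake mmap offset, so compute our own: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return VM_FAULT_SIGBUS;

	/* requires VM_PFNMAP; returns a VM_FAULT_* code directly */
	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(pages[pgoff]));
}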
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
goto fail;
}
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->vaddr;
* between inactive lists
*/
if (msm_obj->active_count == 0)
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- update_inactive(msm_obj);
+ update_lru(obj);
drm_gem_free_mmap_offset(obj);
put_pages(obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
GEM_WARN_ON(!msm_gem_is_locked(obj));
if (--msm_obj->active_count == 0) {
- update_inactive(msm_obj);
+ update_lru(obj);
}
}
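
For symmetry, the get side (msm_gem_active_get(), outside this excerpt) does roughly the inverse on the 0 -> 1 transition, moving the object onto the GPU's active list instead of calling update_lru():

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}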
-static void update_inactive(struct msm_gem_object *msm_obj)
+static void update_lru(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
mutex_unlock(&priv->mm_lock);
}
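
The body elided between these context lines is untouched by the rename; per the old update_inactive(), it reshuffles the object between the inactive lists under mm_lock according to its madvise state, roughly:

	mutex_lock(&priv->mm_lock);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	else if (msm_obj->madv == MSM_MADV_DONTNEED)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
	else
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);

	mutex_unlock(&priv->mm_lock);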
+bool msm_gem_active(struct drm_gem_object *obj)
+{
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+
+ if (to_msm_bo(obj)->pin_count)
+ return true;
+
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+}
+
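A hedged sketch of how a caller might use the new helper (example_can_purge() is hypothetical; think shrinker-style logic deciding whether an object is reclaimable):

static bool example_can_purge(struct drm_gem_object *obj)
{
	bool active;

	msm_gem_lock(obj);
	/* true while pinned or while read/write fences remain unsignaled */
	active = msm_gem_active(obj);
	msm_gem_unlock(obj);

	return !active;
}
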
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
bool write = !!(op & MSM_PREP_WRITE);
#endif
/* don't call directly! Use drm_gem_object_put() */
-void msm_gem_free_object(struct drm_gem_object *obj)
+static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
list_del(&msm_obj->mm_list);
mutex_unlock(&priv->mm_lock);
- msm_gem_lock(obj);
-
/* object should not be on active list: */
GEM_WARN_ON(is_active(msm_obj));
put_iova_vmas(obj);
- /* dma_buf_detach() grabs resv lock, so we need to unlock
- * prior to drm_prime_gem_destroy
- */
- msm_gem_unlock(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
- msm_gem_unlock(obj);
}
drm_gem_object_release(obj);
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
- case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
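
Requests using the dropped MSM_BO_UNCACHED case no longer match an accepted cache mode in this switch; its default branch (below this hunk, shown approximately) rejects unrecognized values:

	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
			      (flags & MSM_BO_CACHE_MASK));
		return ERR_PTR(-EINVAL);
	}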