drm/etnaviv: rework MMU handling
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index d7cc184..5ca2077 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -13,7 +13,6 @@
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
 
 
 #define MMUv2_MAX_STLB_ENTRIES         1024
 
-struct etnaviv_iommuv2_domain {
-       struct etnaviv_iommu_domain base;
-       /* P(age) T(able) A(rray) */
-       u64 *pta_cpu;
-       dma_addr_t pta_dma;
+struct etnaviv_iommuv2_context {
+       struct etnaviv_iommu_context base;
+       unsigned short id;
        /* M(aster) TLB aka first level pagetable */
        u32 *mtlb_cpu;
        dma_addr_t mtlb_dma;
@@ -41,41 +38,62 @@ struct etnaviv_iommuv2_domain {
        dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
 };
 
-static struct etnaviv_iommuv2_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
 {
-       return container_of(domain, struct etnaviv_iommuv2_domain, base);
+       return container_of(context, struct etnaviv_iommuv2_context, base);
 }
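
The downcast helper relies on the generic context being embedded as the first member of the v2 structure; container_of() recovers the enclosing object from a pointer to that member. Not part of the patch, a self-contained userspace sketch of the same pattern:

    /* Minimal container_of illustration; the kernel macro is equivalent
     * for this use, with extra type checking. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct iommu_context { int dummy; };

    struct iommuv2_context {
            struct iommu_context base;      /* embedded base, as above */
            unsigned short id;
    };

    int main(void)
    {
            struct iommuv2_context v2 = { .id = 7 };
            struct iommu_context *ctx = &v2.base;  /* what callers hold */
            struct iommuv2_context *back =
                    container_of(ctx, struct iommuv2_context, base);

            printf("id = %hu\n", back->id);        /* prints: id = 7 */
            return 0;
    }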
 
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
+{
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+       int i;
+
+       drm_mm_takedown(&context->mm);
+
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+               if (v2_context->stlb_cpu[i])
+                       dma_free_wc(context->global->dev, SZ_4K,
+                                   v2_context->stlb_cpu[i],
+                                   v2_context->stlb_dma[i]);
+       }
+
+       dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+                   v2_context->mtlb_dma);
+
+       clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+       vfree(v2_context);
+}
+
 static int
-etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
                            int stlb)
 {
-       if (etnaviv_domain->stlb_cpu[stlb])
+       if (v2_context->stlb_cpu[stlb])
                return 0;
 
-       etnaviv_domain->stlb_cpu[stlb] =
-                       dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-                                    &etnaviv_domain->stlb_dma[stlb],
+       v2_context->stlb_cpu[stlb] =
+                       dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+                                    &v2_context->stlb_dma[stlb],
                                     GFP_KERNEL);
 
-       if (!etnaviv_domain->stlb_cpu[stlb])
+       if (!v2_context->stlb_cpu[stlb])
                return -ENOMEM;
 
-       memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+       memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
                 SZ_4K / sizeof(u32));
 
-       etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
-                                                     MMUv2_PTE_PRESENT;
+       v2_context->mtlb_cpu[stlb] =
+                       v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
        return 0;
 }
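
etnaviv_iommuv2_ensure_stlb() allocates second-level tables lazily and fills the fresh table with exception PTEs before it stores the MTLB entry, so the table walker can never see a half-initialized STLB. A sketch of that allocate-then-publish ordering, with hypothetical names and the PTE bits assumed from this file's MMUv2_PTE_* defines (PRESENT = bit 0, EXCEPTION = bit 1):

    /* Initialize the new second-level table fully, then publish it in
     * the first-level table with a single store. */
    #include <stdint.h>
    #include <stdlib.h>

    #define PTE_PRESENT     (1u << 0)
    #define PTE_EXCEPTION   (1u << 1)
    #define STLB_ENTRIES    1024u

    static int ensure_stlb(uint32_t *mtlb, uint32_t **stlb_cpu,
                           unsigned int idx)
    {
            uint32_t *stlb;
            unsigned int i;

            if (stlb_cpu[idx])
                    return 0;                /* already populated */

            stlb = malloc(STLB_ENTRIES * sizeof(*stlb));
            if (!stlb)
                    return -1;

            for (i = 0; i < STLB_ENTRIES; i++)
                    stlb[i] = PTE_EXCEPTION; /* fault on unmapped pages */

            stlb_cpu[idx] = stlb;
            /* publish last; the driver stores the table's dma address
             * here, a CPU pointer stands in for it in this sketch */
            mtlb[idx] = (uint32_t)(uintptr_t)stlb | PTE_PRESENT;
            return 0;
    }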
 
-static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(domain);
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
        int mtlb_entry, stlb_entry, ret;
        u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
 
@@ -91,20 +109,19 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
        mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
        stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
 
-       ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
+       ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
        if (ret)
                return ret;
 
-       etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+       v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
 
        return 0;
 }
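
With 4K tables of 32-bit entries both levels hold 1024 entries, so one MTLB entry covers 4 MiB and one STLB entry a single 4 KiB page. A worked split of an example iova, with the mask and shift values assumed from this file's MMUv2_MTLB_*/MMUv2_STLB_* defines:

    /* Two-level index split as done in the map/unmap paths. */
    #include <stdint.h>
    #include <stdio.h>

    #define MTLB_MASK   0xffc00000u  /* top 10 bits: 1024 MTLB entries */
    #define MTLB_SHIFT  22           /* each entry spans 4 MiB */
    #define STLB_MASK   0x003ff000u  /* next 10 bits: 1024 STLB entries */
    #define STLB_SHIFT  12           /* each entry spans 4 KiB */

    int main(void)
    {
            uint32_t iova = 0x12345000;
            unsigned int mtlb = (iova & MTLB_MASK) >> MTLB_SHIFT;
            unsigned int stlb = (iova & STLB_MASK) >> STLB_SHIFT;

            /* prints: iova 0x12345000 -> mtlb 72, stlb 837 */
            printf("iova 0x%08x -> mtlb %u, stlb %u\n",
                   (unsigned int)iova, mtlb, stlb);
            return 0;
    }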
 
-static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
                                    unsigned long iova, size_t size)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(domain);
+       struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
        int mtlb_entry, stlb_entry;
 
        if (size != SZ_4K)
@@ -118,118 +135,35 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
        return SZ_4K;
 }
 
-static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
-{
-       int ret;
-
-       /* allocate scratch page */
-       etnaviv_domain->base.bad_page_cpu =
-                       dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-                                    &etnaviv_domain->base.bad_page_dma,
-                                    GFP_KERNEL);
-       if (!etnaviv_domain->base.bad_page_cpu) {
-               ret = -ENOMEM;
-               goto fail_mem;
-       }
-
-       memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
-                SZ_4K / sizeof(u32));
-
-       etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-                                              SZ_4K, &etnaviv_domain->pta_dma,
-                                              GFP_KERNEL);
-       if (!etnaviv_domain->pta_cpu) {
-               ret = -ENOMEM;
-               goto fail_mem;
-       }
-
-       etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-                                               SZ_4K, &etnaviv_domain->mtlb_dma,
-                                               GFP_KERNEL);
-       if (!etnaviv_domain->mtlb_cpu) {
-               ret = -ENOMEM;
-               goto fail_mem;
-       }
-
-       memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
-                MMUv2_MAX_STLB_ENTRIES);
-
-       return 0;
-
-fail_mem:
-       if (etnaviv_domain->base.bad_page_cpu)
-               dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                           etnaviv_domain->base.bad_page_cpu,
-                           etnaviv_domain->base.bad_page_dma);
-
-       if (etnaviv_domain->pta_cpu)
-               dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                           etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
-       if (etnaviv_domain->mtlb_cpu)
-               dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                           etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
-       return ret;
-}
-
-static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
-{
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(domain);
-       int i;
-
-       dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                   etnaviv_domain->base.bad_page_cpu,
-                   etnaviv_domain->base.bad_page_dma);
-
-       dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                   etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
-       dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                   etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
-       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
-               if (etnaviv_domain->stlb_cpu[i])
-                       dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-                                   etnaviv_domain->stlb_cpu[i],
-                                   etnaviv_domain->stlb_dma[i]);
-       }
-
-       vfree(etnaviv_domain);
-}
-
-static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(domain);
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
        size_t dump_size = SZ_4K;
        int i;
 
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
-               if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
                        dump_size += SZ_4K;
 
        return dump_size;
 }
 
-static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(domain);
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
        int i;
 
-       memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+       memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
        for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-               if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
-                       memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+                       memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
 }
 
-static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+       struct etnaviv_iommu_context *context)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                       to_etnaviv_domain(gpu->mmu->domain);
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
        u16 prefetch;
 
        /* If the MMU is already enabled the state is still there. */
@@ -237,8 +171,8 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
                return;
 
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
-                               (u32)etnaviv_domain->mtlb_dma,
-                               (u32)etnaviv_domain->base.bad_page_dma);
+                               (u32)v2_context->mtlb_dma,
+                               (u32)context->global->bad_page_dma);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);
@@ -246,10 +180,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
        gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
 }
 
-static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+       struct etnaviv_iommu_context *context)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain =
-                               to_etnaviv_domain(gpu->mmu->domain);
+       struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
        u16 prefetch;
 
        /* If the MMU is already enabled the state is still there. */
@@ -257,26 +191,26 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
                return;
 
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
-                 lower_32_bits(etnaviv_domain->pta_dma));
+                 lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
-                 upper_32_bits(etnaviv_domain->pta_dma));
+                 upper_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
 
        gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
-                 lower_32_bits(etnaviv_domain->base.bad_page_dma));
+                 lower_32_bits(context->global->bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
-                 lower_32_bits(etnaviv_domain->base.bad_page_dma));
+                 lower_32_bits(context->global->bad_page_dma));
        gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
-                 upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+                 upper_32_bits(context->global->bad_page_dma)) |
                  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
-                 upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+                 upper_32_bits(context->global->bad_page_dma)));
 
-       etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
-                                    VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+       context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+                                VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
 
        /* trigger a PTA load through the FE */
-       prefetch = etnaviv_buffer_config_pta(gpu);
+       prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
        etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
                             prefetch);
        etnaviv_gpu_wait_idle(gpu, 100);
@@ -284,14 +218,15 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
        gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
 }
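
In the secure path the GPU is not pointed at an MTLB directly: it walks the global P(age) T(able) A(rray), and every context owns one u64 slot in it, indexed by v2_context->id, holding that context's MTLB address plus the 4K page-table mode. A context switch is then just a matter of rewriting the slot and having the FE reload it. A hedged sketch of the slot bookkeeping (entry count and mode encoding are assumptions, not taken from the patch):

    /* One u64 descriptor per context in the global PTA. */
    #include <stdint.h>

    #define PTA_ENTRIES  512   /* assumed: SZ_4K / sizeof(u64) */
    #define MODE4_K      0x0u  /* assumed encoding of the 4K mode bit */

    static uint64_t pta[PTA_ENTRIES];

    static void pta_set(unsigned short id, uint64_t mtlb_dma)
    {
            pta[id] = mtlb_dma | MODE4_K;
            /* the driver then triggers a PTA load of slot 'id'
             * through the FE (etnaviv_buffer_config_pta) */
    }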
 
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+                                   struct etnaviv_iommu_context *context)
 {
        switch (gpu->sec_mode) {
        case ETNA_SEC_NONE:
-               etnaviv_iommuv2_restore_nonsec(gpu);
+               etnaviv_iommuv2_restore_nonsec(gpu, context);
                break;
        case ETNA_SEC_KERNEL:
-               etnaviv_iommuv2_restore_sec(gpu);
+               etnaviv_iommuv2_restore_sec(gpu, context);
                break;
        default:
                WARN(1, "unhandled GPU security mode\n");
@@ -299,39 +234,56 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
        }
 }
 
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
-       .free = etnaviv_iommuv2_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+       .free = etnaviv_iommuv2_free,
        .map = etnaviv_iommuv2_map,
        .unmap = etnaviv_iommuv2_unmap,
        .dump_size = etnaviv_iommuv2_dump_size,
        .dump = etnaviv_iommuv2_dump,
+       .restore = etnaviv_iommuv2_restore,
 };
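
With restore moved behind the ops table and given the context as an argument, the core MMU code can switch page tables without knowing which MMU version it drives, and the formerly exported etnaviv_iommuv2_restore() becomes static. A minimal sketch of that dispatch (hypothetical names):

    /* Miniature of the ops-table dispatch. */
    struct gpu;
    struct context;

    struct iommu_ops {
            void (*restore)(struct gpu *gpu, struct context *ctx);
    };

    struct context {
            const struct iommu_ops *ops;
    };

    static inline void context_restore(struct gpu *gpu, struct context *ctx)
    {
            ctx->ops->restore(gpu, ctx);  /* v1 or v2, chosen at runtime */
    }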
 
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
 {
-       struct etnaviv_iommuv2_domain *etnaviv_domain;
-       struct etnaviv_iommu_domain *domain;
-       int ret;
+       struct etnaviv_iommuv2_context *v2_context;
+       struct etnaviv_iommu_context *context;
 
-       etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
-       if (!etnaviv_domain)
+       v2_context = vzalloc(sizeof(*v2_context));
+       if (!v2_context)
                return NULL;
 
-       domain = &etnaviv_domain->base;
+       mutex_lock(&global->lock);
+       v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+                                            ETNAVIV_PTA_ENTRIES);
+       if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+               set_bit(v2_context->id, global->v2.pta_alloc);
+       } else {
+               mutex_unlock(&global->lock);
+               goto out_free;
+       }
+       mutex_unlock(&global->lock);
 
-       domain->dev = gpu->dev;
-       domain->base = SZ_4K;
-       domain->size = (u64)SZ_1G * 4 - SZ_4K;
-       domain->ops = &etnaviv_iommuv2_ops;
+       v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+                                           &v2_context->mtlb_dma, GFP_KERNEL);
+       if (!v2_context->mtlb_cpu)
+               goto out_free_id;
 
-       ret = etnaviv_iommuv2_init(etnaviv_domain);
-       if (ret)
-               goto out_free;
+       memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+                MMUv2_MAX_STLB_ENTRIES);
+
+       context = &v2_context->base;
+       context->global = global;
+       kref_init(&context->refcount);
+       mutex_init(&context->lock);
+       INIT_LIST_HEAD(&context->mappings);
+       drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
 
-       return &etnaviv_domain->base;
+       return context;
 
+out_free_id:
+       clear_bit(v2_context->id, global->v2.pta_alloc);
 out_free:
-       vfree(etnaviv_domain);
+       vfree(v2_context);
        return NULL;
 }
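
Context IDs come from the global pta_alloc bitmap: find_first_zero_bit() picks the lowest free slot under global->lock, set_bit() claims it, and the failure paths give it back with clear_bit(). A userspace analogue of this allocator, with hypothetical names and 64 slots for brevity:

    /* Bitmap ID allocator: lowest free bit wins, freeing clears it. */
    #include <pthread.h>

    #define NUM_IDS 64

    static unsigned long long bitmap;  /* bit n set = id n in use */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int id_alloc(void)
    {
            int id;

            pthread_mutex_lock(&lock);
            for (id = 0; id < NUM_IDS; id++) {
                    if (!(bitmap & (1ULL << id))) {
                            bitmap |= 1ULL << id;   /* claim the slot */
                            pthread_mutex_unlock(&lock);
                            return id;
                    }
            }
            pthread_mutex_unlock(&lock);
            return -1;                              /* all ids in use */
    }

    static void id_free(int id)
    {
            pthread_mutex_lock(&lock);
            bitmap &= ~(1ULL << id);
            pthread_mutex_unlock(&lock);
    }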