drm/msm/a6xx: update/fix CP_PROTECT initialization
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 1306618..9702bec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/devfreq.h>
+#include <linux/nvmem-consumer.h>
 #include <linux/soc/qcom/llcc-qcom.h>
 
 #define GPU_PAS_ID 13
@@ -156,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
         * GPU registers so we need to add 0x1a800 to the register value on A630
         * to get the right value from PM4.
         */
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_start));
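The comment above predates this change; a sketch of why the CP counter needs no fixup, assuming the CP register mirrors the GMU always-on counter into GPU-relative space (inferred from the comment, not stated in the patch itself):

/*
 * REG_A6XX_GMU_ALWAYS_ON_COUNTER_L is a GMU-relative offset, so PM4
 * packets (which address registers GPU-relative) needed the +0x1a800
 * fixup on A630. REG_A6XX_CP_ALWAYS_ON_COUNTER_LO is already a
 * GPU-relative CP register, so the same register value works
 * unmodified from both PM4 and CPU-side gpu_read64().
 */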
 
        /* Invalidate CCU depth and color */
@@ -186,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_end));
-       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+       get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
                rbmemptr_stats(ring, index, alwayson_end));
 
        /* Write the fence to the scratch register */
@@ -205,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
        OUT_RING(ring, submit->seqno);
 
        trace_msm_gpu_submit_flush(submit,
-               gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-                       REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+               gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+                       REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
 
        a6xx_flush(gpu, ring);
 }
@@ -461,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
        gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }
 
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
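Each table entry packs the protected span into a single CP_PROTECT dword. A minimal sketch of the encoding, assuming the layout used by the A6XX_PROTECT_RDONLY()/A6XX_PROTECT_NORDWR() helpers in adreno_gpu.h (base dword offset in the low bits, length capped at 0x1fff above it, top bit blocking reads as well as writes); demo_protect_entry() is a hypothetical illustration, not the kernel macro:

#include <linux/bits.h>

static inline u32 demo_protect_entry(u32 base, u32 len, bool noread)
{
	return (noread ? BIT(31) : 0) |	/* NORDWR: fault reads too */
	       ((len & 0x1fff) << 18) |	/* registers protected past base */
	       (base & 0x3ffff);	/* first protected register (dword offset) */
}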
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+       A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+       A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+       A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+       A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+       A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+       A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+       A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+       A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+       A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+       A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+       A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+       A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+       A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+       A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+       A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+       A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+       A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+       A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+       A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+       A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+       A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+       A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+       A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+       A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+       A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+       A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+       A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+       A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+       A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+       A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+       A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       const u32 *regs = a6xx_protect;
+       unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+       BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+       BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+       if (adreno_is_a650(adreno_gpu)) {
+               regs = a650_protect;
+               count = ARRAY_SIZE(a650_protect);
+               count_max = 48;
+       }
+
+       /*
+        * Enable access protection to privileged registers, fault on an access
+        * protect violation and select the last span to protect from the start
+        * address all the way to the end of the register address space
+        */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+       for (i = 0; i < count - 1; i++)
+               gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+       /* last CP_PROTECT to have "infinite" length on the last entry */
+       gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
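Spelling out the tail handling with the entry counts from the tables above:

/*
 * a6xx_protect (32 entries, count_max = 32):
 *   slots 0..30 <- regs[0..30]         bounded spans
 *   slot  31    <- regs[31] (0x11c00)  open-ended: CP_PROTECT_CNTL BIT(3)
 *                                      extends the *last* slot to the end
 *                                      of the register address space
 * a650_protect (39 entries, count_max = 48):
 *   slots 0..37 <- regs[0..37]
 *   slot  47    <- regs[38] (0x1f8c0)  slots 38..46 are left unprogrammed
 */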
+
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -521,28 +629,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
                struct drm_gem_object *obj)
 {
+       struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+       struct msm_gpu *gpu = &adreno_gpu->base;
        u32 *buf = msm_gem_get_vaddr(obj);
+       bool ret = false;
 
        if (IS_ERR(buf))
-               return;
+               return false;
 
        /*
-        * If the lowest nibble is 0xa that is an indication that this microcode
-        * has been patched. The actual version is in dword [3] but we only care
-        * about the patchlevel which is the lowest nibble of dword [3]
-        *
-        * Otherwise check that the firmware is greater than or equal to 1.90
-        * which was the first version that had this fix built in
+        * Targets up to a640 (a618, a630 and a640) need to check for a
+        * microcode version that is patched to support the whereami opcode or
+        * one that is new enough to include it by default.
         */
-       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
-               a6xx_gpu->has_whereami = true;
-       else if ((buf[0] & 0xfff) > 0x190)
-               a6xx_gpu->has_whereami = true;
+       if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+               adreno_is_a640(adreno_gpu)) {
+               /*
+                * If the lowest nibble is 0xa that is an indication that this
+                * microcode has been patched. The actual version is in dword
+                * [3] but we only care about the patchlevel which is the lowest
+                * nibble of dword [3]
+                *
+                * Otherwise check that the firmware is greater than or equal
+                * to 1.90 which was the first version that had this fix built
+                * in
+                */
+               if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+                       (buf[0] & 0xfff) >= 0x190) {
+                       a6xx_gpu->has_whereami = true;
+                       ret = true;
+                       goto out;
+               }
 
+               DRM_DEV_ERROR(&gpu->pdev->dev,
+                       "a630 SQE ucode is too old. Have version %x need at least %x\n",
+                       buf[0] & 0xfff, 0x190);
+       } else {
+               /*
+                * a650 tier targets don't need whereami but still need to be
+                * equal to or newer than 0.95 for other security fixes
+                */
+               if (adreno_is_a650(adreno_gpu)) {
+                       if ((buf[0] & 0xfff) >= 0x095) {
+                               ret = true;
+                               goto out;
+                       }
+
+                       DRM_DEV_ERROR(&gpu->pdev->dev,
+                               "a650 SQE ucode is too old. Have version %x need at least %x\n",
+                               buf[0] & 0xfff, 0x095);
+               }
+
+               /*
+                * When a660 is added those targets should return true here
+                * since those have all the critical security fixes built in
+                * from the start
+                */
+       }
+out:
        msm_gem_put_vaddr(obj);
+       return ret;
 }
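A few illustrative buf[0] values for the decode above (example numbers, not real firmware versions):

/*
 *   buf[0] & 0xfff == 0x185 -> v1.85: rejected on a618/a630/a640
 *   buf[0] & 0xfff == 0x190 -> v1.90: first version with whereami built in
 *   buf[0] & 0xf   == 0xa   -> patched ucode: version field is ignored and
 *                              the patchlevel (buf[2] & 0xf) must be >= 1
 *   buf[0] & 0xfff == 0x095 -> v0.95: minimum for the a650 tier
 */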
 
 static int a6xx_ucode_init(struct msm_gpu *gpu)
@@ -565,7 +718,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
-               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+               if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+                       msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+                       drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+                       a6xx_gpu->sqe_bo = NULL;
+                       return -EPERM;
+               }
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -724,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        }
 
        /* Protect registers from the CP */
-       gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-               A6XX_PROTECT_RDONLY(0x600, 0x51));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-               A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-               A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-               A6XX_PROTECT_RDONLY(0x501, 0xa));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-               A6XX_PROTECT_RDONLY(0x511, 0x44));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-               A6XX_PROTECT_RW(0xbe20, 0x11f3));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-                       A6XX_PROTECT_RDONLY(0x980, 0x4));
-       gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+       a6xx_set_cp_protect(gpu);
 
        /* Enable expanded apriv for targets that support it */
        if (gpu->hw_apriv) {
@@ -1101,10 +1226,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
 {
        struct device_node *phandle;
 
-       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
-       if (IS_ERR(a6xx_gpu->llc_mmio))
-               return;
-
        /*
         * There is a different programming path for targets with an mmu500
         * attached, so detect if that is the case
@@ -1114,10 +1235,15 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
                of_device_is_compatible(phandle, "arm,mmu-500"));
        of_node_put(phandle);
 
+       if (a6xx_gpu->have_mmu500)
+               a6xx_gpu->llc_mmio = NULL;
+       else
+               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+
        a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
        a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
 
-       if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
+       if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
                a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
 }
 
@@ -1169,14 +1295,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       static DEFINE_MUTEX(perfcounter_oob);
+
+       mutex_lock(&perfcounter_oob);
 
        /* Force the GPU power on so we can read this register */
-       a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+       a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
-       a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+       a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+       mutex_unlock(&perfcounter_oob);
        return 0;
 }
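The function-scope static mutex has static storage duration, so every caller serializes on the same lock; a sketch of the interleaving it is meant to rule out:

/*
 * Without the lock, two concurrent timestamp reads could interleave as:
 *   A: set_oob(PERFCOUNTER)   B: set_oob(PERFCOUNTER)
 *   A: gpu_read64(...)        A: clear_oob(PERFCOUNTER)
 *   B: gpu_read64(...)        <- vote already dropped, GPU may power down
 * Holding perfcounter_oob across set/read/clear keeps the sequence atomic.
 */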
 
@@ -1208,6 +1338,10 @@ static void a6xx_destroy(struct msm_gpu *gpu)
        a6xx_gmu_remove(a6xx_gpu);
 
        adreno_gpu_cleanup(adreno_gpu);
+
+       if (a6xx_gpu->opp_table)
+               dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table);
+
        kfree(a6xx_gpu);
 }
 
@@ -1239,6 +1373,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct iommu_domain *iommu;
+       struct msm_mmu *mmu;
+       struct msm_gem_address_space *aspace;
+       u64 start, size;
+
+       iommu = iommu_domain_alloc(&platform_bus_type);
+       if (!iommu)
+               return NULL;
+
+       /*
+        * This allows GPU to set the bus attributes required to use system
+        * cache on behalf of the iommu page table walker.
+        */
+       if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+               adreno_set_llc_attributes(iommu);
+
+       mmu = msm_iommu_new(&pdev->dev, iommu);
+       if (IS_ERR(mmu)) {
+               iommu_domain_free(iommu);
+               return ERR_CAST(mmu);
+       }
+
+       /*
+        * Use the aperture start or SZ_16M, whichever is greater. This will
+        * ensure that we align with the allocated pagetable range while still
+        * allowing room in the lower 32 bits for GMEM and whatnot
+        */
+       start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+       size = iommu->geometry.aperture_end - start + 1;
+
+       aspace = msm_gem_address_space_create(mmu, "gpu",
+               start & GENMASK_ULL(48, 0), size);
+
+       if (IS_ERR(aspace) && !IS_ERR(mmu))
+               mmu->funcs->destroy(mmu);
+
+       return aspace;
+}
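A worked example of the start/size math, with a hypothetical SMMU aperture of [0, 2^48 - 1]:

/*
 *   start = max_t(u64, SZ_16M, 0x0)     = 0x0000000001000000
 *   size  = 0xffffffffffff - start + 1  = 0x0000ffffff000000
 *   start & GENMASK_ULL(48, 0)          = start (the mask only matters
 *                                         for apertures above 49 bits)
 */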
+
 static struct msm_gem_address_space *
 a6xx_create_private_address_space(struct msm_gpu *gpu)
 {
@@ -1264,6 +1442,69 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
 }
 
+static u32 a618_get_speed_bin(u32 fuse)
+{
+       if (fuse == 0)
+               return 0;
+       else if (fuse == 169)
+               return 1;
+       else if (fuse == 174)
+               return 2;
+
+       return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse)
+{
+       u32 val = UINT_MAX;
+
+       if (revn == 618)
+               val = a618_get_speed_bin(fuse);
+
+       if (val == UINT_MAX) {
+               DRM_DEV_ERROR(dev,
+                       "missing support for speed-bin: %u. Some OPPs may not be supported by hardware",
+                       fuse);
+               return UINT_MAX;
+       }
+
+       return (1 << val);
+}
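Tracing the mapping through for an a618 part (opp-supported-hw is the standard OPP devicetree binding):

/*
 * fuse 169 -> a618_get_speed_bin() == 1 -> supp_hw = BIT(1) = 0x2.
 * dev_pm_opp_set_supported_hw(dev, &supp_hw, 1) then keeps only OPPs
 * whose opp-supported-hw mask intersects 0x2: an OPP marked <0x2> or
 * <0x3> stays available, one marked <0x1> is disabled.
 */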
+
+static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
+               u32 revn)
+{
+       struct opp_table *opp_table;
+       u32 supp_hw = UINT_MAX;
+       u16 speedbin;
+       int ret;
+
+       ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+       /*
+        * -ENOENT means that the platform doesn't support speedbin which is
+        * fine
+        */
+       if (ret == -ENOENT) {
+               return 0;
+       } else if (ret) {
+               DRM_DEV_ERROR(dev,
+                             "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+                             ret);
+               goto done;
+       }
+       speedbin = le16_to_cpu(speedbin);
+
+       supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
+
+done:
+       opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+       if (IS_ERR(opp_table))
+               return PTR_ERR(opp_table);
+
+       a6xx_gpu->opp_table = opp_table;
+       return 0;
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -1285,7 +1526,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
 #endif
-               .create_address_space = adreno_iommu_create_address_space,
+               .create_address_space = a6xx_create_address_space,
                .create_private_address_space = a6xx_create_private_address_space,
                .get_rptr = a6xx_get_rptr,
        },
@@ -1325,6 +1566,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 
        a6xx_llc_slices_init(pdev, a6xx_gpu);
 
+       ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn);
+       if (ret) {
+               a6xx_destroy(&(a6xx_gpu->base.base));
+               return ERR_PTR(ret);
+       }
+
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret) {
                a6xx_destroy(&(a6xx_gpu->base.base));