phys_addr_t paddr, size_t size, int prot)
 {
        struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
-       const struct iommu_gather_ops *tlb = cfg->tlb;
-       void *cookie = data->iop.cookie;
+       struct io_pgtable *iop = &data->iop;
        int ret;
 
        /* If no access, then nothing to do */
[...]
        /*
         * Synchronise all PTE updates for the new mapping before there's
         * a chance for anything to kick off a table walk for the new iova.
         */
-       if (cfg->quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
-               tlb->tlb_add_flush(iova, size, ARM_V7S_BLOCK_SIZE(2), false,
-                                  cookie);
-               tlb->tlb_sync(cookie);
+       if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
+               io_pgtable_tlb_add_flush(iop, iova, size,
+                                        ARM_V7S_BLOCK_SIZE(2), false);
+               io_pgtable_tlb_sync(iop);
        } else {
                wmb();
        }
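
For reference, the two helpers this conversion switches to can be inferred
straight from the call sites: each folds the cfg->tlb indirection and the
trailing cookie argument into a single struct io_pgtable handle. A minimal
sketch, assuming io_pgtable carries the cfg and cookie that the removed
lines fetched by hand (inferred from the hunks, not copied from the tree):

	static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
			unsigned long iova, size_t size, size_t granule,
			bool leaf)
	{
		/* same underlying callback; cookie now comes from the handle */
		iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf,
					    iop->cookie);
	}

	static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
	{
		iop->cfg.tlb->tlb_sync(iop->cookie);
	}

With these in place, each backend keeps one struct io_pgtable *iop local
instead of separate cfg/tlb/cookie variables, which is exactly the pattern
the hunks below follow.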
[...]
                               unsigned long iova, int idx, int lvl,
                               arm_v7s_iopte *ptep)
 {
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
-       void *cookie = data->iop.cookie;
+       struct io_pgtable *iop = &data->iop;
        arm_v7s_iopte pte;
        size_t size = ARM_V7S_BLOCK_SIZE(lvl);
        int i;
[...]
                pte += size;
        }
 
-       __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, cfg);
+       __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
        size *= ARM_V7S_CONT_PAGES;
-       cfg->tlb->tlb_add_flush(iova, size, size, true, cookie);
-       cfg->tlb->tlb_sync(cookie);
+       io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+       io_pgtable_tlb_sync(iop);
 }
 
 static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
[...]
        unsigned long blk_start, blk_end, blk_size;
        phys_addr_t blk_paddr;
        arm_v7s_iopte table = 0;
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
        int prot = arm_v7s_pte_to_prot(*ptep, 1);
 
        blk_size = ARM_V7S_BLOCK_SIZE(1);
[...]
                }
        }
 
-       __arm_v7s_set_pte(ptep, table, 1, cfg);
+       __arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg);
        iova &= ~(blk_size - 1);
-       cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+       io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
        return size;
 }
 
 static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
                            unsigned long iova, size_t size, int lvl,
                            arm_v7s_iopte *ptep)
 {
        arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
-       const struct iommu_gather_ops *tlb = cfg->tlb;
-       void *cookie = data->iop.cookie;
+       struct io_pgtable *iop = &data->iop;
        int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
 
        /* Something went horribly wrong and we ran out of page table */
[...]
        if (num_entries) {
                size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
 
-               __arm_v7s_set_pte(ptep, 0, num_entries, cfg);
+               __arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
 
                for (i = 0; i < num_entries; i++) {
                        if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
                                /* Also flush any partial walks */
-                               tlb->tlb_add_flush(iova, blk_size,
-                                                  ARM_V7S_BLOCK_SIZE(lvl + 1),
-                                                  false, cookie);
-                               tlb->tlb_sync(cookie);
+                               io_pgtable_tlb_add_flush(iop, iova, blk_size,
+                                       ARM_V7S_BLOCK_SIZE(lvl + 1), false);
+                               io_pgtable_tlb_sync(iop);
                                ptep = iopte_deref(pte[i], lvl);
                                __arm_v7s_free_table(ptep, lvl + 1, data);
                        } else {
-                               tlb->tlb_add_flush(iova, blk_size, blk_size,
-                                                  true, cookie);
+                               io_pgtable_tlb_add_flush(iop, iova, blk_size,
+                                                        blk_size, true);
                        }
                        iova += blk_size;
                }
[...]
 static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
                         size_t size)
 {
-       size_t unmapped;
        struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-       struct io_pgtable *iop = &data->iop;
+       size_t unmapped;
 
        unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
        if (unmapped)
-               iop->cfg.tlb->tlb_sync(iop->cookie);
+               io_pgtable_tlb_sync(&data->iop);
 
        return unmapped;
 }
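
Both unmap entry points climb from the ops pointer back to the driver
private data before taking &data->iop. That recovery is the usual
container_of() pattern behind io_pgtable_ops_to_data(); a sketch of the
likely shape (the exact macro definitions are an assumption here):

	/* assumed: ops is embedded in struct io_pgtable, which is the
	 * iop member of the per-format private data */
	#define io_pgtable_ops_to_pgtable(x) \
		container_of((x), struct io_pgtable, ops)
	#define io_pgtable_ops_to_data(x) \
		container_of(io_pgtable_ops_to_pgtable(x), \
			     struct arm_v7s_io_pgtable, iop)

Because the io_pgtable instance is embedded in the per-format data,
passing &data->iop costs nothing extra: it is the same object whose cfg
and cookie the deleted lines were already dereferencing.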
 
[...]
        unsigned long blk_start, blk_end;
        phys_addr_t blk_paddr;
        arm_lpae_iopte table = 0;
-       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
        blk_start = iova & ~(blk_size - 1);
        blk_end = blk_start + blk_size;
[...]
                }
        }
 
-       __arm_lpae_set_pte(ptep, table, cfg);
+       __arm_lpae_set_pte(ptep, table, &data->iop.cfg);
        iova &= ~(blk_size - 1);
-       cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+       io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
        return size;
 }
 
 static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                             unsigned long iova, size_t size, int lvl,
                             arm_lpae_iopte *ptep)
 {
        arm_lpae_iopte pte;
-       const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
-       void *cookie = data->iop.cookie;
+       struct io_pgtable *iop = &data->iop;
        size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
        /* Something went horribly wrong and we ran out of page table */
 
[...]
        /* If the size matches this level, we're in the right place */
        if (size == blk_size) {
-               __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
+               __arm_lpae_set_pte(ptep, 0, &iop->cfg);
 
                if (!iopte_leaf(pte, lvl)) {
                        /* Also flush any partial walks */
-                       tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
-                                          false, cookie);
-                       tlb->tlb_sync(cookie);
+                       io_pgtable_tlb_add_flush(iop, iova, size,
+                                               ARM_LPAE_GRANULE(data), false);
+                       io_pgtable_tlb_sync(iop);
                        ptep = iopte_deref(pte, data);
                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
                } else {
-                       tlb->tlb_add_flush(iova, size, size, true, cookie);
+                       io_pgtable_tlb_add_flush(iop, iova, size, size, true);
                }
 
                return size;
[...]
 static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
                           size_t size)
 {
        size_t unmapped;
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-       struct io_pgtable *iop = &data->iop;
        arm_lpae_iopte *ptep = data->pgd;
        int lvl = ARM_LPAE_START_LVL(data);
 
        unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
        if (unmapped)
-               iop->cfg.tlb->tlb_sync(iop->cookie);
+               io_pgtable_tlb_sync(&data->iop);
 
        return unmapped;
 }
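
For completeness, the handle type only needs the members these hunks
actually touch. Something like the following, where the fmt member is an
assumption and the rest is implied by the iop->cfg and iop->cookie
accesses above:

	struct io_pgtable {
		enum io_pgtable_fmt	fmt;	/* format tag (assumed) */
		void			*cookie;	/* opaque cookie passed to the TLB callbacks */
		struct io_pgtable_cfg	cfg;	/* holds the iommu_gather_ops at cfg.tlb */
		struct io_pgtable_ops	ops;	/* map/unmap entry points */
	};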