iova: Delete copy_reserved_iova()
author	John Garry <john.garry@huawei.com>
Wed, 6 Jan 2021 13:35:07 +0000 (21:35 +0800)
committer	Joerg Roedel <jroedel@suse.de>
Wed, 27 Jan 2021 11:27:36 +0000 (12:27 +0100)
Since commit c588072bba6b ("iommu/vt-d: Convert intel iommu driver to
the iommu ops"), copy_reserved_iova() is no longer referenced, so
delete it.

Signed-off-by: John Garry <john.garry@huawei.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/1609940111-28563-3-git-send-email-john.garry@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
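
For reference, the deleted helper simply walked the source domain's rbtree
and re-reserved each range in the destination via reserve_iova(), which this
patch leaves in place. A caller that still needs to carve out a fixed PFN
range can call reserve_iova() directly; a minimal sketch follows (the
function name and error handling are hypothetical, not part of this patch):

	#include <linux/iova.h>

	/* Hypothetical example: reserve a fixed PFN range in a domain
	 * directly with reserve_iova() instead of copying reservations
	 * from another domain.
	 */
	static int example_reserve_range(struct iova_domain *iovad,
					 unsigned long pfn_lo,
					 unsigned long pfn_hi)
	{
		struct iova *iova = reserve_iova(iovad, pfn_lo, pfn_hi);

		if (!iova)
			return -ENOMEM;
		return 0;
	}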
drivers/iommu/iova.c
include/linux/iova.h

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 5f2caa9..9b114cd 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -710,36 +710,6 @@ finish:
 }
 EXPORT_SYMBOL_GPL(reserve_iova);
 
-/**
- * copy_reserved_iova - copies the reserved between domains
- * @from: - source domain from where to copy
- * @to: - destination domin where to copy
- * This function copies reserved iova's from one domain to
- * other.
- */
-void
-copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
-{
-       unsigned long flags;
-       struct rb_node *node;
-
-       spin_lock_irqsave(&from->iova_rbtree_lock, flags);
-       for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-               struct iova *iova = rb_entry(node, struct iova, node);
-               struct iova *new_iova;
-
-               if (iova->pfn_lo == IOVA_ANCHOR)
-                       continue;
-
-               new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
-               if (!new_iova)
-                       pr_err("Reserve iova range %lx@%lx failed\n",
-                              iova->pfn_lo, iova->pfn_lo);
-       }
-       spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
-}
-EXPORT_SYMBOL_GPL(copy_reserved_iova);
-
 /*
  * Magazine caches for IOVA ranges.  For an introduction to magazines,
  * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 2b76e0b..c834c01 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -150,7 +150,6 @@ unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
                              unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
        unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn);
 int init_iova_flush_queue(struct iova_domain *iovad,
@@ -211,11 +210,6 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
        return NULL;
 }
 
-static inline void copy_reserved_iova(struct iova_domain *from,
-                                     struct iova_domain *to)
-{
-}
-
 static inline void init_iova_domain(struct iova_domain *iovad,
                                    unsigned long granule,
                                    unsigned long start_pfn)