iommu: avoid taking iova_rbtree_lock twice
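Both find_iova() and __free_iova() take iova_rbtree_lock, so free_iova() used to acquire and release the lock twice per call; the free_iova() hunk below folds the lookup and the removal into a single critical section. This file diff also carries two related allocator changes: __alloc_and_insert_iova_range() now retries the search over the full window before marking the 32-bit space full, and alloc_iova_fast()'s one-shot retry now drains the global rcache depot as well as the per-CPU caches.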
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a..ff59d8a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -25,6 +25,7 @@ static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
+static void free_global_cached_iovas(struct iova_domain *iovad);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
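free_global_cached_iovas() is defined at the bottom of the file but called from alloc_iova_fast() further up, hence the forward declaration alongside the other static helpers.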
@@ -184,8 +185,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        struct rb_node *curr, *prev;
        struct iova *curr_iova;
        unsigned long flags;
-       unsigned long new_pfn;
+       unsigned long new_pfn, retry_pfn;
        unsigned long align_mask = ~0UL;
+       unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
        if (size_aligned)
                align_mask <<= fls_long(size - 1);
@@ -198,15 +200,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = rb_entry(curr, struct iova, node);
+       retry_pfn = curr_iova->pfn_hi + 1;
+
+retry:
        do {
-               limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-               new_pfn = (limit_pfn - size) & align_mask;
+               high_pfn = min(high_pfn, curr_iova->pfn_lo);
+               new_pfn = (high_pfn - size) & align_mask;
                prev = curr;
                curr = rb_prev(curr);
                curr_iova = rb_entry(curr, struct iova, node);
-       } while (curr && new_pfn <= curr_iova->pfn_hi);
-
-       if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+       } while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
+
+       if (high_pfn < size || new_pfn < low_pfn) {
+               if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+                       high_pfn = limit_pfn;
+                       low_pfn = retry_pfn;
+                       curr = &iovad->anchor.node;
+                       curr_iova = rb_entry(curr, struct iova, node);
+                       goto retry;
+               }
                iovad->max32_alloc_size = size;
                goto iova32_full;
        }
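The reworked search above makes two passes: the first starts from the cached rbtree node and scans downward from limit_pfn; if it bottoms out, the second pass restarts at the anchor node and may use the window between retry_pfn (just above the cached node) and limit_pfn that the first pass never saw. Below is a minimal standalone sketch of that control flow, with a sorted array of busy ranges standing in for the rbtree and alignment handling dropped; all names are illustrative, not the kernel's.

#include <stdio.h>

struct range { unsigned long lo, hi; };         /* busy pfns, inclusive */

/* Sorted busy ranges; the last entry mimics the rbtree's anchor node. */
static struct range busy[] = {
        { 0x100, 0x1ff },
        { 0x300, 0x3ff },
        { ~0UL,  ~0UL },                        /* anchor sentinel */
};
#define NBUSY ((int)(sizeof(busy) / sizeof(busy[0])))

/*
 * Scan downward from limit_pfn for a gap of `size` pfns, starting at
 * the "cached" busy range (must index a real range, not the anchor);
 * on failure, rescan once from the anchor over the part of the window
 * the first pass skipped.
 */
static long alloc_pfn(unsigned long start_pfn, unsigned long limit_pfn,
                      unsigned long size, int cached)
{
        unsigned long high_pfn = limit_pfn, low_pfn = start_pfn;
        unsigned long retry_pfn = busy[cached].hi + 1;
        unsigned long new_pfn;
        int i = cached;

retry:
        do {
                if (busy[i].lo < high_pfn)
                        high_pfn = busy[i].lo;
                new_pfn = high_pfn - size;      /* candidate [new_pfn, high_pfn) */
                i--;
        } while (i >= 0 && new_pfn <= busy[i].hi && new_pfn >= low_pfn);

        if (high_pfn < size || new_pfn < low_pfn) {
                if (low_pfn == start_pfn && retry_pfn < limit_pfn) {
                        high_pfn = limit_pfn;   /* second pass: full window, */
                        low_pfn = retry_pfn;    /* but stay above the cache  */
                        i = NBUSY - 1;          /* restart from the anchor   */
                        goto retry;
                }
                return -1;                      /* genuinely full */
        }
        return (long)new_pfn;
}

int main(void)
{
        /* 0x120 pfns don't fit under the cached range at 0x100, so the
         * first pass fails; the retry finds room below limit_pfn. */
        long pfn = alloc_pfn(0, 0x1000, 0x120, 0);

        if (pfn < 0)
                printf("allocation failed\n");
        else
                printf("got pfn %#lx\n", (unsigned long)pfn);
        return 0;
}

Running it prints "got pfn 0xee0": the first pass dies under the cached range at 0x100, the second finds the gap just below limit_pfn.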
@@ -390,10 +402,14 @@ EXPORT_SYMBOL_GPL(__free_iova);
 void
 free_iova(struct iova_domain *iovad, unsigned long pfn)
 {
-       struct iova *iova = find_iova(iovad, pfn);
+       unsigned long flags;
+       struct iova *iova;
 
+       spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+       iova = private_find_iova(iovad, pfn);
        if (iova)
-               __free_iova(iovad, iova);
+               private_free_iova(iovad, iova);
+       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 
 }
 EXPORT_SYMBOL_GPL(free_iova);
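Before this hunk, free_iova() called find_iova() and __free_iova(), each of which takes iova_rbtree_lock, so every free acquired the lock twice and dropped it between lookup and removal. Calling the unlocked helpers private_find_iova() and private_free_iova() under one acquisition closes that window. A generic sketch of the pattern, with a pthread mutex and a singly linked list as stand-ins for the spinlock and the rbtree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long pfn;
        struct node *next;
};

static struct node *head;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup and unlink in one critical section, as free_iova() now does. */
static void free_pfn(unsigned long pfn)
{
        struct node **pp, *n = NULL;

        pthread_mutex_lock(&tree_lock);
        for (pp = &head; *pp; pp = &(*pp)->next) {
                if ((*pp)->pfn == pfn) {
                        n = *pp;
                        *pp = n->next;          /* unlink while still locked */
                        break;
                }
        }
        pthread_mutex_unlock(&tree_lock);
        free(n);                                /* free(NULL) is a no-op */
}

int main(void)
{
        struct node *n = malloc(sizeof(*n));

        n->pfn = 0x42;
        n->next = NULL;
        head = n;
        free_pfn(0x42);
        printf("head is now %p\n", (void *)head);
        return 0;
}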
@@ -431,6 +447,7 @@ retry:
                flush_rcache = false;
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
+               free_global_cached_iovas(iovad);
                goto retry;
        }
 
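The retry: loop this hunk touches lives in alloc_iova_fast(): when the allocation fails, the caches are flushed back into the rbtree exactly once and the allocation is tried again. Before the patch only the per-CPU caches were flushed, so IOVA space stranded in the global depot could still fail the retry. A toy model of that behaviour, with plain counters standing in for the caches (all names made up for illustration):

#include <stdbool.h>
#include <stdio.h>

static unsigned long pool_free;     /* pfns free in the rbtree      */
static unsigned long cpu_cached;    /* stranded in per-CPU rcaches  */
static unsigned long depot_cached;  /* stranded in the global depot */

static bool try_alloc(unsigned long size)
{
        if (size > pool_free)
                return false;
        pool_free -= size;
        return true;
}

static bool alloc_fast(unsigned long size)
{
        bool flushed = false;

retry:
        if (try_alloc(size))
                return true;
        if (!flushed) {
                /*
                 * Mirror of the hunk: give cached space back to the tree.
                 * Before the patch only cpu_cached came back, so the
                 * depot_cached pfns below would still fail the retry.
                 */
                pool_free += cpu_cached + depot_cached;
                cpu_cached = depot_cached = 0;
                flushed = true;
                goto retry;
        }
        return false;
}

int main(void)
{
        pool_free = 16;
        cpu_cached = 32;
        depot_cached = 64;
        printf("alloc 100: %s\n", alloc_fast(100) ? "ok" : "failed");
        return 0;
}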
@@ -1046,5 +1063,25 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
        }
 }
 
+/*
+ * Free all the IOVA ranges in the global cache (the rcache depot).
+ */
+static void free_global_cached_iovas(struct iova_domain *iovad)
+{
+       struct iova_rcache *rcache;
+       unsigned long flags;
+       int i, j;
+
+       for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+               rcache = &iovad->rcaches[i];
+               spin_lock_irqsave(&rcache->lock, flags);
+               for (j = 0; j < rcache->depot_size; ++j) {
+                       iova_magazine_free_pfns(rcache->depot[j], iovad);
+                       iova_magazine_free(rcache->depot[j]);
+               }
+               rcache->depot_size = 0;
+               spin_unlock_irqrestore(&rcache->lock, flags);
+       }
+}
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
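free_global_cached_iovas() walks each of the IOVA_RANGE_CACHE_MAX_SIZE rcaches and, under rcache->lock, uses iova_magazine_free_pfns() to hand every pfn stockpiled in the depot back to the rbtree before destroying the magazine. A stand-in sketch of that drain pattern, with simplified types and no locking (not the kernel's structures):

#include <stdio.h>
#include <stdlib.h>

#define MAG_SIZE  4                     /* pfns per magazine (toy value) */
#define DEPOT_MAX 8

struct magazine {
        unsigned long pfns[MAG_SIZE];
        int cnt;
};

struct cache {
        struct magazine *depot[DEPOT_MAX];
        int depot_size;                 /* the real code guards this with rcache->lock */
};

static void release_pfn(unsigned long pfn)
{
        printf("returned pfn %#lx to the tree\n", pfn);
}

/* Drain every stockpiled magazine back to the owner, then destroy it. */
static void drain_depot(struct cache *c)
{
        for (int j = 0; j < c->depot_size; ++j) {
                struct magazine *m = c->depot[j];

                for (int k = 0; k < m->cnt; ++k)
                        release_pfn(m->pfns[k]);
                free(m);
        }
        c->depot_size = 0;
}

int main(void)
{
        struct cache c = { .depot_size = 1 };

        c.depot[0] = calloc(1, sizeof(*c.depot[0]));
        c.depot[0]->pfns[0] = 0x100;
        c.depot[0]->pfns[1] = 0x200;
        c.depot[0]->cnt = 2;
        drain_depot(&c);
        return 0;
}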