Merge branch 'misc.namei' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b6cf5f1..9e8bc80 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -121,8 +121,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
                spin_lock_init(&fq->lock);
        }
 
-       smp_wmb();
-
        iovad->fq = queue;
 
        timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
@@ -521,6 +519,7 @@ retry:
 
        return new_iova->pfn_lo;
 }
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
 
 /**
  * free_iova_fast - free iova pfn range into rcache
@@ -538,6 +537,7 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 
        free_iova(iovad, pfn);
 }
+EXPORT_SYMBOL_GPL(free_iova_fast);
 
 #define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
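
The two EXPORT_SYMBOL_GPL() additions above make the rcache-backed fast path callable from modular drivers. Below is a minimal, illustrative sketch of such a caller, not code from this patch: example_iovad, example_dma_window(), nr_pages and limit_pfn are made-up names, and the iova_domain is assumed to have been set up elsewhere with init_iova_domain().

#include <linux/errno.h>
#include <linux/iova.h>

/* Illustrative only: a module-side caller of the now-exported fast path. */
static struct iova_domain example_iovad;

static int example_dma_window(unsigned long nr_pages, unsigned long limit_pfn)
{
	unsigned long pfn;

	/* Rcache-backed allocation; 0 means no suitable range was found. */
	pfn = alloc_iova_fast(&example_iovad, nr_pages, limit_pfn, true);
	if (!pfn)
		return -ENOMEM;

	/* ... set up the actual mapping for [pfn, pfn + nr_pages) ... */

	/* Return the range to the per-CPU rcache rather than the rbtree. */
	free_iova_fast(&example_iovad, pfn, nr_pages);
	return 0;
}
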
@@ -633,10 +633,20 @@ void queue_iova(struct iova_domain *iovad,
                unsigned long pfn, unsigned long pages,
                unsigned long data)
 {
-       struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
+       struct iova_fq *fq;
        unsigned long flags;
        unsigned idx;
 
+       /*
+        * Order against the IOMMU driver's pagetable update from unmapping
+        * @pte, to guarantee that iova_domain_flush() observes that if called
+        * from a different CPU before we release the lock below. Full barrier
+        * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
+        * written fq state here.
+        */
+       smp_mb();
+
+       fq = raw_cpu_ptr(iovad->fq);
        spin_lock_irqsave(&fq->lock, flags);
 
        /*
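
For context on the barrier change: the smp_wmb() removed from init_iova_flush_queue() in the first hunk is effectively replaced by this reader-side smp_mb(), which pairs with whichever writer publishes the flush-queue state (per the comment, iommu_dma_init_fq()). The sketch below shows that publish/observe pairing in isolation; it uses made-up names (example_fq, example_fq_ready, example_publish(), example_consume()) and is not the dma-iommu code itself.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical state standing in for the per-CPU flush queues. */
static struct { unsigned int head, tail; } example_fq;
static bool example_fq_ready;

/*
 * Writer (the role iommu_dma_init_fq() plays): initialise everything,
 * then publish, with a write barrier ordering the two.
 */
static void example_publish(void)
{
	example_fq.head = 0;
	example_fq.tail = 0;
	smp_wmb();			/* order init before the "ready" store */
	WRITE_ONCE(example_fq_ready, true);
}

/*
 * Reader (the role queue_iova() plays): once the "ready" flag is seen,
 * the barrier guarantees the initialised head/tail are seen as well.
 * A full smp_mb() rather than smp_rmb() because the real caller also
 * needs its earlier pagetable stores ordered before the queueing.
 */
static bool example_consume(void)
{
	if (!READ_ONCE(example_fq_ready))
		return false;

	smp_mb();			/* pairs with the writer's smp_wmb() */
	return example_fq.head == example_fq.tail;
}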