iommu/amd: Lock code paths traversing protection_domain->dev_list
author	Joerg Roedel <jroedel@suse.de>
	Wed, 25 Sep 2019 13:23:00 +0000 (15:23 +0200)
committer	Joerg Roedel <jroedel@suse.de>
	Sat, 28 Sep 2019 12:44:13 +0000 (14:44 +0200)
Traversing this list requires protection_domain->lock to be held in
order to avoid nasty races with the attach/detach code. Make sure the
lock is taken on all code paths that traverse the list.

Reported-by: Filippo Sironi <sironi@amazon.de>
Fixes: 92d420ec028d ("iommu/amd: Relax locking in dma_ops path")
Reviewed-by: Filippo Sironi <sironi@amazon.de>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
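
For context: domain_flush_pages() and domain_flush_tlb() funnel into
__domain_flush_pages(), which walks protection_domain->dev_list to flush
the IOTLB of every attached device; that walk is what races with
attach/detach. The pattern applied at each call site below looks like
this (a minimal sketch; pdom_flush_example() is a hypothetical wrapper
for illustration, not code from this patch):

	static void pdom_flush_example(struct protection_domain *domain,
				       dma_addr_t iova, size_t size)
	{
		unsigned long flags;

		/* Hold domain->lock across the flush and its completion
		 * wait so the dev_list walk cannot race with attach/detach. */
		spin_lock_irqsave(&domain->lock, flags);
		domain_flush_pages(domain, iova, size);
		domain_flush_complete(domain);
		spin_unlock_irqrestore(&domain->lock, flags);
	}

The _irqsave variant is used because the DMA-API paths touched below
(__map_single()/__unmap_single()) can be reached from atomic context.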
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bac4e20..9c26976 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1334,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
                dma_addr_t iova, size_t size)
 {
        if (unlikely(amd_iommu_np_cache)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&domain->lock, flags);
                domain_flush_pages(domain, iova, size);
                domain_flush_complete(domain);
+               spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
 
@@ -1700,8 +1704,13 @@ static int iommu_map_page(struct protection_domain *dom,
        ret = 0;
 
 out:
-       if (updated)
+       if (updated) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dom->lock, flags);
                update_domain(dom);
+               spin_unlock_irqrestore(&dom->lock, flags);
+       }
 
        /* Everything flushed out, free pages now */
        free_page_list(freelist);
@@ -1857,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
 
 static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&dom->domain.lock, flags);
        domain_flush_tlb(&dom->domain);
        domain_flush_complete(&dom->domain);
+       spin_unlock_irqrestore(&dom->domain.lock, flags);
 }
 
 static void iova_domain_flush_tlb(struct iova_domain *iovad)
@@ -2414,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
 {
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start, ret;
+       unsigned long flags;
        unsigned int pages;
        int prot = 0;
        int i;
@@ -2451,8 +2465,10 @@ out_unmap:
                iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
        }
 
+       spin_lock_irqsave(&dma_dom->domain.lock, flags);
        domain_flush_tlb(&dma_dom->domain);
        domain_flush_complete(&dma_dom->domain);
+       spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
        dma_ops_free_iova(dma_dom, address, pages);
 
@@ -2481,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        }
 
        if (amd_iommu_unmap_flush) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dma_dom->domain.lock, flags);
                domain_flush_tlb(&dma_dom->domain);
                domain_flush_complete(&dma_dom->domain);
+               spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
                dma_ops_free_iova(dma_dom, dma_addr, pages);
        } else {
                pages = __roundup_pow_of_two(pages);
@@ -3246,9 +3266,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct protection_domain *dom = to_pdomain(domain);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dom->lock, flags);
        domain_flush_tlb_pde(dom);
        domain_flush_complete(dom);
+       spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
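
For reference, the dev_list traversal these locks protect lives in
__domain_flush_pages() and looks roughly like this (paraphrased, not an
exact copy of the upstream body):

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		if (!dev_data->ats.enabled)
			continue;

		/* Without domain->lock held, a concurrent attach/detach
		 * could modify dev_list under this iteration. */
		ret |= device_flush_iotlb(dev_data, address, size);
	}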