iommu/dma-iommu: Handle deferred devices
author Tom Murphy <murphyt7@tcd.ie>
Sun, 8 Sep 2019 16:56:39 +0000 (09:56 -0700)
committer Joerg Roedel <jroedel@suse.de>
Tue, 15 Oct 2019 09:31:04 +0000 (11:31 +0200)
Handle devices which defer their attach to the IOMMU in the dma-iommu API. When running in a kdump kernel, a device's attach to its IOMMU domain may be deferred until first use; check for this and complete the attach before allocating an IOVA or setting up a mapping, and fail the mapping operation if the deferred attach does not succeed.
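
For context, a minimal driver-side sketch of the is_attach_deferred() callback that the new iommu_dma_deferred_attach() helper consults (and, when it returns true, completes via iommu_attach_device()). The names my_iommu_is_attach_deferred, my_iommu_get_dev_data and the defer_attach flag are illustrative only, not taken from any in-tree driver:

    static bool my_iommu_is_attach_deferred(struct iommu_domain *domain,
                                            struct device *dev)
    {
            /*
             * A per-device flag set while the kdump kernel still relies
             * on the previous kernel's IOMMU mappings (illustrative only).
             */
            struct my_iommu_dev_data *dev_data = my_iommu_get_dev_data(dev);

            return dev_data && dev_data->defer_attach;
    }

    static const struct iommu_ops my_iommu_ops = {
            /* ... other callbacks ... */
            .is_attach_deferred     = my_iommu_is_attach_deferred,
    };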

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c

index cc3bf5c..b58b04b 100644
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 
 struct iommu_dma_msi_page {
        struct list_head        list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
        return iova_reserve_iommu_regions(dev, domain);
 }
 
+static int iommu_dma_deferred_attach(struct device *dev,
+               struct iommu_domain *domain)
+{
+       const struct iommu_ops *ops = domain->ops;
+
+       if (!is_kdump_kernel())
+               return 0;
+
+       if (unlikely(ops->is_attach_deferred &&
+                       ops->is_attach_deferred(domain, dev)))
+               return iommu_attach_device(domain, dev);
+
+       return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -470,6 +486,9 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;
 
+       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+               return DMA_MAPPING_ERROR;
+
        size = iova_align(iovad, size + iova_off);
 
        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
@@ -579,6 +598,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
        *dma_handle = DMA_MAPPING_ERROR;
 
+       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+               return NULL;
+
        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
@@ -711,7 +733,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dma_handle;
 
-       dma_handle =__iommu_dma_map(dev, phys, size, prot);
+       dma_handle = __iommu_dma_map(dev, phys, size, prot);
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
                arch_sync_dma_for_device(dev, phys, size, dir);
@@ -821,6 +843,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;
 
+       if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+               return 0;
+
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                iommu_dma_sync_sg_for_device(dev, sg, nents, dir);