 	struct iommu_domain		*fq_domain;
 };
 
+static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 		struct iommu_domain *domain)
 {
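
For reference, DEFINE_STATIC_KEY_FALSE() declares a jump-label key that starts
out disabled: every static_branch_unlikely() test against it compiles to
straight-line code until the key is switched on at runtime, at which point the
branch is live-patched in. A minimal sketch of the pattern, assuming nothing
from this patch (demo_key, demo_fast_path() and the module scaffolding are
illustrative names only):

#include <linux/jump_label.h>
#include <linux/module.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static int demo_fast_path(void)
{
	/*
	 * Compiles to a no-op while demo_key is false; becomes a real
	 * branch only after static_branch_enable() patches it in.
	 */
	if (static_branch_unlikely(&demo_key))
		return -EBUSY;		/* rare slow path */
	return 0;			/* hot path, no conditional */
}

static int __init demo_init(void)
{
	static_branch_enable(&demo_key);	/* decided once, e.g. at boot */
	return demo_fast_path() == -EBUSY ? 0 : -EINVAL;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
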
@@ ... @@ static int iommu_dma_deferred_attach(struct device *dev,
 {
 	const struct iommu_ops *ops = domain->ops;
 
-	if (!is_kdump_kernel())
-		return 0;
-
 	if (unlikely(ops->is_attach_deferred &&
 			ops->is_attach_deferred(domain, dev)))
 		return iommu_attach_device(domain, dev);
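
After this hunk, iommu_dma_deferred_attach() no longer decides for itself
whether it applies; the is_kdump_kernel() test moves to the call sites via the
static key. Reconstructed from the hunk above, and assuming the unchanged tail
still falls through to return 0, the helper now reads:

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

The trade-off is that every caller must now gate the call behind the static
branch; calling it unguarded would reintroduce the per-mapping overhead this
patch is removing.
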
@@ ... @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return DMA_MAPPING_ERROR;
 
 	size = iova_align(iovad, size + iova_off);
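
At each call site the check now reads static_branch_unlikely(...) &&
iommu_dma_deferred_attach(...): when the key is off (any non-kdump boot), &&
short-circuits behind the patched-out branch, so the function call is never
made and the fast path pays nothing. Since the same two-line condition repeats
at three call sites, it could arguably be folded into a small wrapper; a
hypothetical sketch (iommu_dma_maybe_deferred_attach() is not part of this
patch):

static inline int iommu_dma_maybe_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	/* Patched-out branch on non-kdump kernels: no test, no call. */
	if (!static_branch_unlikely(&iommu_deferred_attach_enabled))
		return 0;
	return iommu_dma_deferred_attach(dev, domain);
}
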
@@ ... @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	*dma_handle = DMA_MAPPING_ERROR;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return NULL;
 
 	min_size = alloc_sizes & -alloc_sizes;
@@ ... @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return 0;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ ... @@
 static int iommu_dma_init(void)
 {
+	if (is_kdump_kernel())
+		static_branch_enable(&iommu_deferred_attach_enabled);
+
 	return iova_cache_get();
 }
 arch_initcall(iommu_dma_init);
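
What makes the one-shot latch in iommu_dma_init() safe is that
is_kdump_kernel() cannot change after early boot: it keys off the ELF core
header address that a crash kernel inherits from the panicked kernel. The
upstream helper is roughly the following (include/linux/crash_dump.h):

/*
 * Roughly the upstream definition; elfcorehdr_addr is fixed during
 * early boot, so the answer never changes after this initcall runs.
 */
static inline bool is_kdump_kernel(void)
{
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}

Since iommu_dma_init() runs at arch_initcall time, ahead of normal driver
probing, the key is in its final state before DMA mappings start flowing
through these paths.
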