}
__setup("intel_iommu=", intel_iommu_setup);
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-
void *alloc_pgtable_page(int node)
{
struct page *page;
@@ ... @@
free_page((unsigned long)vaddr);
}
-static inline void *alloc_domain_mem(void)
-{
- return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
- kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
- return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
- kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
static inline int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ ... @@ static struct dmar_domain *alloc_domain(unsigned int type)
{
struct dmar_domain *domain;
- domain = alloc_domain_mem();
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- memset(domain, 0, sizeof(*domain));
domain->nid = NUMA_NO_NODE;
if (first_level_by_default(type))
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
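[Editorial note: the memset() removal below the kzalloc() line above is safe because kzalloc() zeroes the allocation; kzalloc(size, flags) behaves like kmalloc(size, flags | __GFP_ZERO). A minimal sketch of the equivalence (illustrative; zalloc_equiv is a made-up helper, not a kernel API):]

#include <linux/slab.h>

/*
 * Illustrative only: kzalloc(size, flags) behaves like
 * kmalloc(size, flags | __GFP_ZERO), which is why an explicit
 * memset(ptr, 0, size) after the allocation becomes redundant.
 */
static inline void *zalloc_equiv(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}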
@@ ... @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
- free_domain_mem(domain);
+ kfree(domain);
}
@@ ... @@ static struct device_domain_info *dmar_insert_one_dev_info(...)
unsigned long flags;
int ret;
- info = alloc_devinfo_mem();
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
@@ ... @@
info->segment = pci_domain_nr(pdev->bus);
}
- info->ats_supported = info->pasid_supported = info->pri_supported = 0;
- info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
- info->ats_qdep = 0;
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
- info->pasid_table = NULL;
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ ... @@
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
+ kfree(info);
return NULL;
}
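[Editorial note: one behavioral change worth flagging in the hunks above: the removed wrappers allocated with GFP_ATOMIC, while both replacement call sites use GFP_KERNEL. That appears safe here because the allocations happen before device_domain_lock is taken (the error path unlocks after the allocation), but it is a semantic change, not just a cleanup. A hedged sketch of the difference, using hypothetical names:]

#include <linux/slab.h>

struct example_info { int dummy; };	/* hypothetical stand-in */

/* GFP_ATOMIC: never sleeps and may dip into reserves; required in
 * atomic context but more likely to fail under memory pressure.
 * Note that kmem_cache_alloc() does not zero the object. */
static void *alloc_atomic(struct kmem_cache *cache)
{
	return kmem_cache_alloc(cache, GFP_ATOMIC);
}

/* GFP_KERNEL: may sleep and reclaim, the preferred choice in
 * process context; kzalloc() additionally returns zeroed memory. */
static struct example_info *alloc_sleepable(void)
{
	return kzalloc(sizeof(struct example_info), GFP_KERNEL);
}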
@@ ... @@
return ret;
}
-static inline int iommu_domain_cache_init(void)
-{
- int ret = 0;
-
- iommu_domain_cache = kmem_cache_create("iommu_domain",
- sizeof(struct dmar_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
-
- NULL);
- if (!iommu_domain_cache) {
- pr_err("Couldn't create iommu_domain cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
- int ret = 0;
-
- iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_devinfo_cache) {
- pr_err("Couldn't create devinfo cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
- int ret;
-
- ret = iommu_domain_cache_init();
- if (ret)
- goto domain_error;
-
- ret = iommu_devinfo_cache_init();
- if (!ret)
- return ret;
-
- kmem_cache_destroy(iommu_domain_cache);
-domain_error:
-
- return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
- kmem_cache_destroy(iommu_devinfo_cache);
- kmem_cache_destroy(iommu_domain_cache);
-}
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
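[Editorial note: the iommu_init_mempool()/iommu_exit_mempool() pair deleted above existed only to create and destroy the two slab caches, including the partial-failure unwinding between them; plain kzalloc()/kfree() leaves no allocator state to manage. For two small structures that are not allocated on a hot path, a dedicated kmem_cache buys little. For comparison, the minimum lifecycle a dedicated cache imposes (sketch; example_cache is hypothetical):]

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;	/* hypothetical */

static int __init example_cache_init(void)
{
	/* Create-on-init is mandatory for a dedicated cache... */
	example_cache = kmem_cache_create("example", 64, 0,
					  SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void example_cache_exit(void)
{
	/* ...as is destroy-on-exit (and error unwinding in callers). */
	kmem_cache_destroy(example_cache);
}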
@@ ... @@ static int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
- if (iommu_init_mempool()) {
- if (force_on)
- panic("tboot: Failed to initialize iommu memory\n");
- return -ENOMEM;
- }
-
down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
@@ ... @@
out_free_dmar:
intel_iommu_free_dmars();
up_write(&dmar_global_lock);
- iommu_exit_mempool();
return ret;
}
@@ ... @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
- free_devinfo_mem(info);
+ kfree(info);
}
static void dmar_remove_one_dev_info(struct device *dev)