 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-					       get_order(pci_seg->dev_table_size));
+	pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+						  pci_seg->dev_table_size);
 	if (!pci_seg->dev_table)
 		return -ENOMEM;
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-	iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
-					   get_order(CMD_BUFFER_SIZE));
+	iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
 
 	return iommu->cmd_buf ? 0 : -ENOMEM;
 }
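
As a side note on the arithmetic these hunks move out of the callers: the old iommu_alloc_pages() interface took a power-of-two page order from get_order(), while the *_sz() variants take the byte count directly. Below is a small standalone sketch of that order/size relationship; the 4096-byte PAGE_SIZE and the get_order_demo() helper are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for illustration; arch-dependent in reality */

/* Userspace stand-in for the kernel's get_order(): smallest order such
 * that (PAGE_SIZE << order) covers size bytes. */
static unsigned int get_order_demo(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 512, 4096, 8192, 12288 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int order = get_order_demo(sizes[i]);

		printf("size %5lu -> order %u -> %lu bytes\n",
		       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}

Note how the 12288-byte case rounds up to 16384 bytes under the order-based interface; passing the byte size keeps any such rounding inside the allocator instead of in every caller.
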
 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 				  size_t size)
 {
-	int order = get_order(size);
-	void *buf = iommu_alloc_pages(gfp, order);
+	void *buf;
 
-	if (buf &&
-	    check_feature(FEATURE_SNP) &&
-	    set_memory_4k((unsigned long)buf, (1 << order))) {
+	size = PAGE_ALIGN(size);
+	buf = iommu_alloc_pages_sz(gfp, size);
+	if (!buf)
+		return NULL;
+	if (check_feature(FEATURE_SNP) &&
+	    set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
 		iommu_free_pages(buf);
-		buf = NULL;
+		return NULL;
 	}
 	return buf;
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
-	iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+	iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
 	if (!iommu->ga_log)
 		goto err_out;
 
-	iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+	iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
 	if (!iommu->ga_log_tail)
 		goto err_out;
 	if (!old_devtb)
 		return false;
 
-	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-						     get_order(pci_seg->dev_table_size));
+	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+		GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
 	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
 {
 	struct q_inval *qi;
 	void *desc;
-	int order;
 
 	if (!ecap_qis(iommu->ecap))
 		return -ENOENT;
 	 * Need two pages to accommodate 256 descriptors of 256 bits each
 	 * if the remapping hardware supports scalable mode translation.
 	 */
-	order = ecap_smts(iommu->ecap) ? 1 : 0;
-	desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+	desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+					 ecap_smts(iommu->ecap) ? SZ_8K :
+								  SZ_4K);
 	if (!desc) {
 		kfree(qi);
 		iommu->qi = NULL;
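
The SZ_8K / SZ_4K choice is just the sizing from the comment above worked out in bytes: 256 descriptors of 256 bits each when scalable mode translation is supported, versus what I understand to be 128-bit descriptors otherwise (treat the legacy width as my assumption; it is not spelled out in this hunk). A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int nr_desc = 256;			/* queue length, per the comment */
	unsigned int legacy = nr_desc * (128 / 8);	/* assumed 128-bit descriptors */
	unsigned int smts   = nr_desc * (256 / 8);	/* 256-bit descriptors, scalable mode */

	printf("legacy queue: %u bytes (one 4K page)\n", legacy);
	printf("smts queue:   %u bytes (two 4K pages)\n", smts);
	return 0;
}

Which is why the old code picked order 1 ("two pages") for the scalable-mode case and order 0 otherwise.
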
 				    void *cookie)
 {
 	struct device *dev = cfg->iommu_dev;
-	int order = get_order(size);
+	size_t alloc_size;
 	dma_addr_t dma;
 	void *pages;
 
+	/*
+	 * For very small starting-level translation tables the HW requires a
+	 * minimum alignment of at least 64 to cover all cases.
+	 */
+	alloc_size = max(size, 64);
 	if (cfg->alloc)
-		pages = cfg->alloc(cookie, size, gfp);
+		pages = cfg->alloc(cookie, alloc_size, gfp);
 	else
-		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+		pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+						  alloc_size);
 
 	if (!pages)
 		return NULL;
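
The max(size, 64) clamp is easiest to see with a worked example: with 8-byte long-descriptor entries, a starting-level table that covers only a handful of slots is well under 64 bytes, and the comment says the hardware wants at least 64-byte alignment in that case. The following is a purely illustrative sketch of the clamp; the 8-byte entry size and the sample entry counts are assumptions for the sketch, not values read out of io-pgtable-arm.c.

#include <stdio.h>

#define ENTRY_SIZE	8UL	/* assumed 8-byte long-descriptor entries */
#define MIN_TABLE_SIZE	64UL	/* the 64-byte floor from the comment above */

/* Bytes requested for a starting-level table with the given number of
 * entries, mirroring the max(size, 64) clamp in the hunk above. */
static unsigned long table_alloc_size(unsigned long entries)
{
	unsigned long bytes = entries * ENTRY_SIZE;

	return bytes < MIN_TABLE_SIZE ? MIN_TABLE_SIZE : bytes;
}

int main(void)
{
	unsigned long entries[] = { 2, 4, 16, 512 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%3lu entries -> request %4lu bytes\n",
		       entries[i], table_alloc_size(entries[i]));
	return 0;
}

So only the sub-64-byte tables get padded; anything at or above 64 bytes is requested as-is.
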
 	return paddr;
 }
 
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
-	int order = get_order(size);
-
-	return iommu_alloc_pages(gfp, order);
-}
-
 static int dart_init_pte(struct dart_io_pgtable *data,
 			 unsigned long iova, phys_addr_t paddr,
 			 dart_iopte prot, int num_entries,
 	/* no L2 table present */
 	if (!pte) {
-		cptep = __dart_alloc_pages(tblsz, gfp);
+		cptep = iommu_alloc_pages_sz(gfp, tblsz);
 		if (!cptep)
 			return -ENOMEM;
 	cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
 
 	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
-		data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+		data->pgd[i] =
+			iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
 		if (!data->pgd[i])
 			goto out_free_data;
 		cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
 	if (!sun50i_domain)
 		return NULL;
 
-	sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
-					      get_order(DT_SIZE));
+	sun50i_domain->dt =
+		iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
 	if (!sun50i_domain->dt)
 		goto err_free_domain;