From c69d89aff393a212b9635c95990173b48d8bd74d Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Date: Wed, 23 Sep 2020 12:13:45 +0000
Subject: [PATCH] iommu/amd: Use 4K page for completion wait write-back
 semaphore

IOMMU SNP support requires the completion wait write-back semaphore to be
implemented using a 4K-aligned page, where the page address is to be
programmed into the newly introduced MMIO base/range registers.

This new scheme uses a per-iommu atomic variable to store the current
semaphore value, which is incremented for every completion wait command.

Since this new scheme is also compatible with non-SNP mode, generalize
the driver to use a 4K page for the completion-wait semaphore in both
modes.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Link: https://lore.kernel.org/r/20200923121347.25365-2-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
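Note (editor's illustration, not part of the patch): the standalone C
sketch below models the counter-based wait scheme this patch adopts.
Every completion-wait command carries a fresh value from a per-IOMMU
counter, the hardware writes that value back to the 4K semaphore page on
completion, and the CPU polls for the exact value it queued, so a stale
write-back from an earlier command can never be mistaken for completion
of the current one. The names fake_iommu, hw_complete and sem_page are
hypothetical stand-ins for struct amd_iommu, the IOMMU's write-back and
get_zeroed_page(); the real code is in the diff below.

  #include <stdint.h>
  #include <stdio.h>

  #define LOOP_TIMEOUT 100000

  /* Hypothetical stand-in for the relevant fields of struct amd_iommu. */
  struct fake_iommu {
          volatile uint64_t *cmd_sem;     /* 4K-aligned write-back page */
          uint64_t cmd_sem_val;           /* per-IOMMU semaphore counter */
  };

  /* Simulate the IOMMU completing a COMPLETION_WAIT command: the value
   * carried in the command is stored to the semaphore page. */
  static void hw_complete(struct fake_iommu *iommu, uint64_t data)
  {
          *iommu->cmd_sem = data;
  }

  /* Mirrors the patched wait_on_sem(): poll until the semaphore holds the
   * exact value queued with this command, not merely a non-zero value. */
  static int wait_on_sem(struct fake_iommu *iommu, uint64_t data)
  {
          int i = 0;

          while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT)
                  i += 1;

          return (i == LOOP_TIMEOUT) ? -1 : 0;
  }

  int main(void)
  {
          static uint64_t sem_page;       /* stands in for get_zeroed_page() */
          struct fake_iommu iommu = { .cmd_sem = &sem_page };

          uint64_t data = ++iommu.cmd_sem_val;    /* unique per wait */

          hw_complete(&iommu, data);              /* hardware write-back */
          printf("wait_on_sem() = %d\n", wait_on_sem(&iommu, data));
          return 0;
  }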
 drivers/iommu/amd/amd_iommu_types.h |  3 ++-
 drivers/iommu/amd/init.c            | 18 ++++++++++++++++++
 drivers/iommu/amd/iommu.c           | 23 +++++++++++------------
 3 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 30a5d412255a..4c80483e78ec 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -595,7 +595,8 @@ struct amd_iommu {
 #endif
 
 	u32 flags;
-	volatile u64 __aligned(8) cmd_sem;
+	volatile u64 *cmd_sem;
+	u64 cmd_sem_val;
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
 	/* DebugFS Info */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 908d8e89764c..35f220704bef 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -813,6 +813,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
 	return ret;
 }
 
+static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+
+	return iommu->cmd_sem ? 0 : -ENOMEM;
+}
+
+static void __init free_cwwb_sem(struct amd_iommu *iommu)
+{
+	if (iommu->cmd_sem)
+		free_page((unsigned long)iommu->cmd_sem);
+}
+
 static void iommu_enable_xt(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
@@ -1395,6 +1408,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
+	free_cwwb_sem(iommu);
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
 	free_ppr_log(iommu);
@@ -1481,6 +1495,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	int ret;
 
 	raw_spin_lock_init(&iommu->lock);
+	iommu->cmd_sem_val = 0;
 
 	/* Add IOMMU to internal data structures */
 	list_add_tail(&iommu->list, &amd_iommu_list);
@@ -1541,6 +1556,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
+	if (alloc_cwwb_sem(iommu))
+		return -ENOMEM;
+
 	if (alloc_command_buffer(iommu))
 		return -ENOMEM;
 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ba9f3dbc5b94..a23e14e8a942 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -792,11 +792,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
  *
  ****************************************************************************/
 
-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
 {
 	int i = 0;
 
-	while (*sem == 0 && i < LOOP_TIMEOUT) {
+	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
 		udelay(1);
 		i += 1;
 	}
@@ -827,16 +827,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 }
 
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+				  struct amd_iommu *iommu,
+				  u64 data)
 {
-	u64 paddr = iommu_virt_to_phys((void *)address);
-
-	WARN_ON(address & 0x7ULL);
+	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
 	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = 1;
+	cmd->data[2] = data;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
@@ -1045,22 +1045,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	struct iommu_cmd cmd;
 	unsigned long flags;
 	int ret;
+	u64 data;
 
 	if (!iommu->need_sync)
 		return 0;
 
-
-	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 
-	iommu->cmd_sem = 0;
+	data = ++iommu->cmd_sem_val;
+	build_completion_wait(&cmd, iommu, data);
 
 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		goto out_unlock;
 
-	ret = wait_on_sem(&iommu->cmd_sem);
+	ret = wait_on_sem(iommu, data);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
-- 
2.20.1