Commit c69d89af authored by Suravee Suthikulpanit, committed by Joerg Roedel

iommu/amd: Use 4K page for completion wait write-back semaphore

IOMMU SNP support requires the completion wait write-back semaphore to be
implemented using a 4K-aligned page, where the page address is to be
programmed into the newly introduced MMIO base/range registers.

This new scheme uses a per-iommu atomic variable to store the current
semaphore value, which is incremented for every completion wait command.

Since this new scheme is also compatible with non-SNP mode, generalize
the driver to use a 4K page for the completion-wait semaphore in both
modes.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Link: https://lore.kernel.org/r/20200923121347.25365-2-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 06ce8a62
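
For orientation before the diff: the new completion-wait flow, condensed from the hunks below (all names are taken from the patch itself; the SNP MMIO base/range programming is left to a later patch):

/*
 * Init path (init_iommu_one / alloc_cwwb_sem):
 *	iommu->cmd_sem     = (void *)get_zeroed_page(GFP_KERNEL);  4K-aligned page
 *	iommu->cmd_sem_val = 0;
 *
 * Each completion wait (iommu_completion_wait, under iommu->lock):
 *	data = ++iommu->cmd_sem_val;               unique per-IOMMU value
 *	build_completion_wait(&cmd, iommu, data);  IOMMU writes 'data' back
 *	__iommu_queue_command_sync(iommu, &cmd, false);
 *	wait_on_sem(iommu, data);                  poll *iommu->cmd_sem until it equals data
 */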
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -595,7 +595,8 @@ struct amd_iommu {
 #endif
 
 	u32 flags;
-	volatile u64 __aligned(8) cmd_sem;
+	volatile u64 *cmd_sem;
+	u64 cmd_sem_val;
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
 	/* DebugFS Info */
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -813,6 +813,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
 	return ret;
 }
 
+static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+	iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+
+	return iommu->cmd_sem ? 0 : -ENOMEM;
+}
+
+static void __init free_cwwb_sem(struct amd_iommu *iommu)
+{
+	if (iommu->cmd_sem)
+		free_page((unsigned long)iommu->cmd_sem);
+}
+
 static void iommu_enable_xt(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
@@ -1395,6 +1408,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
+	free_cwwb_sem(iommu);
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
 	free_ppr_log(iommu);
@@ -1481,6 +1495,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	int ret;
 
 	raw_spin_lock_init(&iommu->lock);
+	iommu->cmd_sem_val = 0;
 
 	/* Add IOMMU to internal data structures */
 	list_add_tail(&iommu->list, &amd_iommu_list);
@@ -1541,6 +1556,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
+	if (alloc_cwwb_sem(iommu))
+		return -ENOMEM;
+
 	if (alloc_command_buffer(iommu))
 		return -ENOMEM;
 
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -792,11 +792,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
 *
 ****************************************************************************/
 
-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
 {
 	int i = 0;
 
-	while (*sem == 0 && i < LOOP_TIMEOUT) {
+	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
 		udelay(1);
 		i += 1;
 	}
@@ -827,16 +827,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 }
 
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+				  struct amd_iommu *iommu,
+				  u64 data)
 {
-	u64 paddr = iommu_virt_to_phys((void *)address);
-
-	WARN_ON(address & 0x7ULL);
+	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
 	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = 1;
+	cmd->data[2] = data;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
@@ -1045,22 +1045,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	struct iommu_cmd cmd;
 	unsigned long flags;
 	int ret;
+	u64 data;
 
 	if (!iommu->need_sync)
 		return 0;
 
-	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
 	raw_spin_lock_irqsave(&iommu->lock, flags);
 
-	iommu->cmd_sem = 0;
+	data = ++iommu->cmd_sem_val;
+	build_completion_wait(&cmd, iommu, data);
 
 	ret = __iommu_queue_command_sync(iommu, &cmd, false);
 	if (ret)
 		goto out_unlock;
 
-	ret = wait_on_sem(&iommu->cmd_sem);
+	ret = wait_on_sem(iommu, data);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&iommu->lock, flags);
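
The design choice here, switching from "zero the semaphore and wait for non-zero" to "wait for a monotonically increasing expected value", can be illustrated outside the kernel. The following is a hypothetical, minimal userspace C sketch (not part of the patch; all names are stand-ins) in which a second thread plays the IOMMU's completion-wait write-back while the main thread polls, mirroring the patched wait_on_sem():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define LOOP_TIMEOUT 100000

static _Atomic unsigned long long sem;	/* stands in for *iommu->cmd_sem */
static unsigned long long sem_val;	/* stands in for iommu->cmd_sem_val */

/* "IOMMU" side: completes the wait command by storing the value back. */
static void *iommu_write_back(void *arg)
{
	unsigned long long data = *(unsigned long long *)arg;

	usleep(1000);			/* simulate command-queue latency */
	atomic_store(&sem, data);	/* the COMPLETION_WAIT store */
	return NULL;
}

/* Driver side: poll until the write-back matches the expected value. */
static int wait_on_sem(unsigned long long data)
{
	int i = 0;

	while (atomic_load(&sem) != data && i < LOOP_TIMEOUT) {
		usleep(1);
		i += 1;
	}
	return (i == LOOP_TIMEOUT) ? -1 : 0;
}

int main(void)
{
	unsigned long long data = ++sem_val;	/* unique per-wait value */
	pthread_t t;

	pthread_create(&t, NULL, iommu_write_back, &data);
	printf("wait_on_sem: %s\n", wait_on_sem(data) ? "timed out" : "ok");
	pthread_join(&t, NULL);
	return 0;
}

Because each wait polls for its own counter value, a stale write-back from an earlier command can never satisfy a later wait, so the semaphore no longer has to be reset to zero under the lock before each command.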