Commit 4174879b authored by Paolo Bonzini

Merge branch 'x86/amd-avic' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu into HEAD

Merge IOMMU bits for virtualization of interrupt injection into
virtual machines.
parents 8e3562f6 d98de49a
@@ -460,6 +460,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
driver will print ACPI tables for AMD IOMMU during
IOMMU initialization.
amd_iommu_intr= [HW,X86-64]
Specifies one of the following AMD IOMMU interrupt
remapping modes:
legacy - Use legacy interrupt remapping mode.
vapic - Use virtual APIC mode, which allows IOMMU
to inject interrupts directly into guest.
This mode requires kvm-amd.avic=1.
(Default when IOMMU HW support is present.)
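A hypothetical example (not part of this patch): booting
the host with "amd_iommu_intr=vapic" and loading kvm-amd
with avic=1 allows interrupts from assigned devices to be
posted directly to guest vCPUs.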
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
...
@@ -137,6 +137,7 @@ struct iommu_dev_data {
bool pri_tlp; /* PASID TLB required for
PPR completions */
u32 errata; /* Bitmap for errata to apply */
bool use_vapic; /* Enable device to use vapic mode */
};
/*
@@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
}
}
#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);
int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
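/*
 * The GA (guest virtual APIC) log is a ring buffer the IOMMU writes
 * when an interrupt is posted to a vCPU that is not running (is_run
 * clear in the IRTE); each 8-byte entry carries the ga_tag identifying
 * that vCPU, which is handed to the registered notifier (typically
 * KVM) so it can wake the vCPU up.
 */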
static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
u32 head, tail, cnt = 0;
if (iommu->ga_log == NULL)
return;
head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
while (head != tail) {
volatile u64 *raw;
u64 log_entry;
raw = (u64 *)(iommu->ga_log + head);
cnt++;
/* Avoid memcpy function-call overhead */
log_entry = *raw;
/* Update head pointer of hardware ring-buffer */
head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
/* Handle GA entry */
switch (GA_REQ_TYPE(log_entry)) {
case GA_GUEST_NR:
if (!iommu_ga_log_notifier)
break;
pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
__func__, GA_DEVID(log_entry),
GA_TAG(log_entry));
if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
pr_err("AMD-Vi: GA log notifier failed.\n");
break;
default:
break;
}
}
}
#endif /* CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
(MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
struct amd_iommu *iommu = (struct amd_iommu *) data;
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
while (status & AMD_IOMMU_INT_MASK) {
/* Enable EVT and PPR and GA interrupts again */
writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & MMIO_STATUS_EVT_INT_MASK) {
@@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
iommu_poll_ppr_log(iommu);
}
#ifdef CONFIG_IRQ_REMAP
if (status & MMIO_STATUS_GALOG_INT_MASK) {
pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
iommu_poll_ga_log(iommu);
}
#endif
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
@@ -2948,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
if (!iommu)
return;
#ifdef CONFIG_IRQ_REMAP
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
(dom->type == IOMMU_DOMAIN_UNMANAGED))
dev_data->use_vapic = 0;
#endif
iommu_completion_wait(iommu);
}
@@ -2973,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
ret = attach_device(dev, domain);
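/*
 * Only devices attached to an unmanaged domain (e.g. assigned to a
 * guest via VFIO) are marked for vAPIC use below; devices in other
 * domain types keep host-side interrupt remapping.
 */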
#ifdef CONFIG_IRQ_REMAP
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
if (dom->type == IOMMU_DOMAIN_UNMANAGED)
dev_data->use_vapic = 1;
else
dev_data->use_vapic = 0;
}
#endif
iommu_completion_wait(iommu);
return ret;
@@ -3511,34 +3594,6 @@ EXPORT_SYMBOL(amd_iommu_device_info);
*
*****************************************************************************/
union irte {
u32 val;
struct {
u32 valid : 1,
no_fault : 1,
int_type : 3,
rq_eoi : 1,
dm : 1,
rsvd_1 : 1,
destination : 8,
vector : 8,
rsvd_2 : 8;
} fields;
};
struct irq_2_irte {
u16 devid; /* Device ID for IRTE table */
u16 index; /* Index into IRTE table*/
};
struct amd_ir_data {
struct irq_2_irte irq_2_irte;
union irte irte_entry;
union {
struct msi_msg msi_entry;
};
};
static struct irq_chip amd_ir_chip;
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -3560,8 +3615,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
amd_iommu_dev_table[devid].data[2] = dte;
}
#define IRTE_ALLOCATED (~1U)
static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
struct irq_remap_table *table = NULL;
@@ -3607,13 +3660,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
goto out;
}
if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
memset(table->table, 0,
MAX_IRQS_PER_TABLE * sizeof(u32));
else
memset(table->table, 0,
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
if (ioapic) {
int i;
for (i = 0; i < 32; ++i)
iommu->irte_ops->set_allocated(table, i);
}
irq_lookup_table[devid] = table;
@@ -3639,6 +3697,10 @@ static int alloc_irq_index(u16 devid, int count)
struct irq_remap_table *table;
unsigned long flags;
int index, c;
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
return -ENODEV;
table = get_irq_table(devid, false);
if (!table)
@@ -3650,14 +3712,14 @@ static int alloc_irq_index(u16 devid, int count)
for (c = 0, index = table->min_index;
index < MAX_IRQS_PER_TABLE;
++index) {
if (!iommu->irte_ops->is_allocated(table, index))
c += 1;
else
c = 0;
if (c == count) {
for (; c != 0; --c)
iommu->irte_ops->set_allocated(table, index - c + 1);
index -= count - 1;
goto out;
@@ -3672,7 +3734,42 @@ static int alloc_irq_index(u16 devid, int count)
return index;
}
static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
struct amd_ir_data *data)
{
struct irq_remap_table *table;
struct amd_iommu *iommu;
unsigned long flags;
struct irte_ga *entry;
iommu = amd_iommu_rlookup_table[devid];
if (iommu == NULL)
return -EINVAL;
table = get_irq_table(devid, false);
if (!table)
return -ENOMEM;
spin_lock_irqsave(&table->lock, flags);
entry = (struct irte_ga *)table->table;
entry = &entry[index];
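/*
 * A 128-bit IRTE cannot be updated with a single atomic store, so
 * the entry is marked invalid while both halves are rewritten and
 * only then re-validated; the IRT flush below makes the new entry
 * visible to the IOMMU.
 */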
entry->lo.fields_remap.valid = 0;
entry->hi.val = irte->hi.val;
entry->lo.val = irte->lo.val;
entry->lo.fields_remap.valid = 1;
if (data)
data->ref = entry;
spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
return 0;
}
static int modify_irte(u16 devid, int index, union irte *irte)
{
struct irq_remap_table *table;
struct amd_iommu *iommu;
@@ -3687,7 +3784,7 @@ static int modify_irte(u16 devid, int index, union irte irte)
return -ENOMEM;
spin_lock_irqsave(&table->lock, flags);
table->table[index] = irte->val;
spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
@@ -3711,13 +3808,146 @@ static void free_irte(u16 devid, int index)
return;
spin_lock_irqsave(&table->lock, flags);
iommu->irte_ops->clear_allocated(table, index);
spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
}
static void irte_prepare(void *entry,
u32 delivery_mode, u32 dest_mode,
u8 vector, u32 dest_apicid, int devid)
{
union irte *irte = (union irte *) entry;
irte->val = 0;
irte->fields.vector = vector;
irte->fields.int_type = delivery_mode;
irte->fields.destination = dest_apicid;
irte->fields.dm = dest_mode;
irte->fields.valid = 1;
}
static void irte_ga_prepare(void *entry,
u32 delivery_mode, u32 dest_mode,
u8 vector, u32 dest_apicid, int devid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
struct iommu_dev_data *dev_data = search_dev_data(devid);
irte->lo.val = 0;
irte->hi.val = 0;
irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
irte->lo.fields_remap.int_type = delivery_mode;
irte->lo.fields_remap.dm = dest_mode;
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
irte->lo.fields_remap.valid = 1;
}
static void irte_activate(void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 1;
modify_irte(devid, index, irte);
}
static void irte_ga_activate(void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 1;
modify_irte_ga(devid, index, irte, NULL);
}
static void irte_deactivate(void *entry, u16 devid, u16 index)
{
union irte *irte = (union irte *) entry;
irte->fields.valid = 0;
modify_irte(devid, index, irte);
}
static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
{
struct irte_ga *irte = (struct irte_ga *) entry;
irte->lo.fields_remap.valid = 0;
modify_irte_ga(devid, index, irte, NULL);
}
static void irte_set_affinity(void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
union irte *irte = (union irte *) entry;
irte->fields.vector = vector;
irte->fields.destination = dest_apicid;
modify_irte(devid, index, irte);
}
static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
struct iommu_dev_data *dev_data = search_dev_data(devid);
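/*
 * While an interrupt is posted to a guest vCPU (use_vapic set), its
 * destination is managed through amd_iommu_update_ga(), so a plain
 * host-side affinity change is only applied to non-guest interrupts.
 */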
if (!dev_data || !dev_data->use_vapic) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
irte->lo.fields_remap.guest_mode = 0;
modify_irte_ga(devid, index, irte, NULL);
}
}
#define IRTE_ALLOCATED (~1U)
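/*
 * Allocation tracking differs per IRTE format: a 32-bit entry is
 * stamped with the IRTE_ALLOCATED pattern, while a 128-bit entry is
 * zeroed and given the otherwise-unused vector 0xff, which is what
 * irte_ga_is_allocated() below tests for.
 */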
static void irte_set_allocated(struct irq_remap_table *table, int index)
{
table->table[index] = IRTE_ALLOCATED;
}
static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
{
struct irte_ga *ptr = (struct irte_ga *)table->table;
struct irte_ga *irte = &ptr[index];
memset(&irte->lo.val, 0, sizeof(u64));
memset(&irte->hi.val, 0, sizeof(u64));
irte->hi.fields.vector = 0xff;
}
static bool irte_is_allocated(struct irq_remap_table *table, int index)
{
union irte *ptr = (union irte *)table->table;
union irte *irte = &ptr[index];
return irte->val != 0;
}
static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
{
struct irte_ga *ptr = (struct irte_ga *)table->table;
struct irte_ga *irte = &ptr[index];
return irte->hi.fields.vector != 0;
}
static void irte_clear_allocated(struct irq_remap_table *table, int index)
{
table->table[index] = 0;
}
static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
{
struct irte_ga *ptr = (struct irte_ga *)table->table;
struct irte_ga *irte = &ptr[index];
memset(&irte->lo.val, 0, sizeof(u64));
memset(&irte->hi.val, 0, sizeof(u64));
}
static int get_devid(struct irq_alloc_info *info)
{
int devid = -1;
@@ -3802,19 +4032,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
{
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct msi_msg *msg = &data->msi_entry;
struct IO_APIC_route_entry *entry;
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
return;
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
apic->irq_dest_mode, irq_cfg->vector,
irq_cfg->dest_apicid, devid);
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
@@ -3845,12 +4073,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
}
}
struct amd_irte_ops irte_32_ops = {
.prepare = irte_prepare,
.activate = irte_activate,
.deactivate = irte_deactivate,
.set_affinity = irte_set_affinity,
.set_allocated = irte_set_allocated,
.is_allocated = irte_is_allocated,
.clear_allocated = irte_clear_allocated,
};
struct amd_irte_ops irte_128_ops = {
.prepare = irte_ga_prepare,
.activate = irte_ga_activate,
.deactivate = irte_ga_deactivate,
.set_affinity = irte_ga_set_affinity,
.set_allocated = irte_ga_set_allocated,
.is_allocated = irte_ga_is_allocated,
.clear_allocated = irte_ga_clear_allocated,
};
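/*
 * These two ops tables keep the rest of the driver format-agnostic:
 * iommu_enable_ga() selects irte_32_ops for legacy 4-byte IRTEs or
 * irte_128_ops for 16-byte GA IRTEs, and all prepare/activate/
 * set_affinity/allocation calls dispatch through iommu->irte_ops.
 */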
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct irq_data *irq_data;
struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
int index = -1;
@@ -3902,6 +4150,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!data)
goto out_free_data;
if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
else
data->entry = kzalloc(sizeof(struct irte_ga),
GFP_KERNEL);
if (!data->entry) {
kfree(data);
goto out_free_data;
}
irq_data->hwirq = (devid << 16) + i;
irq_data->chip_data = data;
irq_data->chip = &amd_ir_chip;
@@ -3938,6 +4196,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
data = irq_data->chip_data;
irte_info = &data->irq_2_irte;
free_irte(irte_info->devid, irte_info->index);
kfree(data->entry);
kfree(data);
}
}
@@ -3949,8 +4208,11 @@ static void irq_remapping_activate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
if (iommu)
iommu->irte_ops->activate(data->entry, irte_info->devid,
irte_info->index);
}
static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -3958,10 +4220,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
if (iommu)
iommu->irte_ops->deactivate(data->entry, irte_info->devid,
irte_info->index);
}
static struct irq_domain_ops amd_ir_domain_ops = {
@@ -3971,6 +4234,70 @@ static struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
/* Note:
* This device has never been set up for guest mode.
* We should not modify the IRTE.
*/
if (!dev_data || !dev_data->use_vapic)
return 0;
pi_data->ir_data = ir_data;
/* Note:
* SVM tries to set up for VAPIC mode, but we are in
* legacy mode. So, we force legacy mode instead.
*/
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
__func__);
pi_data->is_guest_mode = false;
}
iommu = amd_iommu_rlookup_table[irte_info->devid];
if (iommu == NULL)
return -EINVAL;
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
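/*
 * Handing the previously cached tag back lets the caller detach this
 * IRTE from the vCPU it was last posted to before re-targeting it.
 */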
if (pi_data->is_guest_mode) {
/* Setting */
irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
irte->hi.fields.vector = vcpu_pi_info->vector;
irte->lo.fields_vapic.guest_mode = 1;
irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
ir_data->cached_ga_tag = pi_data->ga_tag;
} else {
/* Un-Setting */
struct irq_cfg *cfg = irqd_cfg(data);
irte->hi.val = 0;
irte->lo.val = 0;
irte->hi.fields.vector = cfg->vector;
irte->lo.fields_remap.guest_mode = 0;
irte->lo.fields_remap.destination = cfg->dest_apicid;
irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
irte->lo.fields_remap.dm = apic->irq_dest_mode;
/*
* This communicates the ga_tag back to the caller
* so that it can do all the necessary clean up.
*/
ir_data->cached_ga_tag = 0;
}
return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}
static int amd_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -3978,8 +4305,12 @@ static int amd_ir_set_affinity(struct irq_data *data,
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
int ret;
if (!iommu)
return -ENODEV;
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
@@ -3988,9 +4319,8 @@ static int amd_ir_set_affinity(struct irq_data *data,
* Atomically updates the IRTE with the new destination, vector
* and flushes the interrupt entry cache.
*/
iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
irte_info->index, cfg->vector, cfg->dest_apicid);
/*
* After this point, all the interrupts will start arriving
@@ -4012,6 +4342,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
static struct irq_chip amd_ir_chip = {
.irq_ack = ir_ack_apic_edge,
.irq_set_affinity = amd_ir_set_affinity,
.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
.irq_compose_msi_msg = ir_compose_msi_msg,
};
@@ -4026,4 +4357,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
return 0;
}
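/*
 * Called on vCPU scheduling transitions: the caller (KVM) passes the
 * amd_ir_data of an IRTE posted to that vCPU so the IOMMU learns the
 * new physical APIC destination and whether the vCPU is running; when
 * is_run is false, interrupts are logged to the GA log for a wakeup
 * instead of being posted directly.
 */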
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
unsigned long flags;
struct amd_iommu *iommu;
struct irq_remap_table *irt;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!ref || !entry || !entry->lo.fields_vapic.guest_mode)
return 0;
iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
return -ENODEV;
irt = get_irq_table(devid, false);
if (!irt)
return -ENODEV;
spin_lock_irqsave(&irt->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
ref->lo.fields_vapic.destination = cpu;
ref->lo.fields_vapic.is_run = is_run;
barrier();
}
spin_unlock_irqrestore(&irt->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
return 0;
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
@@ -84,6 +84,7 @@
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000
#define LOOP_TIMEOUT 100000
/*
* ACPI table definitions
*
@@ -145,6 +146,8 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
static int amd_iommu_target_ivhd_type;
@@ -386,6 +389,10 @@ static void iommu_disable(struct amd_iommu *iommu)
iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
/* Disable IOMMU GA_LOG */
iommu_feature_disable(iommu, CONTROL_GALOG_EN);
iommu_feature_disable(iommu, CONTROL_GAINT_EN);
/* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
@@ -671,6 +678,99 @@ static void __init free_ppr_log(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
if (iommu->ga_log)
free_pages((unsigned long)iommu->ga_log,
get_order(GA_LOG_SIZE));
if (iommu->ga_log_tail)
free_pages((unsigned long)iommu->ga_log_tail,
get_order(8));
#endif
}
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
u32 status, i;
if (!iommu->ga_log)
return -EINVAL;
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
/* Check if already running */
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
return 0;
iommu_feature_enable(iommu, CONTROL_GAINT_EN);
iommu_feature_enable(iommu, CONTROL_GALOG_EN);
for (i = 0; i < LOOP_TIMEOUT; ++i) {
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
}
if (i >= LOOP_TIMEOUT)
return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
return 0;
}
#ifdef CONFIG_IRQ_REMAP
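/*
 * One page backs the 512-entry log itself (GA_ENTRY_SIZE *
 * GA_LOG_ENTRIES = 4096 bytes) and a second allocation holds the
 * tail-pointer area; the log size is encoded into the base register
 * via GA_LOG_SIZE_512 and head/tail start out at zero.
 */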
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
u64 entry;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(GA_LOG_SIZE));
if (!iommu->ga_log)
goto err_out;
iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(8));
if (!iommu->ga_log_tail)
goto err_out;
entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
&entry, sizeof(entry));
writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
return 0;
err_out:
free_ga_log(iommu);
return -EINVAL;
}
#endif /* CONFIG_IRQ_REMAP */
static int iommu_init_ga(struct amd_iommu *iommu)
{
int ret = 0;
#ifdef CONFIG_IRQ_REMAP
/* Note: We have already checked GASup from IVRS table.
* Now, we need to make sure that GAMSup is set.
*/
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
!iommu_feature(iommu, FEATURE_GAM_VAPIC))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
ret = iommu_init_ga_log(iommu);
#endif /* CONFIG_IRQ_REMAP */
return ret;
}
static void iommu_enable_gt(struct amd_iommu *iommu)
{
if (!iommu_feature(iommu, FEATURE_GT))
@@ -1144,6 +1244,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
free_command_buffer(iommu);
free_event_buffer(iommu);
free_ppr_log(iommu);
free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
}
@@ -1258,6 +1359,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
case 0x40:
@@ -1265,6 +1368,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
default:
return -EINVAL;
@@ -1432,6 +1537,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
int ret;
iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
@@ -1488,6 +1594,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
return -ENOMEM;
ret = iommu_init_ga(iommu);
if (ret)
return ret;
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1545,16 +1655,24 @@ static void print_iommu_info(void)
dev_name(&iommu->dev->dev), iommu->cap_ptr);
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pr_info("AMD-Vi: Extended features (%#llx):\n",
iommu->features);
for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
if (iommu->features & FEATURE_GAM_VAPIC)
pr_cont(" GA_vAPIC");
pr_cont("\n");
}
}
if (irq_remapping_enabled) {
pr_info("AMD-Vi: Interrupt remapping enabled\n");
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
pr_info("AMD-Vi: virtual APIC enabled\n");
}
}
static int __init amd_iommu_init_pci(void)
@@ -1645,6 +1763,8 @@ static int iommu_init_msi(struct amd_iommu *iommu)
if (iommu->ppr_log != NULL)
iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
iommu_ga_log_enable(iommu);
return 0;
}
@@ -1862,6 +1982,24 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
iommu->stored_addr_lo | 1);
}
static void iommu_enable_ga(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
switch (amd_iommu_guest_ir) {
case AMD_IOMMU_GUEST_IR_VAPIC:
iommu_feature_enable(iommu, CONTROL_GAM_EN);
/* Fall through */
case AMD_IOMMU_GUEST_IR_LEGACY_GA:
iommu_feature_enable(iommu, CONTROL_GA_EN);
iommu->irte_ops = &irte_128_ops;
break;
default:
iommu->irte_ops = &irte_32_ops;
break;
}
#endif
}
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1877,9 +2015,15 @@ static void early_enable_iommus(void)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_ga(iommu);
iommu_enable(iommu);
iommu_flush_all_caches(iommu);
}
#ifdef CONFIG_IRQ_REMAP
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
#endif
}
static void enable_iommus_v2(void)
@@ -1905,6 +2049,11 @@ static void disable_iommus(void)
for_each_iommu(iommu)
iommu_disable(iommu);
#ifdef CONFIG_IRQ_REMAP
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
#endif
}
/*
@@ -2059,7 +2208,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
acpi_size ivrs_size;
acpi_status status;
int i, remap_cache_sz, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
@@ -2157,8 +2306,12 @@ static int __init early_amd_iommu_init(void)
* remapping tables.
*/
ret = -ENOMEM;
if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
else
remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
remap_cache_sz,
IRQ_TABLE_ALIGNMENT,
0, NULL);
if (!amd_iommu_irq_cache)
@@ -2413,6 +2566,21 @@ static int __init parse_amd_iommu_dump(char *str)
return 1;
}
static int __init parse_amd_iommu_intr(char *str)
{
for (; *str; ++str) {
if (strncmp(str, "legacy", 6) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
}
if (strncmp(str, "vapic", 5) == 0) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
break;
}
}
return 1;
}
static int __init parse_amd_iommu_options(char *str)
{
for (; *str; ++str) {
@@ -2521,6 +2689,7 @@ static int __init parse_ivrs_acpihid(char *str)
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_intr=", parse_amd_iommu_intr);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
__setup("ivrs_acpihid", parse_ivrs_acpihid);
...
@@ -38,6 +38,7 @@ extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
/* IOMMUv2 specific functions */
struct iommu_domain;
...
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
@@ -69,6 +70,8 @@
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -76,6 +79,8 @@
#define MMIO_STATUS_OFFSET 0x2020
#define MMIO_PPR_HEAD_OFFSET 0x2030
#define MMIO_PPR_TAIL_OFFSET 0x2038
#define MMIO_GA_HEAD_OFFSET 0x2040
#define MMIO_GA_TAIL_OFFSET 0x2048
#define MMIO_CNTR_CONF_OFFSET 0x4000
#define MMIO_CNTR_REG_OFFSET 0x40000
#define MMIO_REG_END_OFFSET 0x80000
@@ -92,6 +97,7 @@
#define FEATURE_GA (1ULL<<7)
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
@@ -110,6 +116,9 @@
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
#define MMIO_STATUS_GALOG_RUN_MASK (1 << 8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK (1 << 9)
#define MMIO_STATUS_GALOG_INT_MASK (1 << 10)
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
@@ -146,6 +155,10 @@
#define CONTROL_PPFINT_EN 0x0eULL
#define CONTROL_PPR_EN 0x0fULL
#define CONTROL_GT_EN 0x10ULL
#define CONTROL_GA_EN 0x11ULL
#define CONTROL_GAM_EN 0x19ULL
#define CONTROL_GALOG_EN 0x1CULL
#define CONTROL_GAINT_EN 0x1DULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -224,6 +237,19 @@
#define PPR_REQ_FAULT 0x01
/* Constants for GA Log handling */
#define GA_LOG_ENTRIES 512
#define GA_LOG_SIZE_SHIFT 56
#define GA_LOG_SIZE_512 (0x8ULL << GA_LOG_SIZE_SHIFT)
#define GA_ENTRY_SIZE 8
#define GA_LOG_SIZE (GA_ENTRY_SIZE * GA_LOG_ENTRIES)
#define GA_TAG(x) (u32)(x & 0xffffffffULL)
#define GA_DEVID(x) (u16)(((x) >> 32) & 0xffffULL)
#define GA_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
#define GA_GUEST_NR 0x1
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -329,6 +355,12 @@
#define IOMMU_CAP_NPCACHE 26
#define IOMMU_CAP_EFR 27
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_GASUP_SHIFT 7
#define MAX_DOMAIN_ID 65536
/* Protection domain flags */
@@ -400,6 +432,7 @@ struct amd_iommu_fault {
struct iommu_domain;
struct irq_domain;
struct amd_irte_ops;
/*
* This structure contains generic data for IOMMU protection domains
@@ -490,6 +523,12 @@ struct amd_iommu {
/* Base of the PPR log, if present */
u8 *ppr_log;
/* Base of the GA log, if present */
u8 *ga_log;
/* Tail of the GA log, if present */
u8 *ga_log_tail;
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
@@ -523,6 +562,8 @@ struct amd_iommu {
#ifdef CONFIG_IRQ_REMAP
struct irq_domain *ir_domain;
struct irq_domain *msi_domain;
struct amd_irte_ops *irte_ops;
#endif
};
@@ -681,4 +722,112 @@ static inline int get_hpet_devid(int id)
return -EINVAL;
}
enum amd_iommu_intr_mode_type {
AMD_IOMMU_GUEST_IR_LEGACY,
/* This mode is not visible to users. It is used when
* we cannot fully enable vAPIC and fall back to supporting
* only legacy interrupt remapping via 128-bit IRTEs.
*/
AMD_IOMMU_GUEST_IR_LEGACY_GA,
AMD_IOMMU_GUEST_IR_VAPIC,
};
#define AMD_IOMMU_GUEST_IR_GA(x) (x == AMD_IOMMU_GUEST_IR_VAPIC || \
x == AMD_IOMMU_GUEST_IR_LEGACY_GA)
#define AMD_IOMMU_GUEST_IR_VAPIC(x) (x == AMD_IOMMU_GUEST_IR_VAPIC)
union irte {
u32 val;
struct {
u32 valid : 1,
no_fault : 1,
int_type : 3,
rq_eoi : 1,
dm : 1,
rsvd_1 : 1,
destination : 8,
vector : 8,
rsvd_2 : 8;
} fields;
};
union irte_ga_lo {
u64 val;
/* For int remapping */
struct {
u64 valid : 1,
no_fault : 1,
/* ------ */
int_type : 3,
rq_eoi : 1,
dm : 1,
/* ------ */
guest_mode : 1,
destination : 8,
rsvd : 48;
} fields_remap;
/* For guest vAPIC */
struct {
u64 valid : 1,
no_fault : 1,
/* ------ */
ga_log_intr : 1,
rsvd1 : 3,
is_run : 1,
/* ------ */
guest_mode : 1,
destination : 8,
rsvd2 : 16,
ga_tag : 32;
} fields_vapic;
};
union irte_ga_hi {
u64 val;
struct {
u64 vector : 8,
rsvd_1 : 4,
ga_root_ptr : 40,
rsvd_2 : 12;
} fields;
};
struct irte_ga {
union irte_ga_lo lo;
union irte_ga_hi hi;
};
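/* The two 64-bit halves above form the 128-bit IRTE used in GA mode;
 * sizeof(struct irte_ga) == 16 matches the (sizeof(u64) * 2) sizing
 * used for the remap table and its kmem cache.
 */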
struct irq_2_irte {
u16 devid; /* Device ID for IRTE table */
u16 index; /* Index into IRTE table*/
};
struct amd_ir_data {
u32 cached_ga_tag;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
void *entry; /* Pointer to union irte or struct irte_ga */
void *ref; /* Pointer to the actual irte */
};
struct amd_irte_ops {
void (*prepare)(void *, u32, u32, u8, u32, int);
void (*activate)(void *, u16, u16);
void (*deactivate)(void *, u16, u16);
void (*set_affinity)(void *, u16, u16, u8, u32);
void *(*get)(struct irq_remap_table *, int);
void (*set_allocated)(struct irq_remap_table *, int);
bool (*is_allocated)(struct irq_remap_table *, int);
void (*clear_allocated)(struct irq_remap_table *, int);
};
#ifdef CONFIG_IRQ_REMAP
extern struct amd_irte_ops irte_32_ops;
extern struct amd_irte_ops irte_128_ops;
#endif
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
@@ -22,6 +22,20 @@
#include <linux/types.h>
/*
* This is mainly used to communicate information back and forth
* between SVM and IOMMU for setting up and tearing down posted
* interrupts.
*/
struct amd_iommu_pi_data {
u32 ga_tag;
u32 prev_ga_tag;
u64 base;
bool is_guest_mode;
struct vcpu_data *vcpu_data;
void *ir_data;
};
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
@@ -168,11 +182,34 @@ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
#endif /* CONFIG_AMD_IOMMU */
#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP)
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
extern int
amd_iommu_update_ga(int cpu, bool is_run, void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
static inline int
amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
return 0;
}
static inline int
amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
return 0;
}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
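As an illustrative sketch only (the handler and helper names below are
hypothetical, not part of this patch), a hypervisor would register a GA
log notifier once and then flag vCPU run-state changes back to the
IOMMU through amd_iommu_update_ga():

static int my_ga_log_handler(u32 ga_tag)
{
	/* Look up the vCPU encoded in ga_tag and wake it up. */
	return 0;
}

static int my_avic_init(void)
{
	/* Register once; GA log entries then invoke the handler. */
	return amd_iommu_register_ga_log_notifier(my_ga_log_handler);
}

static void my_vcpu_load(void *ir_data, int cpu)
{
	/* vCPU starts running on 'cpu': post interrupts directly. */
	amd_iommu_update_ga(cpu, true, ir_data);
}

static void my_vcpu_put(void *ir_data)
{
	/* vCPU stops running: route interrupts to the GA log. */
	amd_iommu_update_ga(-1, false, ir_data);
}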
#endif /* _ASM_X86_AMD_IOMMU_H */