Commit 4c1bad6a authored by Joerg Roedel

iommu/vt-d: Convert IR set_affinity function to remap_ops

The function that sets interrupt affinity when interrupt
remapping is enabled is Intel-specific as well, so move it
into the irq_remap_ops structure.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 0c3f173a
...@@ -40,6 +40,9 @@ extern int intr_setup_ioapic_entry(int irq, ...@@ -40,6 +40,9 @@ extern int intr_setup_ioapic_entry(int irq,
struct IO_APIC_route_entry *entry, struct IO_APIC_route_entry *entry,
unsigned int destination, int vector, unsigned int destination, int vector,
struct io_apic_irq_attr *attr); struct io_apic_irq_attr *attr);
extern int intr_set_affinity(struct irq_data *data,
const struct cpumask *mask,
bool force);
#else /* CONFIG_IRQ_REMAP */ #else /* CONFIG_IRQ_REMAP */
...@@ -59,6 +62,12 @@ static inline int intr_setup_ioapic_entry(int irq, ...@@ -59,6 +62,12 @@ static inline int intr_setup_ioapic_entry(int irq,
{ {
return -ENODEV; return -ENODEV;
} }
/*
 * !CONFIG_IRQ_REMAP stub: interrupt remapping is compiled out, so there
 * is no remapped affinity to update.  Returns 0 so callers treat it as
 * a successful no-op.
 */
static inline int intr_set_affinity(struct irq_data *data,
const struct cpumask *mask,
bool force)
{
return 0;
}
#endif /* CONFIG_IRQ_REMAP */ #endif /* CONFIG_IRQ_REMAP */
#endif /* __X86_INTR_REMAPPING_H */ #endif /* __X86_INTR_REMAPPING_H */
...@@ -2327,71 +2327,6 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, ...@@ -2327,71 +2327,6 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
return ret; return ret;
} }
#ifdef CONFIG_IRQ_REMAP
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE followed by a flush
 * of the hardware interrupt-entry cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector used for interrupting the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
/* The requested mask must contain at least one online CPU. */
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
/* Fetch the current remapping-table entry for this irq. */
if (get_irte(irq, &irte))
return -EBUSY;
/* Allocate a vector reachable from the new CPU mask. */
if (assign_irq_vector(irq, cfg, mask))
return -EBUSY;
dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
/* Point the IRTE at the newly assigned vector and destination APIC id. */
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
/*
 * Atomically updates the IRTE with the new destination, vector
 * and flushes the interrupt entry cache.
 */
modify_irte(irq, &irte);
/*
 * After this point, all the interrupts will start arriving
 * at the new destination. So, time to cleanup the previous
 * vector allocation.
 */
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
/* Record the effective affinity in the irq descriptor. */
cpumask_copy(data->affinity, mask);
return 0;
}
#else
/* Remapping compiled out: report success without touching anything. */
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
return 0;
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void) asmlinkage void smp_irq_move_cleanup_interrupt(void)
{ {
unsigned vector, me; unsigned vector, me;
...@@ -2636,7 +2571,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip) ...@@ -2636,7 +2571,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
chip->irq_eoi = ir_ack_apic_level; chip->irq_eoi = ir_ack_apic_level;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
chip->irq_set_affinity = ir_ioapic_set_affinity; chip->irq_set_affinity = intr_set_affinity;
#endif #endif
} }
#endif /* CONFIG_IRQ_REMAP */ #endif /* CONFIG_IRQ_REMAP */
...@@ -3826,7 +3761,7 @@ void __init setup_ioapic_dest(void) ...@@ -3826,7 +3761,7 @@ void __init setup_ioapic_dest(void)
mask = apic->target_cpus(); mask = apic->target_cpus();
if (intr_remapping_enabled) if (intr_remapping_enabled)
ir_ioapic_set_affinity(idata, mask, false); intr_set_affinity(idata, mask, false);
else else
ioapic_set_affinity(idata, mask, false); ioapic_set_affinity(idata, mask, false);
} }
......
...@@ -901,6 +901,59 @@ static int intel_setup_ioapic_entry(int irq, ...@@ -901,6 +901,59 @@ static int intel_setup_ioapic_entry(int irq,
return 0; return 0;
} }
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE followed by a flush
 * of the hardware interrupt-entry cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector used for interrupting the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
/* The requested mask must contain at least one online CPU. */
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
/* Fetch the current remapping-table entry for this irq. */
if (get_irte(irq, &irte))
return -EBUSY;
/* Allocate a vector reachable from the new CPU mask. */
if (assign_irq_vector(irq, cfg, mask))
return -EBUSY;
dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
/* Point the IRTE at the newly assigned vector and destination APIC id. */
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
/*
 * Atomically updates the IRTE with the new destination, vector
 * and flushes the interrupt entry cache.
 */
modify_irte(irq, &irte);
/*
 * After this point, all the interrupts will start arriving
 * at the new destination. So, time to cleanup the previous
 * vector allocation.
 */
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
/* Record the effective affinity in the irq descriptor. */
cpumask_copy(data->affinity, mask);
return 0;
}
struct irq_remap_ops intel_irq_remap_ops = { struct irq_remap_ops intel_irq_remap_ops = {
.supported = intel_intr_remapping_supported, .supported = intel_intr_remapping_supported,
...@@ -910,4 +963,5 @@ struct irq_remap_ops intel_irq_remap_ops = { ...@@ -910,4 +963,5 @@ struct irq_remap_ops intel_irq_remap_ops = {
.hardware_reenable = reenable_intr_remapping, .hardware_reenable = reenable_intr_remapping,
.enable_faulting = enable_drhd_fault_handling, .enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry, .setup_ioapic_entry = intel_setup_ioapic_entry,
.set_affinity = intel_ioapic_set_affinity,
}; };
...@@ -110,3 +110,12 @@ int intr_setup_ioapic_entry(int irq, ...@@ -110,3 +110,12 @@ int intr_setup_ioapic_entry(int irq,
return remap_ops->setup_ioapic_entry(irq, entry, destination, return remap_ops->setup_ioapic_entry(irq, entry, destination,
vector, attr); vector, attr);
} }
/*
 * intr_set_affinity - route an affinity change to the active remapping driver.
 *
 * Delegates to the registered remap_ops->set_affinity callback.  When no
 * driver is registered, or the driver does not implement the callback,
 * the request is treated as a successful no-op and 0 is returned.
 */
int intr_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	int (*set_affinity)(struct irq_data *, const struct cpumask *, bool);

	set_affinity = remap_ops ? remap_ops->set_affinity : NULL;

	return set_affinity ? set_affinity(data, mask, force) : 0;
}
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
struct IO_APIC_route_entry; struct IO_APIC_route_entry;
struct io_apic_irq_attr; struct io_apic_irq_attr;
struct irq_data;
struct cpumask;
extern int disable_intremap; extern int disable_intremap;
extern int disable_sourceid_checking; extern int disable_sourceid_checking;
...@@ -54,6 +56,10 @@ struct irq_remap_ops { ...@@ -54,6 +56,10 @@ struct irq_remap_ops {
int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *, int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *,
unsigned int, int, unsigned int, int,
struct io_apic_irq_attr *); struct io_apic_irq_attr *);
/* Set the CPU affinity of a remapped interrupt */
int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
bool force);
}; };
extern struct irq_remap_ops intel_irq_remap_ops; extern struct irq_remap_ops intel_irq_remap_ops;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment