Commit 43af9872 authored by Linus Torvalds

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Thomas Gleixner:
 "This udpate contains:

   - rework the irq vector array to store a pointer to the irq
     descriptor instead of the irq number to avoid a lookup of the irq
     descriptor in the irq entry path

   - lguest interrupt handling cleanups

   - conversion of the local apic timer to the new clockevent callbacks

   - preparatory changes for the irq argument removal of interrupt flow
     handlers"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/irq: Do not dereference irq descriptor before checking it
  tools/lguest: Clean up include dir
  tools/lguest: Fix redefinition of struct virtio_pci_cfg_cap
  x86/irq: Store irq descriptor in vector array
  genirq: Provide irq_desc_has_action
  x86/irq: Get rid of an indentation level
  x86/irq: Rename VECTOR_UNDEFINED to VECTOR_UNUSED
  x86/irq: Replace numeric constant
  x86/irq: Protect smp_cleanup_move
  x86/lguest: Do not setup unused irq vectors
  x86/lguest: Clean up lguest_setup_irq
  x86/apic: Drop local_irq_save/restore in timer callbacks
  x86/apic: Migrate apic timer to new set_state interface
  x86/irq: Use access helper irq_data_get_affinity_mask()
  x86/irq: Use accessor irq_data_get_irq_handler_data()
  x86/irq: Use accessor irq_data_get_node()
parents 17e6b00a a47d4576
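
The theme running through the whole diff below is the first item in the commit message: the per-CPU vector_irq[] array now holds struct irq_desc pointers rather than irq numbers, so do_IRQ() can hand the descriptor straight to handle_irq() without an irq_to_desc() lookup on every interrupt. The following stand-alone C sketch models that idea with deliberately simplified types; it is an illustration of the data-structure change, not kernel code.

#include <stdio.h>

#define NR_VECTORS 256

struct irq_desc {
	unsigned int irq;
	void (*handle)(struct irq_desc *desc);
};

/* One slot per vector; NULL plays the role of VECTOR_UNUSED here. */
static struct irq_desc *vector_irq[NR_VECTORS];

static void demo_handler(struct irq_desc *desc)
{
	printf("handled irq %u\n", desc->irq);
}

/*
 * Entry path after the rework: a single per-CPU array read yields the
 * descriptor. Before, the array held an int and every interrupt paid
 * for an irq -> descriptor lookup here.
 */
static void demo_do_IRQ(unsigned int vector)
{
	struct irq_desc *desc = vector_irq[vector];

	if (desc)	/* the kernel uses IS_ERR_OR_NULL(); see below */
		desc->handle(desc);
}

int main(void)
{
	static struct irq_desc timer = { .irq = 0, .handle = demo_handler };

	vector_irq[48] = &timer;	/* what vector assignment now stores */
	demo_do_IRQ(48);
	return 0;
}
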
@@ -182,10 +182,10 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
-#define VECTOR_UNDEFINED	(-1)
-#define VECTOR_RETRIGGERED	(-2)
+#define VECTOR_UNUSED		NULL
+#define VECTOR_RETRIGGERED	((void *)~0UL)
 
-typedef int vector_irq_t[NR_VECTORS];
+typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */
...
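
The two sentinels above are chosen so a single pointer test can reject both: VECTOR_UNUSED is NULL, and VECTOR_RETRIGGERED ((void *)~0UL) falls in the address range that IS_ERR_OR_NULL() already treats as an error pointer. That is why the integer comparisons against VECTOR_UNDEFINED elsewhere in this diff become IS_ERR_OR_NULL() checks. A hedged user-space sketch of the test, with the kernel's MAX_ERRNO bound written out as an assumption:

#include <assert.h>
#include <stdint.h>

#define MAX_ERRNO	4095	/* the bound used by the kernel's err.h */

#define VECTOR_UNUSED		((void *)0)
#define VECTOR_RETRIGGERED	((void *)~0UL)

/* Same test IS_ERR_OR_NULL() performs: NULL, or in the top error page. */
static inline int is_err_or_null(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	int real_desc;	/* stand-in for a genuine struct irq_desc */

	assert(is_err_or_null(VECTOR_UNUSED));		/* NULL */
	assert(is_err_or_null(VECTOR_RETRIGGERED));	/* (uintptr_t)-1 */
	assert(!is_err_or_null(&real_desc));		/* real pointers pass */
	return 0;
}
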
@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
...
@@ -462,40 +462,40 @@ static int lapic_next_deadline(unsigned long delta,
 	return 0;
 }
 
-/*
- * Setup the lapic timer in periodic or oneshot mode
- */
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
-	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-		return;
+		return 0;
 
-	local_irq_save(flags);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-		__setup_APIC_LVTT(lapic_timer_frequency,
-				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		v = apic_read(APIC_LVTT);
-		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, v);
-		apic_write(APIC_TMICT, 0);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
+	v = apic_read(APIC_LVTT);
+	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+	apic_write(APIC_LVTT, v);
+	apic_write(APIC_TMICT, 0);
+	return 0;
+}
 
-	local_irq_restore(flags);
+static inline int
+lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
+{
+	/* Lapic used as dummy for broadcast ? */
+	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
+		return 0;
+
+	__setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
+	return 0;
+}
+
+static int lapic_timer_set_periodic(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, false);
+}
+
+static int lapic_timer_set_oneshot(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, true);
 }
 
 /*
@@ -514,10 +514,13 @@ static void lapic_timer_broadcast(const struct cpumask *mask)
  */
 static struct clock_event_device lapic_clockevent = {
 	.name			= "lapic",
-	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
-				| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
+				  | CLOCK_EVT_FEAT_DUMMY,
 	.shift			= 32,
-	.set_mode		= lapic_timer_setup,
+	.set_state_shutdown	= lapic_timer_shutdown,
+	.set_state_periodic	= lapic_timer_set_periodic,
+	.set_state_oneshot	= lapic_timer_set_oneshot,
 	.set_next_event		= lapic_next_event,
 	.broadcast		= lapic_timer_broadcast,
 	.rating			= 100,
@@ -778,7 +781,7 @@ static int __init calibrate_APIC_clock(void)
 	 * Setup the apic timer manually
 	 */
 	levt->event_handler = lapic_cal_handler;
-	lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
+	lapic_timer_set_periodic(levt);
 	lapic_cal_loops = -1;
 
 	/* Let the interrupts run */
@@ -788,7 +791,8 @@ static int __init calibrate_APIC_clock(void)
 		cpu_relax();
 
 	/* Stop the lapic timer */
-	lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
+	local_irq_disable();
+	lapic_timer_shutdown(levt);
 
 	/* Jiffies delta */
 	deltaj = lapic_cal_j2 - lapic_cal_j1;
@@ -799,7 +803,7 @@ static int __init calibrate_APIC_clock(void)
 			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
 		else
 			levt->features |= CLOCK_EVT_FEAT_DUMMY;
-	} else
-		local_irq_enable();
+	}
+	local_irq_enable();
 
 	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
@@ -878,7 +882,7 @@ static void local_apic_timer_interrupt(void)
 	if (!evt->event_handler) {
 		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
 		/* Switch it off */
-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+		lapic_timer_shutdown(evt);
 		return;
 	}
...
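
The timer hunks above are a straight conversion to the new clockevents convention: instead of one set_mode(enum, dev) callback built around a switch, the driver registers one callback per state transition, and the callbacks drop local_irq_save/restore because the core invokes them with interrupts disabled (which is why the manual shutdown in calibrate_APIC_clock() now adds its own local_irq_disable()). A minimal user-space model of the callback shape follows; the struct is simplified and is not the kernel's clockevents API.

#include <stdbool.h>
#include <stdio.h>

struct clock_event_device {
	const char *name;
	int (*set_state_shutdown)(struct clock_event_device *evt);
	int (*set_state_periodic)(struct clock_event_device *evt);
	int (*set_state_oneshot)(struct clock_event_device *evt);
};

static int demo_shutdown(struct clock_event_device *evt)
{
	printf("%s: mask timer\n", evt->name);
	return 0;
}

/* One helper serves both programming modes, as in the lapic patch. */
static int demo_set_mode(struct clock_event_device *evt, bool oneshot)
{
	printf("%s: program %s mode\n", evt->name,
	       oneshot ? "oneshot" : "periodic");
	return 0;
}

static int demo_set_periodic(struct clock_event_device *evt)
{
	return demo_set_mode(evt, false);
}

static int demo_set_oneshot(struct clock_event_device *evt)
{
	return demo_set_mode(evt, true);
}

int main(void)
{
	struct clock_event_device lapic = {
		.name			= "lapic",
		.set_state_shutdown	= demo_shutdown,
		.set_state_periodic	= demo_set_periodic,
		.set_state_oneshot	= demo_set_oneshot,
	};

	/* The core now calls exactly the transition it wants. */
	lapic.set_state_periodic(&lapic);
	lapic.set_state_shutdown(&lapic);
	return 0;
}
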
@@ -2541,7 +2541,7 @@ void __init setup_ioapic_dest(void)
 		 * Honour affinities which have been set in early boot
 		 */
 		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
-			mask = idata->affinity;
+			mask = irq_data_get_affinity_mask(idata);
 		else
 			mask = apic->target_cpus();
...
@@ -264,7 +264,7 @@ static inline int hpet_dev_id(struct irq_domain *domain)
 
 static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	hpet_msi_write(data->handler_data, msg);
+	hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
 }
 
 static struct irq_chip hpet_msi_controller = {
...
@@ -169,8 +169,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			goto next;
 
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] >
-			    VECTOR_UNDEFINED)
+			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
@@ -182,7 +181,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 				cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
+			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
 		d->cfg.vector = vector;
 		cpumask_copy(d->domain, vector_cpumask);
 		err = 0;
@@ -224,15 +223,16 @@ static int assign_irq_vector_policy(int irq, int node,
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-	int cpu, vector;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int cpu, vector;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
@@ -242,12 +242,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		return;
 	}
 
+	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
+			if (per_cpu(vector_irq, cpu)[vector] != desc)
 				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 			break;
 		}
 	}
@@ -296,7 +297,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	struct irq_alloc_info *info = arg;
 	struct apic_chip_data *data;
 	struct irq_data *irq_data;
-	int i, err;
+	int i, err, node;
 
 	if (disable_apic)
 		return -ENXIO;
@@ -308,12 +309,13 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		BUG_ON(!irq_data);
+		node = irq_data_get_node(irq_data);
 #ifdef	CONFIG_X86_IO_APIC
 		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
 			data = legacy_irq_data[virq + i];
 		else
 #endif
-			data = alloc_apic_chip_data(irq_data->node);
+			data = alloc_apic_chip_data(node);
 		if (!data) {
 			err = -ENOMEM;
 			goto error;
@@ -322,8 +324,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
-					       info);
+		err = assign_irq_vector_policy(virq + i, node, data, info);
 		if (err)
 			goto error;
 	}
@@ -403,32 +404,32 @@ int __init arch_early_irq_init(void)
 	return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
 	struct apic_chip_data *data;
+	struct irq_desc *desc;
+	int irq, vector;
 
 	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		data = apic_chip_data(irq_get_irq_data(irq));
-		if (!data)
-			continue;
+	for_each_irq_desc(irq, desc) {
+		struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-		if (!cpumask_test_cpu(cpu, data->domain))
+		data = apic_chip_data(idata);
+		if (!data || !cpumask_test_cpu(cpu, data->domain))
 			continue;
 		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
+		per_cpu(vector_irq, cpu)[vector] = desc;
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
+		desc = per_cpu(vector_irq, cpu)[vector];
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		data = apic_chip_data(irq_get_irq_data(irq));
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 	}
 }
@@ -448,7 +449,7 @@ void setup_vector_irq(int cpu)
 	 * legacy vector to irq mapping:
 	 */
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
 	__setup_vector_irq(cpu);
 }
@@ -490,7 +491,8 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	if (err) {
 		struct irq_data *top = irq_get_irq_data(irq);
 
-		if (assign_irq_vector(irq, data, top->affinity))
+		if (assign_irq_vector(irq, data,
+				      irq_data_get_affinity_mask(top)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
@@ -538,27 +540,30 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 
 	entering_ack_irq();
 
+	/* Prevent vectors vanishing under us */
+	raw_spin_lock(&vector_lock);
+
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
 		struct apic_chip_data *data;
+		struct irq_desc *desc;
+		unsigned int irr;
 
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
+	retry:
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
+		if (!raw_spin_trylock(&desc->lock)) {
+			raw_spin_unlock(&vector_lock);
+			cpu_relax();
+			raw_spin_lock(&vector_lock);
+			goto retry;
+		}
 
-		data = apic_chip_data(&desc->irq_data);
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!data)
-			continue;
-
-		raw_spin_lock(&desc->lock);
+			goto unlock;
 
 		/*
 		 * Check if the irq migration is in progress. If so, we
@@ -583,11 +588,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
 
+	raw_spin_unlock(&vector_lock);
+
 	exiting_irq();
 }
...
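
The retry dance in smp_irq_move_cleanup_interrupt() above deserves a note: the handler now scans the vector array under vector_lock, but it must also take desc->lock, and other code paths nest the two locks in the opposite order. Blocking on desc->lock while holding vector_lock could therefore deadlock, so the code trylocks and, on contention, drops the outer lock, relaxes, and retries the slot. A hedged pthreads model of that shape (the lock-ordering premise is inferred from the code above, not spelled out on this page):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t desc_lock   = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take the inner lock only opportunistically while the outer lock is
 * held; on failure, back off by releasing the outer lock so the other
 * ordering (desc_lock first, then vector_lock) can make progress.
 */
static void cleanup_one_slot(void)
{
	pthread_mutex_lock(&vector_lock);
retry:
	if (pthread_mutex_trylock(&desc_lock)) {
		pthread_mutex_unlock(&vector_lock);	/* back off */
		sched_yield();				/* cpu_relax() stand-in */
		pthread_mutex_lock(&vector_lock);
		goto retry;
	}

	puts("slot cleaned with both locks held");

	pthread_mutex_unlock(&desc_lock);
	pthread_mutex_unlock(&vector_lock);
}

int main(void)
{
	cleanup_one_slot();
	return 0;
}
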
@@ -426,7 +426,7 @@ static struct irq_domain *hpet_domain;
 
 void hpet_msi_unmask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* unmask it */
@@ -437,7 +437,7 @@ void hpet_msi_unmask(struct irq_data *data)
 
 void hpet_msi_mask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* mask it */
...
@@ -214,10 +214,9 @@ u64 arch_irq_stat(void)
 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-
+	struct irq_desc * desc;
 	/* high bit used in ret_from_ code  */
 	unsigned vector = ~regs->orig_ax;
-	unsigned irq;
 
 	/*
 	 * NB: Unlike exception entries, IRQ entries do not reliably
@@ -236,17 +235,17 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	irq = __this_cpu_read(vector_irq[vector]);
+	desc = __this_cpu_read(vector_irq[vector]);
 
-	if (!handle_irq(irq, regs)) {
+	if (!handle_irq(desc, regs)) {
 		ack_APIC_irq();
 
-		if (irq != VECTOR_RETRIGGERED) {
-			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+		if (desc != VECTOR_RETRIGGERED) {
+			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
 					     __func__, smp_processor_id(),
-					     vector, irq);
+					     vector);
 		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 		}
 	}
@@ -348,10 +347,10 @@ static struct cpumask affinity_new, online_new;
  */
 int check_irq_vectors_for_cpu_disable(void)
 {
-	int irq, cpu;
 	unsigned int this_cpu, vector, this_count, count;
 	struct irq_desc *desc;
 	struct irq_data *data;
+	int cpu;
 
 	this_cpu = smp_processor_id();
 	cpumask_copy(&online_new, cpu_online_mask);
@@ -359,48 +358,44 @@ int check_irq_vectors_for_cpu_disable(void)
 
 	this_count = 0;
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		irq = __this_cpu_read(vector_irq[vector]);
-		if (irq >= 0) {
-			desc = irq_to_desc(irq);
-			if (!desc)
-				continue;
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
+			continue;
+		/*
+		 * Protect against concurrent action removal, affinity
+		 * changes etc.
+		 */
+		raw_spin_lock(&desc->lock);
+		data = irq_desc_get_irq_data(desc);
+		cpumask_copy(&affinity_new,
+			     irq_data_get_affinity_mask(data));
+		cpumask_clear_cpu(this_cpu, &affinity_new);
 
-			/*
-			 * Protect against concurrent action removal,
-			 * affinity changes etc.
-			 */
-			raw_spin_lock(&desc->lock);
-			data = irq_desc_get_irq_data(desc);
-			cpumask_copy(&affinity_new, data->affinity);
-			cpumask_clear_cpu(this_cpu, &affinity_new);
+		/* Do not count inactive or per-cpu irqs. */
+		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
+			raw_spin_unlock(&desc->lock);
+			continue;
+		}
 
-			/* Do not count inactive or per-cpu irqs. */
-			if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
-				raw_spin_unlock(&desc->lock);
-				continue;
-			}
-
-			raw_spin_unlock(&desc->lock);
-			/*
-			 * A single irq may be mapped to multiple
-			 * cpu's vector_irq[] (for example IOAPIC cluster
-			 * mode).  In this case we have two
-			 * possibilities:
-			 *
-			 * 1) the resulting affinity mask is empty; that is
-			 * this the down'd cpu is the last cpu in the irq's
-			 * affinity mask, or
-			 *
-			 * 2) the resulting affinity mask is no longer
-			 * a subset of the online cpus but the affinity
-			 * mask is not zero; that is the down'd cpu is the
-			 * last online cpu in a user set affinity mask.
-			 */
-			if (cpumask_empty(&affinity_new) ||
-			    !cpumask_subset(&affinity_new, &online_new))
-				this_count++;
-		}
+		raw_spin_unlock(&desc->lock);
+		/*
+		 * A single irq may be mapped to multiple cpu's
+		 * vector_irq[] (for example IOAPIC cluster mode).  In
+		 * this case we have two possibilities:
+		 *
+		 * 1) the resulting affinity mask is empty; that is
+		 * this the down'd cpu is the last cpu in the irq's
+		 * affinity mask, or
+		 *
+		 * 2) the resulting affinity mask is no longer a
+		 * subset of the online cpus but the affinity mask is
+		 * not zero; that is the down'd cpu is the last online
+		 * cpu in a user set affinity mask.
		 */
+		if (cpumask_empty(&affinity_new) ||
+		    !cpumask_subset(&affinity_new, &online_new))
+			this_count++;
 	}
 
 	count = 0;
 	for_each_online_cpu(cpu) {
@@ -418,7 +413,7 @@ int check_irq_vectors_for_cpu_disable(void)
 		for (vector = FIRST_EXTERNAL_VECTOR;
 		     vector < first_system_vector; vector++) {
 			if (!test_bit(vector, used_vectors) &&
-			    per_cpu(vector_irq, cpu)[vector] < 0)
+			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
 				count++;
 		}
 	}
@@ -455,7 +450,7 @@ void fixup_irqs(void)
 
 		raw_spin_lock(&desc->lock);
 		data = irq_desc_get_irq_data(desc);
-		affinity = data->affinity;
+		affinity = irq_data_get_affinity_mask(data);
 		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
@@ -523,14 +518,13 @@ void fixup_irqs(void)
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
-		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
+		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr  & (1 << (vector % 32))) {
-			irq = __this_cpu_read(vector_irq[vector]);
-			desc = irq_to_desc(irq);
+			desc = __this_cpu_read(vector_irq[vector]);
 
 			raw_spin_lock(&desc->lock);
 			data = irq_desc_get_irq_data(desc);
 			chip = irq_data_get_irq_chip(data);
@@ -541,7 +535,7 @@ void fixup_irqs(void)
 			raw_spin_unlock(&desc->lock);
 		}
 		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 	}
 }
 #endif
@@ -148,21 +148,21 @@ void do_softirq_own_stack(void)
 	call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
+	unsigned int irq;
 	int overflow;
 
 	overflow = check_stack_overflow();
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (IS_ERR_OR_NULL(desc))
 		return false;
 
+	irq = irq_desc_get_irq(desc);
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
-		desc->handle_irq(irq, desc);
+		generic_handle_irq_desc(irq, desc);
 	}
 
 	return true;
...
@@ -68,16 +68,13 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
-
 	stack_overflow_check(regs);
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (unlikely(IS_ERR_OR_NULL(desc)))
 		return false;
 
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
 	return true;
 }
...
@@ -52,7 +52,7 @@ static struct irqaction irq2 = {
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED,
+	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED)
+		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
 			return 1;
 	}
 
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
 	 * irq's migrate etc.
 	 */
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
+		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
 	x86_init.irqs.intr_init();
 }
...
@@ -835,16 +835,46 @@ static struct irq_chip lguest_irq_controller = {
 	.irq_unmask	= enable_lguest_irq,
 };
 
+/*
+ * Interrupt descriptors are allocated as-needed, but low-numbered ones are
+ * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
+ * tells us the irq is already used: other errors (ie. ENOMEM) we take
+ * seriously.
+ */
+static int lguest_setup_irq(unsigned int irq)
+{
+	struct irq_desc *desc;
+	int err;
+
+	/* Returns -ve error or vector number. */
+	err = irq_alloc_desc_at(irq, 0);
+	if (err < 0 && err != -EEXIST)
+		return err;
+
+	/*
+	 * Tell the Linux infrastructure that the interrupt is
+	 * controlled by our level-based lguest interrupt controller.
+	 */
+	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
+				      handle_level_irq, "level");
+
+	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
+	desc = irq_to_desc(irq);
+	__this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
+	return 0;
+}
+
 static int lguest_enable_irq(struct pci_dev *dev)
 {
+	int err;
 	u8 line = 0;
 
 	/* We literally use the PCI interrupt line as the irq number. */
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
-	irq_set_chip_and_handler_name(line, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	dev->irq = line;
-	return 0;
+	err = lguest_setup_irq(line);
+	if (!err)
+		dev->irq = line;
+	return err;
 }
 
 /* We don't do hotplug PCI, so this shouldn't be called. */
@@ -855,17 +885,13 @@ static void lguest_disable_irq(struct pci_dev *dev)
 
 /*
  * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
- * interrupt (except 128, which is used for system calls), and then tells the
- * Linux infrastructure that each interrupt is controlled by our level-based
- * lguest interrupt controller.
+ * interrupt (except 128, which is used for system calls).
  */
 static void __init lguest_init_IRQ(void)
 {
 	unsigned int i;
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
-		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
-		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != IA32_SYSCALL_VECTOR)
 			set_intr_gate(i, irq_entries_start +
 				      8 * (i - FIRST_EXTERNAL_VECTOR));
@@ -878,26 +904,6 @@ static void __init lguest_init_IRQ(void)
 	irq_ctx_init(smp_processor_id());
 }
 
-/*
- * Interrupt descriptors are allocated as-needed, but low-numbered ones are
- * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
- * tells us the irq is already used: other errors (ie. ENOMEM) we take
- * seriously.
- */
-int lguest_setup_irq(unsigned int irq)
-{
-	int err;
-
-	/* Returns -ve error or vector number. */
-	err = irq_alloc_desc_at(irq, 0);
-	if (err < 0 && err != -EEXIST)
-		return err;
-
-	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	return 0;
-}
-
 /*
  * Time.
  *
@@ -1028,7 +1034,8 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
 static void lguest_time_init(void)
 {
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
-	lguest_setup_irq(0);
+	if (lguest_setup_irq(0) != 0)
+		panic("Could not set up timer irq");
 	irq_set_handler(0, lguest_time_irq);
 
 	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
...
@@ -89,7 +89,7 @@ static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
 		return -EINVAL;
 
 	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
-				 irq_data->node);
+				 irq_data_get_node(irq_data));
 	if (!chip_data)
 		return -ENOMEM;
...
@@ -336,7 +336,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
 	xen_evtchn_port_bind_to_cpu(info, cpu);
@@ -373,7 +373,7 @@ static void xen_irq_init(unsigned irq)
 	struct irq_info *info;
 #ifdef CONFIG_SMP
 	/* By default all event channels notify CPU#0. */
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
 #endif
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
...
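
These accessor conversions (irq_data_get_affinity_mask(), irq_data_get_irq_handler_data(), irq_data_get_node(), irq_get_affinity_mask()) are mechanical, but they are what lets the core move the underlying fields later without another tree-wide sweep — the preparatory work the commit message mentions. A tiny sketch of the pattern with invented names, assuming nothing about the real struct layout:

#include <stdio.h>

struct cpumask { unsigned long bits; };

/*
 * Invented stand-in: today the mask is embedded here; the accessor is
 * the only code allowed to know that, so the field can move later.
 */
struct irq_data {
	struct cpumask affinity;
};

static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
	return &d->affinity;
}

int main(void)
{
	struct irq_data d = { .affinity = { .bits = 0x3 } };

	/* Callers never touch d.affinity directly, mirroring the hunks above. */
	printf("affinity bits: %#lx\n", irq_data_get_affinity_mask(&d)->bits);
	return 0;
}
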
@@ -166,12 +166,16 @@ static inline int handle_domain_irq(struct irq_domain *domain,
 #endif
 
 /* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
+static inline int irq_desc_has_action(struct irq_desc *desc)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	return desc->action != NULL;
 }
 
+static inline int irq_has_action(unsigned int irq)
+{
+	return irq_desc_has_action(irq_to_desc(irq));
+}
+
 /* caller has locked the irq_desc and both params are valid */
 static inline void __irq_set_handler_locked(unsigned int irq,
 					    irq_flow_handler_t handler)
...
@@ -11,3 +11,4 @@ lguest: include/linux/virtio_types.h
 
 clean:
 	rm -f lguest
+	rm -rf include
...
@@ -125,7 +125,11 @@ struct device_list {
 /* The list of Guest devices, based on command line arguments. */
 static struct device_list devices;
 
-struct virtio_pci_cfg_cap {
+/*
+ * Just like struct virtio_pci_cfg_cap in uapi/linux/virtio_pci.h,
+ * but uses a u32 explicitly for the data.
+ */
+struct virtio_pci_cfg_cap_u32 {
 	struct virtio_pci_cap cap;
 	u32 pci_cfg_data; /* Data for BAR access. */
 };
@@ -157,7 +161,7 @@ struct pci_config {
 	struct virtio_pci_notify_cap notify;
 	struct virtio_pci_cap isr;
 	struct virtio_pci_cap device;
-	struct virtio_pci_cfg_cap cfg_access;
+	struct virtio_pci_cfg_cap_u32 cfg_access;
 };
 
 /* The device structure describes a single device. */
@@ -1291,7 +1295,7 @@ static struct device *dev_and_reg(u32 *reg)
  * only fault if they try to write with some invalid bar/offset/length.
  */
 static bool valid_bar_access(struct device *d,
-			     struct virtio_pci_cfg_cap *cfg_access)
+			     struct virtio_pci_cfg_cap_u32 *cfg_access)
 {
 	/* We only have 1 bar (BAR0) */
 	if (cfg_access->cap.bar != 0)
...
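
The rename in the last hunks exists because the tool now shares the uapi virtio_pci.h, which defines a struct with the same virtio_pci_cfg_cap tag but a byte-array payload; redefining a tag with a different body in one translation unit is a compile error. A compressed illustration of the clash and the fix (field lists trimmed, shapes assumed from the uapi header):

/* Stand-in for the uapi definition (fields trimmed): */
struct virtio_pci_cap { unsigned char bar; /* ... */ };
struct virtio_pci_cfg_cap {
	struct virtio_pci_cap cap;
	unsigned char pci_cfg_data[4];	/* byte array in the shared header */
};

/*
 * The tool wants a u32 payload. Re-declaring "struct virtio_pci_cfg_cap"
 * with a different body would be a redefinition error once both are in
 * the same translation unit, so the local variant gets its own tag:
 */
typedef unsigned int u32;
struct virtio_pci_cfg_cap_u32 {
	struct virtio_pci_cap cap;
	u32 pci_cfg_data;		/* Data for BAR access. */
};

int main(void) { return 0; }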