Commit d6f83427 authored by Heiner Kallweit, committed by Thomas Gleixner

x86/irq: Move IS_ERR_OR_NULL() check into common do_IRQ() code

Both the 64bit and the 32bit handle_irq() implementations check the irq
descriptor pointer with IS_ERR_OR_NULL() and return failure. That can be
done more simply in the common do_IRQ() code.

This reduces the 64bit handle_irq() function to a wrapper around
generic_handle_irq_desc(). Invoke it directly from do_IRQ() to spare the
extra function call.

[ tglx: Got rid of the #ifdef and massaged changelog ]
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/2ec758c7-9aaa-73ab-f083-cc44c86aa741@gmail.com
parent e30c44e2
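
For context before the diff, here is a small standalone C model of the check that do_IRQ() now performs centrally. This is only a sketch under assumptions: is_err_or_null() mimics the kernel's IS_ERR_OR_NULL() using the usual MAX_ERRNO window, the VECTOR_UNUSED/VECTOR_SHUTDOWN/VECTOR_RETRIGGERED encodings are reproduced from memory of the x86 vector management headers rather than quoted from this tree, and dispatch() is a hypothetical helper for the demo.

/* Userspace model of the dispatch decision that do_IRQ() now makes centrally.
 * The sentinel encodings below are assumptions mirroring the x86 vector
 * markers, not a quote of the kernel headers.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO	4095

/* Stand-in for the kernel's IS_ERR_OR_NULL(): NULL or a pointer in the
 * topmost MAX_ERRNO addresses counts as "no valid descriptor here". */
static bool is_err_or_null(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Assumed sentinel markers stored in vector_irq[] instead of a real
 * struct irq_desc pointer. */
#define VECTOR_UNUSED		NULL
#define VECTOR_SHUTDOWN		((void *)-1L)
#define VECTOR_RETRIGGERED	((void *)-2L)

/* Hypothetical helper: route a vector_irq[] entry the way do_IRQ() does. */
static void dispatch(const char *name, void *desc)
{
	if (!is_err_or_null(desc))
		printf("%-20s -> handle the interrupt (generic_handle_irq_desc)\n", name);
	else
		printf("%-20s -> spurious path (ack_APIC_irq etc.)\n", name);
}

int main(void)
{
	int fake_desc;	/* stands in for a real struct irq_desc */

	dispatch("valid descriptor", &fake_desc);
	dispatch("VECTOR_UNUSED", VECTOR_UNUSED);
	dispatch("VECTOR_SHUTDOWN", VECTOR_SHUTDOWN);
	dispatch("VECTOR_RETRIGGERED", VECTOR_RETRIGGERED);
	return 0;
}

The point is that a single IS_ERR_OR_NULL() test covers both the empty slot (NULL) and the ERR_PTR-style sentinels, which is why the per-architecture checks in handle_irq() can be dropped.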
arch/x86/include/asm/irq.h
@@ -34,7 +34,7 @@ extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
arch/x86/kernel/irq.c
@@ -243,8 +243,12 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
 	desc = __this_cpu_read(vector_irq[vector]);
-
-	if (!handle_irq(desc, regs)) {
+	if (likely(!IS_ERR_OR_NULL(desc))) {
+		if (IS_ENABLED(CONFIG_X86_32))
+			handle_irq(desc, regs);
+		else
+			generic_handle_irq_desc(desc);
+	} else {
 		ack_APIC_irq();
 
 		if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
arch/x86/kernel/irq_32.c
@@ -148,18 +148,13 @@ void do_softirq_own_stack(void)
 	call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
 	int overflow = check_stack_overflow();
 
-	if (IS_ERR_OR_NULL(desc))
-		return false;
-
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		generic_handle_irq_desc(desc);
 	}
-
-	return true;
 }
arch/x86/kernel/irq_64.c
@@ -26,15 +26,6 @@
 DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_backing_store);
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
-{
-	if (IS_ERR_OR_NULL(desc))
-		return false;
-
-	generic_handle_irq_desc(desc);
-	return true;
-}
-
 #ifdef CONFIG_VMAP_STACK
 /*
  * VMAP the backing store with guard pages