Commit 5fc509bc authored by Boris Ostrovsky, committed by David Vrabel

xen/x86: Move irq allocation from Xen smp_op.cpu_up()

Commit ce0d3c0a ("genirq: Revert sparse irq locking around
__cpu_up() and move it to x86 for now") reverted the irq locking
introduced by commit a8994181 ("hotplug: Prevent alloc/free
of irq descriptors during cpu up/down") because Xen allocates
irqs in both of its cpu_up ops.

We can move those allocations into CPU notifiers so that the
original patch can be reinstated.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent fa8410b3
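For context, the interface this patch moves to is the legacy CPU notifier
mechanism (since replaced by the cpuhp state machine). Below is a minimal
sketch of the pattern, not the patch's actual code: example_setup_cpu() and
example_teardown_cpu() are hypothetical stand-ins for xen_smp_intr_init()
and xen_smp_intr_free(). The point is that per-CPU irq allocation happens
in CPU_UP_PREPARE, before __cpu_up() runs, so the cpu_up op itself no
longer needs to allocate irq descriptors.

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical helpers standing in for xen_smp_intr_init()/xen_smp_intr_free(). */
static int example_setup_cpu(unsigned int cpu)
{
        return 0;       /* pretend the per-CPU irqs were allocated */
}

static void example_teardown_cpu(unsigned int cpu)
{
        /* free the per-CPU irqs again */
}

static int example_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Runs before __cpu_up(), so allocation happens here. */
                if (example_setup_cpu(cpu))
                        return NOTIFY_BAD;      /* cancels the bring-up */
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                example_teardown_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_notify,
};

/* Registered once at boot: register_cpu_notifier(&example_cpu_notifier); */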
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -140,6 +140,8 @@ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+static struct notifier_block xen_cpu_notifier;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -1627,6 +1629,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
         xen_initial_gdt = &per_cpu(gdt_page, 0);
 
         xen_smp_init();
+        register_cpu_notifier(&xen_cpu_notifier);
 
 #ifdef CONFIG_ACPI_NUMA
         /*
@@ -1820,21 +1823,53 @@ static void __init init_hvm_pv_info(void)
         xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
-                              void *hcpu)
+static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
+                          void *hcpu)
 {
         int cpu = (long)hcpu;
+        int rc;
+
         switch (action) {
         case CPU_UP_PREPARE:
-                if (cpu_acpi_id(cpu) != U32_MAX)
-                        per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
-                else
-                        per_cpu(xen_vcpu_id, cpu) = cpu;
-                xen_vcpu_setup(cpu);
-                if (xen_have_vector_callback) {
-                        if (xen_feature(XENFEAT_hvm_safe_pvclock))
-                                xen_setup_timer(cpu);
+                if (xen_hvm_domain()) {
+                        /*
+                         * This can happen if CPU was offlined earlier and
+                         * offlining timed out in common_cpu_die().
+                         */
+                        if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+                                xen_smp_intr_free(cpu);
+                                xen_uninit_lock_cpu(cpu);
+                        }
+
+                        if (cpu_acpi_id(cpu) != U32_MAX)
+                                per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
+                        else
+                                per_cpu(xen_vcpu_id, cpu) = cpu;
+                        xen_vcpu_setup(cpu);
                 }
+
+                if (xen_pv_domain() ||
+                    (xen_have_vector_callback &&
+                     xen_feature(XENFEAT_hvm_safe_pvclock)))
+                        xen_setup_timer(cpu);
+
+                rc = xen_smp_intr_init(cpu);
+                if (rc) {
+                        WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+                             cpu, rc);
+                        return NOTIFY_BAD;
+                }
+
+                break;
+        case CPU_ONLINE:
+                xen_init_lock_cpu(cpu);
+                break;
+        case CPU_UP_CANCELED:
+                xen_smp_intr_free(cpu);
+                if (xen_pv_domain() ||
+                    (xen_have_vector_callback &&
+                     xen_feature(XENFEAT_hvm_safe_pvclock)))
+                        xen_teardown_timer(cpu);
+                break;
         default:
                 break;
@@ -1842,8 +1877,8 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
         return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier = {
-        .notifier_call = xen_hvm_cpu_notify,
+static struct notifier_block xen_cpu_notifier = {
+        .notifier_call = xen_cpu_notify,
 };
 
 #ifdef CONFIG_KEXEC_CORE
@@ -1875,7 +1910,7 @@ static void __init xen_hvm_guest_init(void)
         if (xen_feature(XENFEAT_hvm_callback_vector))
                 xen_have_vector_callback = 1;
         xen_hvm_smp_init();
-        register_cpu_notifier(&xen_hvm_cpu_notifier);
+        register_cpu_notifier(&xen_cpu_notifier);
         xen_unplug_emulated_devices();
         x86_init.irqs.intr_init = xen_init_IRQ;
         xen_hvm_init_time_ops();
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -115,7 +115,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
 {
         if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +159,7 @@ static void xen_smp_intr_free(unsigned int cpu)
                 per_cpu(xen_pmu_irq, cpu).name = NULL;
         }
 };
-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
 {
         int rc;
         char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -475,8 +475,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
         common_cpu_up(cpu, idle);
 
         xen_setup_runstate_info(cpu);
-        xen_setup_timer(cpu);
-        xen_init_lock_cpu(cpu);
 
         /*
          * PV VCPUs are always successfully taken down (see 'while' loop
@@ -495,10 +493,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 
         xen_pmu_init(cpu);
 
-        rc = xen_smp_intr_init(cpu);
-        if (rc)
-                return rc;
-
         rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
         BUG_ON(rc);
 
@@ -769,47 +763,12 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
         xen_init_lock_cpu(0);
 }
 
-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-        int rc;
-
-        /*
-         * This can happen if CPU was offlined earlier and
-         * offlining timed out in common_cpu_die().
-         */
-        if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-                xen_smp_intr_free(cpu);
-                xen_uninit_lock_cpu(cpu);
-        }
-
-        /*
-         * xen_smp_intr_init() needs to run before native_cpu_up()
-         * so that IPI vectors are set up on the booting CPU before
-         * it is marked online in native_cpu_up().
-         */
-        rc = xen_smp_intr_init(cpu);
-        WARN_ON(rc);
-        if (!rc)
-                rc = native_cpu_up(cpu, tidle);
-
-        /*
-         * We must initialize the slowpath CPU kicker _after_ the native
-         * path has executed. If we initialized it before none of the
-         * unlocker IPI kicks would reach the booting CPU as the booting
-         * CPU had not set itself 'online' in cpu_online_mask. That mask
-         * is checked when IPIs are sent (on HVM at least).
-         */
-        xen_init_lock_cpu(cpu);
-
-        return rc;
-}
-
 void __init xen_hvm_smp_init(void)
 {
         if (!xen_have_vector_callback)
                 return;
         smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
         smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-        smp_ops.cpu_up = xen_hvm_cpu_up;
         smp_ops.cpu_die = xen_cpu_die;
         smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
         smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -1,5 +1,6 @@
 #ifndef _XEN_SMP_H
 
+#ifdef CONFIG_SMP
 extern void xen_send_IPI_mask(const struct cpumask *mask,
                               int vector);
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
@@ -8,6 +9,18 @@ extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+        return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
 #else