Commit 541efb76 authored by Linus Torvalds

Merge tag 'for-linus-4.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from David Vrabel:
 "xen features and fixes for 4.9:

   - switch to new CPU hotplug mechanism

   - support driver_override in pciback

   - require vector callback for HVM guests (the alternate mechanism via
     the platform device has been broken for ages)"
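
   [Editor's note: the "new CPU hotplug mechanism" above is the kernel's
   hotplug state machine, which several patches below adopt in place of
   register_cpu_notifier() callbacks that switch over CPU_UP_PREPARE /
   CPU_DEAD actions. A minimal illustrative sketch of the pattern follows;
   the example_* names are made up here, while cpuhp_setup_state_nocalls()
   and CPUHP_XEN_PREPARE do appear in this series:

   #include <linux/cpuhotplug.h>

   /* Hypothetical per-CPU callbacks; each returns 0 on success, and a
    * negative errno from the prepare step vetoes the CPU bring-up. */
   static int example_cpu_prepare(unsigned int cpu)
   {
           /* allocate per-CPU resources before the CPU comes up */
           return 0;
   }

   static int example_cpu_dead(unsigned int cpu)
   {
           /* release per-CPU resources after the CPU has gone down */
           return 0;
   }

   static int __init example_init(void)
   {
           return cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
                                            "XEN_EXAMPLE_PREPARE",
                                            example_cpu_prepare,
                                            example_cpu_dead);
   }

   The _nocalls variant registers the callbacks without invoking them for
   CPUs that are already online, which matches how the Xen code below wires
   itself up during early boot.]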

* tag 'for-linus-4.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/x86: Update topology map for PV VCPUs
  xen/x86: Initialize per_cpu(xen_vcpu, 0) a little earlier
  xen/pciback: support driver_override
  xen/pciback: avoid multiple entries in slot list
  xen/pciback: simplify pcistub device handling
  xen: Remove event channel notification through Xen PCI platform device
  xen/events: Convert to hotplug state machine
  xen/x86: Convert to hotplug state machine
  x86/xen: add missing \n at end of printk warning message
  xen/grant-table: Use kmalloc_array() in arch_gnttab_valloc()
  xen: Make VPMU init message look less scary
  xen: rename xen_pmu_init() in sys-hypervisor.c
  hotplug: Prevent alloc/free of irq descriptors during cpu up/down (again)
  xen/x86: Move irq allocation from Xen smp_op.cpu_up()
parents 6218590b a6a198bc
@@ -20,15 +20,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 /* No need for a barrier -- XCHG is a barrier on x86. */
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))

-extern int xen_have_vector_callback;
-
-/*
- * Events delivered via platform PCI interrupts are always
- * routed to vcpu 0 and hence cannot be rebound.
- */
-static inline bool xen_support_evtchn_rebind(void)
-{
-	return (!xen_hvm_domain() || xen_have_vector_callback);
-}
-
 #endif /* _ASM_X86_XEN_EVENTS_H */
@@ -456,7 +456,7 @@ void __init xen_msi_init(void)

 int __init pci_xen_hvm_init(void)
 {
-	if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
+	if (!xen_feature(XENFEAT_hvm_pirqs))
 		return 0;

 #ifdef CONFIG_ACPI
...
@@ -137,8 +137,10 @@ struct shared_info xen_dummy_shared_info;
 void *xen_initial_gdt;

 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);

-__read_mostly int xen_have_vector_callback;
-EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+static int xen_cpu_up_prepare(unsigned int cpu);
+static int xen_cpu_up_online(unsigned int cpu);
+static int xen_cpu_dead(unsigned int cpu);

 /*
  * Point at some empty memory to start with. We map the real shared_info
@@ -1519,10 +1521,7 @@ static void __init xen_pvh_early_guest_init(void)
 	if (!xen_feature(XENFEAT_auto_translated_physmap))
 		return;

-	if (!xen_feature(XENFEAT_hvm_callback_vector))
-		return;
-
-	xen_have_vector_callback = 1;
+	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));

 	xen_pvh_early_cpu_init(0, false);
 	xen_pvh_set_cr_flags(0);
@@ -1538,6 +1537,24 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }

+static int xen_cpuhp_setup(void)
+{
+	int rc;
+
+	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
+				       "XEN_HVM_GUEST_PREPARE",
+				       xen_cpu_up_prepare, xen_cpu_dead);
+	if (rc >= 0) {
+		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					       "XEN_HVM_GUEST_ONLINE",
+					       xen_cpu_up_online, NULL);
+		if (rc < 0)
+			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
+	}
+
+	return rc >= 0 ? 0 : rc;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1639,6 +1656,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

+	WARN_ON(xen_cpuhp_setup());
+
 	local_irq_disable();
 	early_boot_irqs_disabled = true;
@@ -1819,31 +1838,54 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }

-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
+static int xen_cpu_up_prepare(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	switch (action) {
-	case CPU_UP_PREPARE:
+	int rc;
+
+	if (xen_hvm_domain()) {
+		/*
+		 * This can happen if CPU was offlined earlier and
+		 * offlining timed out in common_cpu_die().
+		 */
+		if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+			xen_smp_intr_free(cpu);
+			xen_uninit_lock_cpu(cpu);
+		}
+
 		if (cpu_acpi_id(cpu) != U32_MAX)
 			per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
 		else
 			per_cpu(xen_vcpu_id, cpu) = cpu;
 		xen_vcpu_setup(cpu);
-		if (xen_have_vector_callback) {
-			if (xen_feature(XENFEAT_hvm_safe_pvclock))
-				xen_setup_timer(cpu);
-		}
-		break;
-	default:
-		break;
 	}
-	return NOTIFY_OK;
+
+	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_setup_timer(cpu);
+
+	rc = xen_smp_intr_init(cpu);
+	if (rc) {
+		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+		     cpu, rc);
+		return rc;
+	}
+	return 0;
 }

-static struct notifier_block xen_hvm_cpu_notifier = {
-	.notifier_call = xen_hvm_cpu_notify,
-};
+static int xen_cpu_dead(unsigned int cpu)
+{
+	xen_smp_intr_free(cpu);
+
+	if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+		xen_teardown_timer(cpu);
+
+	return 0;
+}
+
+static int xen_cpu_up_online(unsigned int cpu)
+{
+	xen_init_lock_cpu(cpu);
+	return 0;
+}

 #ifdef CONFIG_KEXEC_CORE
 static void xen_hvm_shutdown(void)
@@ -1871,10 +1913,10 @@ static void __init xen_hvm_guest_init(void)

 	xen_panic_handler_init();

-	if (xen_feature(XENFEAT_hvm_callback_vector))
-		xen_have_vector_callback = 1;
+	BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));

 	xen_hvm_smp_init();
-	register_cpu_notifier(&xen_hvm_cpu_notifier);
+	WARN_ON(xen_cpuhp_setup());
 	xen_unplug_emulated_devices();
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
@@ -1910,7 +1952,7 @@ bool xen_hvm_need_lapic(void)
 		return false;
 	if (!xen_hvm_domain())
 		return false;
-	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+	if (xen_feature(XENFEAT_hvm_pirqs))
 		return false;
 	return true;
 }
...
@@ -89,7 +89,7 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)

 static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+	area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL);
 	if (area->ptes == NULL)
 		return -ENOMEM;

...
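
   [Editor's note: the kmalloc_array() conversion above is the standard
   guard against integer overflow in allocation sizes. An open-coded
   kmalloc(sizeof(elem) * n, ...) can wrap for a huge n and silently return
   a too-small buffer, while kmalloc_array(n, size, flags) checks the
   multiplication and returns NULL on overflow. A minimal illustrative
   sketch; alloc_pte_ptrs() is a made-up helper, not from this commit:

   #include <linux/slab.h>

   static pte_t **alloc_pte_ptrs(unsigned int nr_frames)
   {
           /* Overflow-checked equivalent of
            *   kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
            * returns NULL if nr_frames * sizeof(pte_t *) would wrap. */
           return kmalloc_array(nr_frames, sizeof(pte_t *), GFP_KERNEL);
   }]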
@@ -61,7 +61,7 @@ static int check_platform_magic(void)
 		}
 		break;
 	default:
-		printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version");
+		printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version\n");
 		return XEN_PLATFORM_ERR_PROTOCOL;
 	}

...
@@ -547,8 +547,11 @@ void xen_pmu_init(int cpu)
 	return;

 fail:
-	pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
-		     cpu, err);
+	if (err == -EOPNOTSUPP || err == -ENOSYS)
+		pr_info_once("VPMU disabled by hypervisor.\n");
+	else
+		pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
+			     cpu, err);
 	free_pages((unsigned long)xenpmu_data, 0);
 }
...
@@ -87,6 +87,12 @@ static void cpu_bringup(void)
 	cpu_data(cpu).x86_max_cores = 1;
 	set_cpu_sibling_map(cpu);

+	/*
+	 * identify_cpu() may have set logical_pkg_id to -1 due
+	 * to incorrect phys_proc_id. Let's re-compute it.
+	 */
+	topology_update_package_map(apic->cpu_present_to_apicid(cpu), cpu);
+
 	xen_setup_cpu_clockevents();

 	notify_cpu_starting(cpu);
@@ -115,7 +121,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }

-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
 {
 	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +165,7 @@ static void xen_smp_intr_free(unsigned int cpu)
 		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 };

-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
 	char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -475,8 +481,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	common_cpu_up(cpu, idle);

 	xen_setup_runstate_info(cpu);
-	xen_setup_timer(cpu);
-	xen_init_lock_cpu(cpu);

 	/*
 	 * PV VCPUs are always successfully taken down (see 'while' loop
@@ -495,10 +499,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)

 	xen_pmu_init(cpu);

-	rc = xen_smp_intr_init(cpu);
-	if (rc)
-		return rc;
-
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
 	BUG_ON(rc);
@@ -769,47 +769,10 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }

-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	int rc;
-
-	/*
-	 * This can happen if CPU was offlined earlier and
-	 * offlining timed out in common_cpu_die().
-	 */
-	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-		xen_smp_intr_free(cpu);
-		xen_uninit_lock_cpu(cpu);
-	}
-
-	/*
-	 * xen_smp_intr_init() needs to run before native_cpu_up()
-	 * so that IPI vectors are set up on the booting CPU before
-	 * it is marked online in native_cpu_up().
-	 */
-	rc = xen_smp_intr_init(cpu);
-	WARN_ON(rc);
-	if (!rc)
-		rc = native_cpu_up(cpu, tidle);
-
-	/*
-	 * We must initialize the slowpath CPU kicker _after_ the native
-	 * path has executed. If we initialized it before none of the
-	 * unlocker IPI kicks would reach the booting CPU as the booting
-	 * CPU had not set itself 'online' in cpu_online_mask. That mask
-	 * is checked when IPIs are sent (on HVM at least).
-	 */
-	xen_init_lock_cpu(cpu);
-	return rc;
-}
-
 void __init xen_hvm_smp_init(void)
 {
-	if (!xen_have_vector_callback)
-		return;
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-	smp_ops.cpu_up = xen_hvm_cpu_up;
 	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
...
 #ifndef _XEN_SMP_H

+#ifdef CONFIG_SMP
 extern void xen_send_IPI_mask(const struct cpumask *mask,
 			      int vector);
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
@@ -8,6 +9,18 @@ extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);

+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+	return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
 #else
...
@@ -432,11 +432,6 @@ static void xen_hvm_setup_cpu_clockevents(void)

 void __init xen_hvm_init_time_ops(void)
 {
-	/* vector callback is needed otherwise we cannot receive interrupts
-	 * on cpu > 0 and at this point we don't know how many cpus are
-	 * available */
-	if (!xen_have_vector_callback)
-		return;
-
 	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
 		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
 		       "disable pv timer\n");
...
@@ -1314,9 +1314,6 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	if (!VALID_EVTCHN(evtchn))
 		return -1;

-	if (!xen_support_evtchn_rebind())
-		return -1;
-
 	/* Send future instances of this interrupt to other vcpu. */
 	bind_vcpu.port = evtchn;
 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
@@ -1650,20 +1647,15 @@ void xen_callback_vector(void)
 {
 	int rc;
 	uint64_t callback_via;
-	if (xen_have_vector_callback) {
-		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-		rc = xen_set_callback_via(callback_via);
-		if (rc) {
-			pr_err("Request for Xen HVM callback vector failed\n");
-			xen_have_vector_callback = 0;
-			return;
-		}
-		pr_info("Xen HVM callback vector for event delivery is enabled\n");
-		/* in the restore case the vector has already been allocated */
-		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
-			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-					xen_hvm_callback_vector);
-	}
+
+	callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+	rc = xen_set_callback_via(callback_via);
+	BUG_ON(rc);
+	pr_info("Xen HVM callback vector for event delivery is enabled\n");
+	/* in the restore case the vector has already been allocated */
+	if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+				xen_hvm_callback_vector);
 }
 #else
 void xen_callback_vector(void) {}
...
@@ -418,30 +418,18 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
 	return ret;
 }

-static int evtchn_fifo_cpu_notification(struct notifier_block *self,
-					unsigned long action,
-					void *hcpu)
+static int xen_evtchn_cpu_prepare(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	int ret = 0;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		if (!per_cpu(cpu_control_block, cpu))
-			ret = evtchn_fifo_alloc_control_block(cpu);
-		break;
-	case CPU_DEAD:
-		__evtchn_fifo_handle_events(cpu, true);
-		break;
-	default:
-		break;
-	}
-	return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
+	if (!per_cpu(cpu_control_block, cpu))
+		return evtchn_fifo_alloc_control_block(cpu);
+	return 0;
 }

-static struct notifier_block evtchn_fifo_cpu_notifier = {
-	.notifier_call = evtchn_fifo_cpu_notification,
-};
+static int xen_evtchn_cpu_dead(unsigned int cpu)
+{
+	__evtchn_fifo_handle_events(cpu, true);
+	return 0;
+}

 int __init xen_evtchn_fifo_init(void)
 {
@@ -456,7 +444,9 @@ int __init xen_evtchn_fifo_init(void)

 	evtchn_ops = &evtchn_ops_fifo;

-	register_cpu_notifier(&evtchn_fifo_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+				  "CPUHP_XEN_EVTCHN_PREPARE",
+				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
 out:
 	put_cpu();
 	return ret;
...
@@ -42,7 +42,6 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
-static uint64_t callback_via;

 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -55,51 +54,6 @@ static unsigned long alloc_xen_mmio(unsigned long len)
 	return addr;
 }

-static uint64_t get_callback_via(struct pci_dev *pdev)
-{
-	u8 pin;
-	int irq;
-
-	irq = pdev->irq;
-	if (irq < 16)
-		return irq; /* ISA IRQ */
-
-	pin = pdev->pin;
-
-	/* We don't know the GSI. Specify the PCI INTx line instead. */
-	return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
-		((uint64_t)pci_domain_nr(pdev->bus) << 32) |
-		((uint64_t)pdev->bus->number << 16) |
-		((uint64_t)(pdev->devfn & 0xff) << 8) |
-		((uint64_t)(pin - 1) & 3);
-}
-
-static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
-{
-	xen_hvm_evtchn_do_upcall();
-	return IRQ_HANDLED;
-}
-
-static int xen_allocate_irq(struct pci_dev *pdev)
-{
-	return request_irq(pdev->irq, do_hvm_evtchn_intr,
-			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
-			"xen-platform-pci", pdev);
-}
-
-static int platform_pci_resume(struct pci_dev *pdev)
-{
-	int err;
-
-	if (xen_have_vector_callback)
-		return 0;
-
-	err = xen_set_callback_via(callback_via);
-	if (err) {
-		dev_err(&pdev->dev, "platform_pci_resume failure!\n");
-		return err;
-	}
-	return 0;
-}
-
 static int platform_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -138,21 +92,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;

-	if (!xen_have_vector_callback) {
-		ret = xen_allocate_irq(pdev);
-		if (ret) {
-			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
-			goto out;
-		}
-		callback_via = get_callback_via(pdev);
-		ret = xen_set_callback_via(callback_via);
-		if (ret) {
-			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
-				 "err=%d\n", ret);
-			goto out;
-		}
-	}
-
 	max_nr_gframes = gnttab_max_grant_frames();
 	grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
 	ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -184,9 +123,6 @@ static struct pci_driver platform_driver = {
 	.name           = DRV_NAME,
 	.probe          = platform_pci_probe,
 	.id_table       = platform_pci_tbl,
-#ifdef CONFIG_PM
-	.resume_early   = platform_pci_resume,
-#endif
 };

 static int __init platform_pci_init(void)
...
@@ -215,7 +215,7 @@ static const struct attribute_group xen_compilation_group = {
 	.attrs = xen_compile_attrs,
 };

-static int __init xen_compilation_init(void)
+static int __init xen_sysfs_compilation_init(void)
 {
 	return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
 }
@@ -341,7 +341,7 @@ static const struct attribute_group xen_properties_group = {
 	.attrs = xen_properties_attrs,
 };

-static int __init xen_properties_init(void)
+static int __init xen_sysfs_properties_init(void)
 {
 	return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
 }
@@ -455,7 +455,7 @@ static const struct attribute_group xen_pmu_group = {
 	.attrs = xen_pmu_attrs,
 };

-static int __init xen_pmu_init(void)
+static int __init xen_sysfs_pmu_init(void)
 {
 	return sysfs_create_group(hypervisor_kobj, &xen_pmu_group);
 }
@@ -474,18 +474,18 @@ static int __init hyper_sysfs_init(void)
 	ret = xen_sysfs_version_init();
 	if (ret)
 		goto version_out;
-	ret = xen_compilation_init();
+	ret = xen_sysfs_compilation_init();
 	if (ret)
 		goto comp_out;
 	ret = xen_sysfs_uuid_init();
 	if (ret)
 		goto uuid_out;
-	ret = xen_properties_init();
+	ret = xen_sysfs_properties_init();
 	if (ret)
 		goto prop_out;
 #ifdef CONFIG_XEN_HAVE_VPMU
 	if (xen_initial_domain()) {
-		ret = xen_pmu_init();
+		ret = xen_sysfs_pmu_init();
 		if (ret) {
 			sysfs_remove_group(hypervisor_kobj,
 					   &xen_properties_group);
...
@@ -25,6 +25,8 @@
 #include "conf_space.h"
 #include "conf_space_quirks.h"

+#define PCISTUB_DRIVER_NAME "pciback"
+
 static char *pci_devs_to_hide;
 wait_queue_head_t xen_pcibk_aer_wait_queue;
 /*Add sem for sync AER handling and xen_pcibk remove/reconfigue ops,
@@ -149,13 +151,10 @@ static inline void pcistub_device_put(struct pcistub_device *psdev)
 	kref_put(&psdev->kref, pcistub_device_release);
 }

-static struct pcistub_device *pcistub_device_find(int domain, int bus,
-						  int slot, int func)
+static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
+							 int slot, int func)
 {
-	struct pcistub_device *psdev = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pcistub_devices_lock, flags);
+	struct pcistub_device *psdev;

 	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
 		if (psdev->dev != NULL
@@ -163,15 +162,25 @@ static struct pcistub_device *pcistub_device_find(int domain, int bus,
 		    && bus == psdev->dev->bus->number
 		    && slot == PCI_SLOT(psdev->dev->devfn)
 		    && func == PCI_FUNC(psdev->dev->devfn)) {
-			pcistub_device_get(psdev);
-			goto out;
+			return psdev;
 		}
 	}

-	/* didn't find it */
-	psdev = NULL;
+	return NULL;
+}
+
+static struct pcistub_device *pcistub_device_find(int domain, int bus,
+						  int slot, int func)
+{
+	struct pcistub_device *psdev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+	psdev = pcistub_device_find_locked(domain, bus, slot, func);
+	if (psdev)
+		pcistub_device_get(psdev);

-out:
 	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
 	return psdev;
 }
@@ -207,16 +216,9 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
 	spin_lock_irqsave(&pcistub_devices_lock, flags);

-	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-		if (psdev->dev != NULL
-		    && domain == pci_domain_nr(psdev->dev->bus)
-		    && bus == psdev->dev->bus->number
-		    && slot == PCI_SLOT(psdev->dev->devfn)
-		    && func == PCI_FUNC(psdev->dev->devfn)) {
-			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-			break;
-		}
-	}
+	psdev = pcistub_device_find_locked(domain, bus, slot, func);
+	if (psdev)
+		found_dev = pcistub_device_get_pci_dev(pdev, psdev);

 	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
 	return found_dev;
@@ -478,15 +480,48 @@ static int __init pcistub_init_devices_late(void)
 	return 0;
 }

-static int pcistub_seize(struct pci_dev *dev)
+static void pcistub_device_id_add_list(struct pcistub_device_id *new,
+				       int domain, int bus, unsigned int devfn)
+{
+	struct pcistub_device_id *pci_dev_id;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&device_ids_lock, flags);
+	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
+		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
+		    pci_dev_id->devfn == devfn) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		new->domain = domain;
+		new->bus = bus;
+		new->devfn = devfn;
+		list_add_tail(&new->slot_list, &pcistub_device_ids);
+	}
+
+	spin_unlock_irqrestore(&device_ids_lock, flags);
+
+	if (found)
+		kfree(new);
+}
+
+static int pcistub_seize(struct pci_dev *dev,
+			 struct pcistub_device_id *pci_dev_id)
 {
 	struct pcistub_device *psdev;
 	unsigned long flags;
 	int err = 0;

 	psdev = pcistub_device_alloc(dev);
-	if (!psdev)
+	if (!psdev) {
+		kfree(pci_dev_id);
 		return -ENOMEM;
+	}

 	spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -507,8 +542,12 @@ static int pcistub_seize(struct pci_dev *dev)

 	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

-	if (err)
+	if (err) {
+		kfree(pci_dev_id);
 		pcistub_device_put(psdev);
+	} else if (pci_dev_id)
+		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
+					   dev->bus->number, dev->devfn);

 	return err;
 }
@@ -517,11 +556,16 @@ static int pcistub_seize(struct pci_dev *dev)
  * other functions that take the sysfs lock. */
 static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int err = 0;
+	int err = 0, match;
+	struct pcistub_device_id *pci_dev_id = NULL;

 	dev_dbg(&dev->dev, "probing...\n");

-	if (pcistub_match(dev)) {
+	match = pcistub_match(dev);
+
+	if ((dev->driver_override &&
+	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
+	    match) {
 		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
 		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
@@ -532,8 +576,16 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
 			goto out;
 		}

+		if (!match) {
+			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
+			if (!pci_dev_id) {
+				err = -ENOMEM;
+				goto out;
+			}
+		}
+
 		dev_info(&dev->dev, "seizing device\n");
-		err = pcistub_seize(dev);
+		err = pcistub_seize(dev, pci_dev_id);
 	} else
 		/* Didn't find the device */
 		err = -ENODEV;
@@ -945,7 +997,7 @@ static const struct pci_error_handlers xen_pcibk_error_handler = {
 static struct pci_driver xen_pcibk_pci_driver = {
 	/* The name should be xen_pciback, but until the tools are updated
 	 * we will keep it as pciback. */
-	.name = "pciback",
+	.name = PCISTUB_DRIVER_NAME,
 	.id_table = pcistub_ids,
 	.probe = pcistub_probe,
 	.remove = pcistub_remove,
@@ -1012,7 +1064,6 @@ static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
 static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 {
 	struct pcistub_device_id *pci_dev_id;
-	unsigned long flags;
 	int rc = 0, devfn = PCI_DEVFN(slot, func);

 	if (slot < 0) {
@@ -1042,16 +1093,10 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 	if (!pci_dev_id)
 		return -ENOMEM;

-	pci_dev_id->domain = domain;
-	pci_dev_id->bus = bus;
-	pci_dev_id->devfn = devfn;
-
 	pr_debug("wants to seize %04x:%02x:%02x.%d\n",
 		 domain, bus, slot, func);

-	spin_lock_irqsave(&device_ids_lock, flags);
-	list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
-	spin_unlock_irqrestore(&device_ids_lock, flags);
+	pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);

 	return 0;
 }
...
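
   [Editor's note: driver_override is the generic PCI sysfs mechanism for
   pinning a device to a named driver regardless of the driver's ID table;
   the pci_stub.c changes above teach pciback to honor it and to track such
   devices in its slot list. A hedged userspace sketch of how a toolstack
   might bind a device this way — the BDF string and the bind_to_pciback()
   helper are invented for illustration, and the device is assumed to be
   already unbound from its previous driver:

   #include <stdio.h>

   static int bind_to_pciback(const char *bdf) /* e.g. "0000:00:04.0" */
   {
           char path[128];
           FILE *f;

           /* Ask the PCI core to match this device to pciback on probe. */
           snprintf(path, sizeof(path),
                    "/sys/bus/pci/devices/%s/driver_override", bdf);
           f = fopen(path, "w");
           if (!f)
                   return -1;
           fprintf(f, "pciback");
           fclose(f);

           /* Trigger a re-probe so pciback can seize the device. */
           f = fopen("/sys/bus/pci/drivers_probe", "w");
           if (!f)
                   return -1;
           fprintf(f, "%s", bdf);
           fclose(f);
           return 0;
   }]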
@@ -43,6 +43,8 @@ enum cpuhp_state {
 	CPUHP_CPUIDLE_COUPLED_PREPARE,
 	CPUHP_POWERPC_PMAC_PREPARE,
 	CPUHP_POWERPC_MMU_CTX_PREPARE,
+	CPUHP_XEN_PREPARE,
+	CPUHP_XEN_EVTCHN_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
...
@@ -38,8 +38,7 @@ extern enum xen_domain_type xen_domain_type;
  */
 #include <xen/features.h>
 #define xen_pvh_domain() (xen_pv_domain() && \
-			  xen_feature(XENFEAT_auto_translated_physmap) && \
-			  xen_have_vector_callback)
+			  xen_feature(XENFEAT_auto_translated_physmap))
 #else
 #define xen_pvh_domain() (0)
 #endif
...