Commit 9d2da7af authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'stable/for-linus-3.10-rc0-tag' of...

Merge tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen updates from Konrad Rzeszutek Wilk:
 "Features:
   - Populate the boot_params with EDD data.
   - Cleanups in the IRQ code.
  Bug-fixes:
   - CPU hotplug offline/online in PVHVM mode.
   - Re-upload processor PM data after ACPI S3 suspend/resume cycle."

And Konrad gets a gold star for sending the pull request early when he
thought he'd be away for the first week of the merge window (but because
of 3.9 dragging out to -rc8 he then re-sent the reminder on the first
day of the merge window anyway)

* tag 'stable/for-linus-3.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: resolve section mismatch warnings in xen-acpi-processor
  xen: Re-upload processor PM data to hypervisor after S3 resume (v2)
  xen/smp: Unifiy some of the PVs and PVHVM offline CPU path
  xen/smp/pvhvm: Don't initialize IRQ_WORKER as we are using the native one.
  xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM
  xen/spinlock:  Check against default value of -1 for IRQ line.
  xen/time: Add default value of -1 for IRQ and check for that.
  xen/events: Check that IRQ value passed in is valid.
  xen/time: Fix kasprintf splat when allocating timer%d IRQ line.
  xen/smp/spinlock: Fix leakage of the spinlock interrupt line for every CPU online/offline
  xen/smp: Fix leakage of timer interrupt line for every CPU online/offline.
  xen kconfig: fix select INPUT_XEN_KBDDEV_FRONTEND
  xen: drop tracking of IRQ vector
  x86/xen: populate boot_params with EDD data
parents c1be5a5b 18c0025b
...@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ...@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error; goto error;
i = 0; i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) { list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSIX) ? (type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" : "pcifront-msi-x" :
"pcifront-msi", "pcifront-msi",
...@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ...@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
dev_dbg(&dev->dev, dev_dbg(&dev->dev,
"xen: msi already bound to pirq=%d\n", pirq); "xen: msi already bound to pirq=%d\n", pirq);
} }
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSIX) ? (type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi", "msi-x" : "msi",
DOMID_SELF); DOMID_SELF);
...@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ...@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
} }
ret = xen_bind_pirq_msi_to_irq(dev, msidesc, ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
map_irq.pirq, map_irq.index, map_irq.pirq,
(type == PCI_CAP_ID_MSIX) ? (type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi", "msi-x" : "msi",
domid); domid);
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/edd.h>
#include <xen/xen.h> #include <xen/xen.h>
#include <xen/events.h> #include <xen/events.h>
...@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = { ...@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart, .emergency_restart = xen_emergency_restart,
}; };
/*
 * Populate boot_params with BIOS Enhanced Disk Drive (EDD) data fetched
 * from the hypervisor via the XENPF_firmware_info platform op, so a dom0
 * kernel sees the same EDD info a native boot would have copied from the
 * real-mode boot environment.  Entire body compiles out when CONFIG_EDD
 * is disabled.
 */
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
struct xen_platform_op op;
struct edd_info *edd_info;
u32 *mbr_signature;
unsigned nr;
int ret;
edd_info = boot_params.eddbuf;
mbr_signature = boot_params.edd_mbr_sig_buffer;
op.cmd = XENPF_firmware_info;
/* Pass 1: per-disk EDD parameter blocks. */
op.u.firmware_info.type = XEN_FW_DISK_INFO;
for (nr = 0; nr < EDDMAXNR; nr++) {
struct edd_info *info = edd_info + nr;
op.u.firmware_info.index = nr;
info->params.length = sizeof(info->params);
/* Hypervisor writes the EDD params directly into our buffer. */
set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
&info->params);
ret = HYPERVISOR_dom0_op(&op);
if (ret)
/* First failing index marks the end of available disks. */
break;
/* Copy the scalar fields the hypercall returned in op. */
#define C(x) info->x = op.u.firmware_info.u.disk_info.x
C(device);
C(version);
C(interface_support);
C(legacy_max_cylinder);
C(legacy_max_head);
C(legacy_sectors_per_track);
#undef C
}
boot_params.eddbuf_entries = nr;
/* Pass 2: MBR signatures, one per disk, until the hypercall fails. */
op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
op.u.firmware_info.index = nr;
ret = HYPERVISOR_dom0_op(&op);
if (ret)
break;
mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
}
boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
/* /*
* Set up the GDT and segment registers for -fstack-protector. Until * Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected * we do this, we have to be careful not to call any stack-protected
...@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void) ...@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Avoid searching for BIOS MP tables */ /* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop;
xen_boot_params_init_edd();
} }
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */ /* PCI BIOS service won't work from a PV guest. */
...@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, ...@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) { switch (action) {
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
xen_vcpu_setup(cpu); xen_vcpu_setup(cpu);
if (xen_have_vector_callback) if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu); xen_init_lock_cpu(cpu);
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
}
break; break;
default: default:
break; break;
......
...@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu) ...@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail; goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc; per_cpu(xen_callfuncsingle_irq, cpu) = rc;
/*
* The IRQ worker on PVHVM goes through the native path and uses the
* IPI mechanism.
*/
if (xen_hvm_domain())
return 0;
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu, cpu,
...@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu) ...@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL); NULL);
if (xen_hvm_domain())
return rc;
if (per_cpu(xen_irq_work, cpu) >= 0) if (per_cpu(xen_irq_work, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
...@@ -418,7 +428,7 @@ static int xen_cpu_disable(void) ...@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu) static void xen_cpu_die(unsigned int cpu)
{ {
while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
current->state = TASK_UNINTERRUPTIBLE; current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10); schedule_timeout(HZ/10);
} }
...@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu) ...@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); if (!xen_hvm_domain())
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu); xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu); xen_teardown_timer(cpu);
} }
...@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) ...@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
static void xen_hvm_cpu_die(unsigned int cpu) static void xen_hvm_cpu_die(unsigned int cpu)
{ {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); xen_cpu_die(cpu);
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
native_cpu_die(cpu); native_cpu_die(cpu);
} }
......
...@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu) ...@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
int irq; int irq;
const char *name; const char *name;
WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));
/*
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return;
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu, cpu,
...@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu) ...@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu) void xen_uninit_lock_cpu(int cpu)
{ {
/*
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return;
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
per_cpu(lock_kicker_irq, cpu) = -1;
} }
void __init xen_init_spinlocks(void) void __init xen_init_spinlocks(void)
{ {
/*
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return;
BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
pv_lock_ops.spin_is_locked = xen_spin_is_locked; pv_lock_ops.spin_is_locked = xen_spin_is_locked;
......
...@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = { ...@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
static const struct clock_event_device *xen_clockevent = static const struct clock_event_device *xen_clockevent =
&xen_timerop_clockevent; &xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events); static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{ {
...@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu) ...@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
struct clock_event_device *evt; struct clock_event_device *evt;
int irq; int irq;
evt = &per_cpu(xen_clock_events, cpu);
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
name = kasprintf(GFP_KERNEL, "timer%d", cpu); name = kasprintf(GFP_KERNEL, "timer%d", cpu);
...@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu) ...@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
IRQF_FORCE_RESUME, IRQF_FORCE_RESUME,
name, NULL); name, NULL);
evt = &per_cpu(xen_clock_events, cpu);
memcpy(evt, xen_clockevent, sizeof(*evt)); memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu); evt->cpumask = cpumask_of(cpu);
...@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu) ...@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
BUG_ON(cpu == 0); BUG_ON(cpu == 0);
evt = &per_cpu(xen_clock_events, cpu); evt = &per_cpu(xen_clock_events, cpu);
unbind_from_irqhandler(evt->irq, NULL); unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
} }
void xen_setup_cpu_clockevents(void) void xen_setup_cpu_clockevents(void)
...@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void) ...@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
xen_setup_runstate_info(cpu); xen_setup_runstate_info(cpu);
xen_setup_timer(cpu); /*
* xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
* doing it xen_hvm_cpu_notify (which gets called by smp_init during
* early bootup and also during CPU hotplug events).
*/
xen_setup_cpu_clockevents(); xen_setup_cpu_clockevents();
} }
......
...@@ -2277,7 +2277,7 @@ config XEN_FBDEV_FRONTEND ...@@ -2277,7 +2277,7 @@ config XEN_FBDEV_FRONTEND
select FB_SYS_IMAGEBLIT select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS select FB_SYS_FOPS
select FB_DEFERRED_IO select FB_DEFERRED_IO
select INPUT_XEN_KBDDEV_FRONTEND select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
select XEN_XENBUS_FRONTEND select XEN_XENBUS_FRONTEND
default y default y
help help
......
...@@ -85,8 +85,7 @@ enum xen_irq_type { ...@@ -85,8 +85,7 @@ enum xen_irq_type {
* event channel - irq->event channel mapping * event channel - irq->event channel mapping
* cpu - cpu this event channel is bound to * cpu - cpu this event channel is bound to
* index - type-specific information: * index - type-specific information:
* PIRQ - vector, with MSB being "needs EIO", or physical IRQ of the HVM * PIRQ - physical IRQ, GSI, flags, and owner domain
* guest, or GSI (real passthrough IRQ) of the device.
* VIRQ - virq number * VIRQ - virq number
* IPI - IPI vector * IPI - IPI vector
* EVTCHN - * EVTCHN -
...@@ -105,7 +104,6 @@ struct irq_info { ...@@ -105,7 +104,6 @@ struct irq_info {
struct { struct {
unsigned short pirq; unsigned short pirq;
unsigned short gsi; unsigned short gsi;
unsigned char vector;
unsigned char flags; unsigned char flags;
uint16_t domid; uint16_t domid;
} pirq; } pirq;
...@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq, ...@@ -211,7 +209,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
unsigned short evtchn, unsigned short evtchn,
unsigned short pirq, unsigned short pirq,
unsigned short gsi, unsigned short gsi,
unsigned short vector,
uint16_t domid, uint16_t domid,
unsigned char flags) unsigned char flags)
{ {
...@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq, ...@@ -221,7 +218,6 @@ static void xen_irq_info_pirq_init(unsigned irq,
info->u.pirq.pirq = pirq; info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi; info->u.pirq.gsi = gsi;
info->u.pirq.vector = vector;
info->u.pirq.domid = domid; info->u.pirq.domid = domid;
info->u.pirq.flags = flags; info->u.pirq.flags = flags;
} }
...@@ -519,6 +515,9 @@ static void xen_free_irq(unsigned irq) ...@@ -519,6 +515,9 @@ static void xen_free_irq(unsigned irq)
{ {
struct irq_info *info = irq_get_handler_data(irq); struct irq_info *info = irq_get_handler_data(irq);
if (WARN_ON(!info))
return;
list_del(&info->list); list_del(&info->list);
irq_set_handler_data(irq, NULL); irq_set_handler_data(irq, NULL);
...@@ -714,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, ...@@ -714,7 +713,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
goto out; goto out;
} }
xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF, xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0); shareable ? PIRQ_SHAREABLE : 0);
pirq_query_unmask(irq); pirq_query_unmask(irq);
...@@ -762,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) ...@@ -762,8 +761,7 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
} }
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, int vector, const char *name, int pirq, const char *name, domid_t domid)
domid_t domid)
{ {
int irq, ret; int irq, ret;
...@@ -776,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, ...@@ -776,7 +774,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
name); name);
xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
ret = irq_set_msi_desc(irq, msidesc); ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0) if (ret < 0)
goto error_irq; goto error_irq;
...@@ -1008,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq) ...@@ -1008,6 +1006,9 @@ static void unbind_from_irq(unsigned int irq)
int evtchn = evtchn_from_irq(irq); int evtchn = evtchn_from_irq(irq);
struct irq_info *info = irq_get_handler_data(irq); struct irq_info *info = irq_get_handler_data(irq);
if (WARN_ON(!info))
return;
mutex_lock(&irq_mapping_update_lock); mutex_lock(&irq_mapping_update_lock);
if (info->refcnt > 0) { if (info->refcnt > 0) {
...@@ -1135,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, ...@@ -1135,6 +1136,10 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
void unbind_from_irqhandler(unsigned int irq, void *dev_id) void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{ {
struct irq_info *info = irq_get_handler_data(irq);
if (WARN_ON(!info))
return;
free_irq(irq, dev_id); free_irq(irq, dev_id);
unbind_from_irq(irq); unbind_from_irq(irq);
} }
...@@ -1457,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq) ...@@ -1457,6 +1462,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
{ {
struct irq_info *info = info_for_irq(irq); struct irq_info *info = info_for_irq(irq);
if (WARN_ON(!info))
return;
/* Make sure the irq is masked, since the new event channel /* Make sure the irq is masked, since the new event channel
will also be masked. */ will also be masked. */
disable_irq(irq); disable_irq(irq);
...@@ -1730,7 +1738,12 @@ void xen_poll_irq(int irq) ...@@ -1730,7 +1738,12 @@ void xen_poll_irq(int irq)
int xen_test_irq_shared(int irq) int xen_test_irq_shared(int irq)
{ {
struct irq_info *info = info_for_irq(irq); struct irq_info *info = info_for_irq(irq);
struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; struct physdev_irq_status_query irq_status;
if (WARN_ON(!info))
return -ENOENT;
irq_status.irq = info->u.pirq.pirq;
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
return 0; return 0;
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/syscore_ops.h>
#include <acpi/acpi_bus.h> #include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h> #include <acpi/acpi_drivers.h>
#include <acpi/processor.h> #include <acpi/processor.h>
...@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex); ...@@ -51,9 +52,9 @@ static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI ID we have processed from 'struct acpi_processor'. */ /* Which ACPI ID we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done; static unsigned long *acpi_ids_done;
/* Which ACPI ID exist in the SSDT/DSDT processor definitions. */ /* Which ACPI ID exist in the SSDT/DSDT processor definitions. */
static unsigned long __initdata *acpi_id_present; static unsigned long *acpi_id_present;
/* And if there is an _CST definition (or a PBLK) for the ACPI IDs */ /* And if there is an _CST definition (or a PBLK) for the ACPI IDs */
static unsigned long __initdata *acpi_id_cst_present; static unsigned long *acpi_id_cst_present;
static int push_cxx_to_hypervisor(struct acpi_processor *_pr) static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{ {
...@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void) ...@@ -329,7 +330,7 @@ static unsigned int __init get_max_acpi_id(void)
* for_each_[present|online]_cpu macros which are banded to the virtual * for_each_[present|online]_cpu macros which are banded to the virtual
* CPU amount. * CPU amount.
*/ */
static acpi_status __init static acpi_status
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{ {
u32 acpi_id; u32 acpi_id;
...@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) ...@@ -384,12 +385,16 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK; return AE_OK;
} }
static int __init check_acpi_ids(struct acpi_processor *pr_backup) static int check_acpi_ids(struct acpi_processor *pr_backup)
{ {
if (!pr_backup) if (!pr_backup)
return -ENODEV; return -ENODEV;
if (acpi_id_present && acpi_id_cst_present)
/* OK, done this once .. skip to uploading */
goto upload;
/* All online CPUs have been processed at this stage. Now verify /* All online CPUs have been processed at this stage. Now verify
* whether in fact "online CPUs" == physical CPUs. * whether in fact "online CPUs" == physical CPUs.
*/ */
...@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup) ...@@ -408,6 +413,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
read_acpi_id, NULL, NULL, NULL); read_acpi_id, NULL, NULL, NULL);
acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
upload:
if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
unsigned int i; unsigned int i;
for_each_set_bit(i, acpi_id_present, nr_acpi_bits) { for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
...@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup) ...@@ -417,10 +423,7 @@ static int __init check_acpi_ids(struct acpi_processor *pr_backup)
(void)upload_pm_data(pr_backup); (void)upload_pm_data(pr_backup);
} }
} }
kfree(acpi_id_present);
acpi_id_present = NULL;
kfree(acpi_id_cst_present);
acpi_id_cst_present = NULL;
return 0; return 0;
} }
static int __init check_prereq(void) static int __init check_prereq(void)
...@@ -467,9 +470,46 @@ static void free_acpi_perf_data(void) ...@@ -467,9 +470,46 @@ static void free_acpi_perf_data(void)
free_percpu(acpi_perf_data); free_percpu(acpi_perf_data);
} }
static int __init xen_acpi_processor_init(void) static int xen_upload_processor_pm_data(void)
{ {
struct acpi_processor *pr_backup = NULL; struct acpi_processor *pr_backup = NULL;
unsigned int i;
int rc = 0;
pr_info(DRV_NAME "Uploading Xen processor PM info\n");
for_each_possible_cpu(i) {
struct acpi_processor *_pr;
_pr = per_cpu(processors, i /* APIC ID */);
if (!_pr)
continue;
if (!pr_backup) {
pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (pr_backup)
memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
}
(void)upload_pm_data(_pr);
}
rc = check_acpi_ids(pr_backup);
kfree(pr_backup);
return rc;
}
/*
 * syscore resume hook: after an ACPI S3 suspend/resume cycle the
 * hypervisor's copy of the processor PM data is gone, so clear the
 * "already uploaded" bitmap and push everything to Xen again.
 */
static void xen_acpi_processor_resume(void)
{
bitmap_zero(acpi_ids_done, nr_acpi_bits);
xen_upload_processor_pm_data();
}
/* Registered at module init so PM data is re-uploaded on every resume. */
static struct syscore_ops xap_syscore_ops = {
.resume = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
{
unsigned int i; unsigned int i;
int rc = check_prereq(); int rc = check_prereq();
...@@ -514,27 +554,12 @@ static int __init xen_acpi_processor_init(void) ...@@ -514,27 +554,12 @@ static int __init xen_acpi_processor_init(void)
goto err_out; goto err_out;
} }
for_each_possible_cpu(i) { rc = xen_upload_processor_pm_data();
struct acpi_processor *_pr;
_pr = per_cpu(processors, i /* APIC ID */);
if (!_pr)
continue;
if (!pr_backup) {
pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (pr_backup)
memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
}
(void)upload_pm_data(_pr);
}
rc = check_acpi_ids(pr_backup);
kfree(pr_backup);
pr_backup = NULL;
if (rc) if (rc)
goto err_unregister; goto err_unregister;
register_syscore_ops(&xap_syscore_ops);
return 0; return 0;
err_unregister: err_unregister:
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
...@@ -552,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void) ...@@ -552,7 +577,10 @@ static void __exit xen_acpi_processor_exit(void)
{ {
int i; int i;
unregister_syscore_ops(&xap_syscore_ops);
kfree(acpi_ids_done); kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct acpi_processor_performance *perf; struct acpi_processor_performance *perf;
perf = per_cpu_ptr(acpi_perf_data, i); perf = per_cpu_ptr(acpi_perf_data, i);
......
...@@ -90,8 +90,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, ...@@ -90,8 +90,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
/* Bind an PSI pirq to an irq. */ /* Bind an PSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, int vector, const char *name, int pirq, const char *name, domid_t domid);
domid_t domid);
#endif #endif
/* De-allocates the above mentioned physical interrupt. */ /* De-allocates the above mentioned physical interrupt. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment