Commit abbe0d3c authored by Linus Torvalds

Merge branch 'stable/bug.fixes' of git://oss.oracle.com/git/kwilk/xen

* 'stable/bug.fixes' of git://oss.oracle.com/git/kwilk/xen:
  xen/i386: follow-up to "replace order-based range checking of M2P table by linear one"
  xen/irq: Alter the locking to use a mutex instead of a spinlock.
  xen/e820: if there is no dom0_mem=, don't tweak extra_pages.
  xen: disable PV spinlocks on HVM
parents c455ea4f 61cca2fa
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1721,10 +1721,8 @@ void __init xen_setup_machphys_mapping(void)
 		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
 	}
 #ifdef CONFIG_X86_32
-	if ((machine_to_phys_mapping + machine_to_phys_nr)
-	    < machine_to_phys_mapping)
-		machine_to_phys_nr = (unsigned long *)NULL
-				     - machine_to_phys_mapping;
+	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+		< machine_to_phys_mapping);
 #endif
 }
 
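A note on the mmu.c hunk: the deleted branch clamped machine_to_phys_nr by computing how many entries fit below address zero (the awkward (unsigned long *)NULL - machine_to_phys_mapping expression). The replacement only warns, and it compares the address of the last entry rather than one past the end, so a table that ends exactly at the top of the 32-bit address space no longer trips the check. A minimal userspace sketch of the wrap-around arithmetic, using a hypothetical base address and entry count rather than real M2P values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical stand-ins: a table of 4-byte entries whose last
	 * entry ends exactly at the 4 GiB boundary of a 32-bit space. */
	uint32_t base = 0xf5800000u;
	uint32_t nr   = (0u - base) / 4;

	uint32_t one_past_end = base + nr * 4;        /* wraps around to 0 */
	uint32_t last_entry   = base + (nr - 1) * 4;  /* 0xfffffffc, no wrap */

	/* Old-style check: fires even though every entry is addressable. */
	printf("base + nr < base:       %d\n", one_past_end < base);
	/* New-style check: the last entry does not wrap, so no warning. */
	printf("base + (nr - 1) < base: %d\n", last_entry < base);
	return 0;
}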
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -306,10 +306,12 @@ char * __init xen_memory_setup(void)
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
 	extra_limit = xen_get_max_pages();
-	if (extra_limit >= max_pfn)
-		extra_pages = extra_limit - max_pfn;
-	else
-		extra_pages = 0;
+	if (max_pfn + extra_pages > extra_limit) {
+		if (extra_limit > max_pfn)
+			extra_pages = extra_limit - max_pfn;
+		else
+			extra_pages = 0;
+	}
 
 	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
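The setup.c hunk makes the clamp conditional: extra_pages is only reduced when max_pfn + extra_pages would actually exceed the limit reported by xen_get_max_pages(), so a guest without a dom0_mem= limit keeps its extra pages untouched. The same logic pulled out into a standalone helper, exercised with made-up page counts:

#include <stdio.h>

/* Same shape as the new kernel logic: clamp only when the e820 maximum
 * pfn plus the extra pages would exceed the hypervisor-reported limit. */
static unsigned long clamp_extra_pages(unsigned long max_pfn,
				       unsigned long extra_pages,
				       unsigned long extra_limit)
{
	if (max_pfn + extra_pages > extra_limit) {
		if (extra_limit > max_pfn)
			extra_pages = extra_limit - max_pfn;
		else
			extra_pages = 0;
	}
	return extra_pages;
}

int main(void)
{
	/* Hypothetical counts: 0x40000 pages of RAM, 0x10000 extra pages. */
	printf("%#lx\n", clamp_extra_pages(0x40000, 0x10000, 0x48000)); /* clamped to 0x8000 */
	printf("%#lx\n", clamp_extra_pages(0x40000, 0x10000, ~0UL));    /* left at 0x10000 */
	return 0;
}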
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -532,7 +532,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	WARN_ON(xen_smp_intr_init(0));
 
 	xen_init_lock_cpu(0);
-	xen_init_spinlocks();
 }
 
 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
 
 static LIST_HEAD(xen_irq_list_head);
 
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	int irq = -1;
 	struct physdev_irq irq_op;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = find_irq_by_gsi(gsi);
 	if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 				handle_edge_irq, name);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 {
 	int irq, ret;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = xen_allocate_irq_dynamic();
 	if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	if (ret < 0)
 		goto error_irq;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	xen_free_irq(irq);
 	return -1;
 }
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
 	struct irq_info *info = info_for_irq(irq);
 	int rc = -ENOENT;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	desc = irq_to_desc(irq);
 	if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
 	xen_free_irq(irq);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return rc;
 }
 
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
 
 	struct irq_info *info;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	list_for_each_entry(info, &xen_irq_list_head, list) {
 		if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
 	}
 	irq = -1;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = evtchn_to_irq[evtchn];
 
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	struct evtchn_bind_ipi bind_ipi;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	struct evtchn_bind_virq bind_virq;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
 	struct evtchn_close close;
 	int evtchn = evtchn_from_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	xen_free_irq(irq);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 }
 
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	   will also be masked. */
 	disable_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
 	xen_irq_info_evtchn_init(irq, evtchn);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	/* new event channels are always bound to cpu 0 */
 	irq_set_affinity(irq, cpumask_of(0));
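The events.c part of the merge is a mechanical conversion: DEFINE_SPINLOCK becomes DEFINE_MUTEX and every spin_lock/spin_unlock pair on irq_mapping_update_lock becomes mutex_lock/mutex_unlock. The sections that update the mapping tables may therefore sleep (the bind paths can block, for instance while allocating an IRQ), while readers of the tables still take no lock at all, as the comment above the lock states. A userspace analogy of the resulting pattern, with a pthread mutex standing in for the kernel mutex and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

#define NR_EVTCHN 16

/* One sleeping lock serializes all updates to the mapping table;
 * readers are assumed to need no lock, mirroring the events.c comment. */
static pthread_mutex_t mapping_update_lock = PTHREAD_MUTEX_INITIALIZER;
static int evtchn_to_irq_map[NR_EVTCHN];

static int bind_evtchn(int evtchn, int irq)
{
	pthread_mutex_lock(&mapping_update_lock);	/* may block; fine for a mutex */
	evtchn_to_irq_map[evtchn] = irq;		/* update under the lock */
	pthread_mutex_unlock(&mapping_update_lock);
	return irq;
}

int main(void)
{
	printf("bound evtchn 3 to irq %d\n", bind_evtchn(3, 42));
	return 0;
}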