Commit 29567292 authored by Linus Torvalds

Merge tag 'for-linus-4.7-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel.

* tag 'for-linus-4.7-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: use same main loop for counting and remapping pages
  xen/events: Don't move disabled irqs
  xen/x86: actually allocate legacy interrupts on PV guests
  Xen: don't warn about 2-byte wchar_t in efi
  xen/gntdev: reduce copy batch size to 16
  xen/x86: don't lose event interrupts
parents ecaba718 dd14be92
...@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void) ...@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
#endif #endif
__acpi_register_gsi = acpi_register_gsi_xen; __acpi_register_gsi = acpi_register_gsi_xen;
__acpi_unregister_gsi = NULL; __acpi_unregister_gsi = NULL;
/* Pre-allocate legacy irqs */ /*
for (irq = 0; irq < nr_legacy_irqs(); irq++) { * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here
* because we don't have a PIC and thus nr_legacy_irqs() is zero.
*/
for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity; int trigger, polarity;
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
......
...@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
unsigned long i = 0; unsigned long i = 0;
unsigned long n = end_pfn - start_pfn; unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0)
remap_pfn = nr_pages;
while (i < n) { while (i < n) {
unsigned long cur_pfn = start_pfn + i; unsigned long cur_pfn = start_pfn + i;
unsigned long left = n - i; unsigned long left = n - i;
...@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk( ...@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
return remap_pfn; return remap_pfn;
} }
static void __init xen_set_identity_and_remap(unsigned long nr_pages) static unsigned long __init xen_count_remap_pages(
unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
unsigned long remap_pages)
{
if (start_pfn >= nr_pages)
return remap_pages;
return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
unsigned long nr_pages, unsigned long last_val))
{ {
phys_addr_t start = 0; phys_addr_t start = 0;
unsigned long last_pfn = nr_pages; unsigned long ret_val = 0;
const struct e820entry *entry = xen_e820_map; const struct e820entry *entry = xen_e820_map;
int i; int i;
/* /*
* Combine non-RAM regions and gaps until a RAM region (or the * Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and * end of the map) is reached, then call the provided function
* remap the memory in those non-RAM regions. * to perform its duty on the non-RAM region.
* *
* The combined non-RAM regions are rounded to a whole number * The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1 * of pages so any partial pages are accessible via the 1:1
...@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages) ...@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
end_pfn = PFN_UP(entry->addr); end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) if (start_pfn < end_pfn)
last_pfn = xen_set_identity_and_remap_chunk( ret_val = func(start_pfn, end_pfn, nr_pages,
start_pfn, end_pfn, nr_pages, ret_val);
last_pfn);
start = end; start = end;
} }
} }
pr_info("Released %ld page(s)\n", xen_released_pages); return ret_val;
} }
/* /*
...@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void) ...@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
} }
} }
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
unsigned long extra = 0;
unsigned long start_pfn, end_pfn;
const struct e820entry *entry = xen_e820_map;
int i;
end_pfn = 0;
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
start_pfn = PFN_DOWN(entry->addr);
/* Adjacent regions on non-page boundaries handling! */
end_pfn = min(end_pfn, start_pfn);
if (start_pfn >= max_pfn)
return extra + max_pfn - end_pfn;
/* Add any holes in map to result. */
extra += start_pfn - end_pfn;
end_pfn = PFN_UP(entry->addr + entry->size);
end_pfn = min(end_pfn, max_pfn);
if (entry->type != E820_RAM)
extra += end_pfn - start_pfn;
}
return extra;
}
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{ {
struct e820entry *entry; struct e820entry *entry;
...@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void) ...@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
max_pages = xen_get_max_pages(); max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */ /* How many extra pages do we need due to remapping? */
max_pages += xen_count_remap_pages(max_pfn); max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
if (max_pages > max_pfn) if (max_pages > max_pfn)
extra_pages += max_pages - max_pfn; extra_pages += max_pages - max_pfn;
...@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void) ...@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
* Set identity map on non-RAM pages and prepare remapping the * Set identity map on non-RAM pages and prepare remapping the
* underlying RAM. * underlying RAM.
*/ */
xen_set_identity_and_remap(max_pfn); xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
return "Xen"; return "Xen";
} }
......
...@@ -290,11 +290,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta, ...@@ -290,11 +290,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
WARN_ON(!clockevent_state_oneshot(evt)); WARN_ON(!clockevent_state_oneshot(evt));
single.timeout_abs_ns = get_abs_timeout(delta); single.timeout_abs_ns = get_abs_timeout(delta);
single.flags = VCPU_SSHOTTMR_future; /* Get an event anyway, even if the timeout is already expired */
single.flags = 0;
ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single); ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
BUG_ON(ret != 0);
BUG_ON(ret != 0 && ret != -ETIME);
return ret; return ret;
} }
......
...@@ -8,6 +8,7 @@ nostackp := $(call cc-option, -fno-stack-protector) ...@@ -8,6 +8,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp) CFLAGS_features.o := $(nostackp)
CFLAGS_efi.o += -fshort-wchar CFLAGS_efi.o += -fshort-wchar
LDFLAGS += $(call ld-option, --no-wchar-size-warning)
dom0-$(CONFIG_PCI) += pci.o dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
......
...@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data) ...@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn)) if (!VALID_EVTCHN(evtchn))
return; return;
if (unlikely(irqd_is_setaffinity_pending(data))) { if (unlikely(irqd_is_setaffinity_pending(data)) &&
likely(!irqd_irq_disabled(data))) {
int masked = test_and_set_mask(evtchn); int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn); clear_evtchn(evtchn);
...@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data) ...@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn)) if (!VALID_EVTCHN(evtchn))
return; return;
if (unlikely(irqd_is_setaffinity_pending(data))) { if (unlikely(irqd_is_setaffinity_pending(data)) &&
likely(!irqd_irq_disabled(data))) {
int masked = test_and_set_mask(evtchn); int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn); clear_evtchn(evtchn);
......
...@@ -748,7 +748,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) ...@@ -748,7 +748,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
return rc; return rc;
} }
#define GNTDEV_COPY_BATCH 24 #define GNTDEV_COPY_BATCH 16
struct gntdev_copy_batch { struct gntdev_copy_batch {
struct gnttab_copy ops[GNTDEV_COPY_BATCH]; struct gnttab_copy ops[GNTDEV_COPY_BATCH];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment