Commit d14d7f14 authored by Linus Torvalds

Merge tag 'for-linus-5.1a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "xen fixes and features:

   - remove fallback code for very old Xen hypervisors

   - three patches for fixing Xen dom0 boot regressions

   - an old patch for Xen PCI passthrough which was never applied for
     unknown reasons

   - some more minor fixes and cleanup patches"

* tag 'for-linus-5.1a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: fix dom0 boot on huge systems
  xen, cpu_hotplug: Prevent an out of bounds access
  xen: remove pre-xen3 fallback handlers
  xen/ACPI: Switch to bitmap_zalloc()
  x86/xen: dont add memory above max allowed allocation
  x86: respect memory size limiting via mem= parameter
  xen/gntdev: Check and release imported dma-bufs on close
  xen/gntdev: Do not destroy context while dma-bufs are in use
  xen/pciback: Don't disable PCI_COMMAND on PCI device reset.
  xen-scsiback: mark expected switch fall-through
  xen: mark expected switch fall-through
parents 6cdfa54c 01bd2ac2
@@ -332,15 +332,11 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
 	return _hypercall4(int, update_va_mapping, va,
 			   new_val.pte, new_val.pte >> 32, flags);
 }

-extern int __must_check xen_event_channel_op_compat(int, void *);
-
 static inline int
 HYPERVISOR_event_channel_op(int cmd, void *arg)
 {
-	int rc = _hypercall2(int, event_channel_op, cmd, arg);
-	if (unlikely(rc == -ENOSYS))
-		rc = xen_event_channel_op_compat(cmd, arg);
-	return rc;
+	return _hypercall2(int, event_channel_op, cmd, arg);
 }

 static inline int
@@ -355,15 +351,10 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
 	return _hypercall3(int, console_io, cmd, count, str);
 }

-extern int __must_check xen_physdev_op_compat(int, void *);
-
 static inline int
 HYPERVISOR_physdev_op(int cmd, void *arg)
 {
-	int rc = _hypercall2(int, physdev_op, cmd, arg);
-	if (unlikely(rc == -ENOSYS))
-		rc = xen_physdev_op_compat(cmd, arg);
-	return rc;
+	return _hypercall2(int, physdev_op, cmd, arg);
 }

 static inline int
...
@@ -14,6 +14,7 @@
 #include <linux/acpi.h>
 #include <linux/firmware-map.h>
 #include <linux/sort.h>
+#include <linux/memory_hotplug.h>

 #include <asm/e820/api.h>
 #include <asm/setup.h>
@@ -878,6 +879,10 @@ static int __init parse_memopt(char *p)
 	e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1);

+#ifdef CONFIG_MEMORY_HOTPLUG
+	max_mem_size = mem_size;
+#endif
+
 	return 0;
 }
 early_param("mem", parse_memopt);
...
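
The e820 hunk above only records the boundary: whatever `mem=` evaluates to is stored in max_mem_size when CONFIG_MEMORY_HOTPLUG is enabled, and the hotplug core (last hunk of this diff) enforces it. A rough standalone sketch of the size parsing involved, with parse_size standing in for the kernel's memparse():

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hedged stand-in for the kernel's memparse(): only K/M/G suffixes handled. */
static uint64_t parse_size(const char *s)
{
	char *end;
	uint64_t v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': v <<= 10; /* fall through */
	case 'M': v <<= 10; /* fall through */
	case 'K': v <<= 10; break;
	default: break;
	}
	return v;
}

int main(void)
{
	/* With the hunk above, "mem=4G" now also becomes the hotplug cap. */
	printf("max_mem_size = %llu\n",
	       (unsigned long long)parse_size("4G"));	/* 4294967296 */
	return 0;
}
```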
@@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void)
 				pt = early_memremap(pt_phys, PAGE_SIZE);
 				clear_page(pt);
 				for (idx_pte = 0;
 				     idx_pte < min(n_pte, PTRS_PER_PTE);
 				     idx_pte++) {
-					set_pte(pt + idx_pte,
-						pfn_pte(p2m_pfn, PAGE_KERNEL));
+					pt[idx_pte] = pfn_pte(p2m_pfn,
+							      PAGE_KERNEL);
 					p2m_pfn++;
 				}
 				n_pte -= PTRS_PER_PTE;
@@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void)
 				make_lowmem_page_readonly(__va(pt_phys));
 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
						  PFN_DOWN(pt_phys));
-				set_pmd(pmd + idx_pt,
-					__pmd(_PAGE_TABLE | pt_phys));
+				pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
 				pt_phys += PAGE_SIZE;
 			}
 			n_pt -= PTRS_PER_PMD;
@@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void)
 			make_lowmem_page_readonly(__va(pmd_phys));
 			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
					  PFN_DOWN(pmd_phys));
-			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+			pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
 			pmd_phys += PAGE_SIZE;
 		}
 		n_pmd -= PTRS_PER_PUD;
...
@@ -12,6 +12,7 @@
 #include <linux/memblock.h>
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
+#include <linux/memory_hotplug.h>

 #include <asm/elf.h>
 #include <asm/vdso.h>
@@ -589,6 +590,14 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start,
 	if (type == E820_TYPE_RAM) {
 		start = PAGE_ALIGN(start);
 		end &= ~((phys_addr_t)PAGE_SIZE - 1);
+#ifdef CONFIG_MEMORY_HOTPLUG
+		/*
+		 * Don't allow adding memory not in E820 map while booting the
+		 * system. Once the balloon driver is up it will remove that
+		 * restriction again.
+		 */
+		max_mem_size = end;
+#endif
 	}

 	e820__range_add(start, end - start, type);
@@ -748,6 +757,10 @@ char * __init xen_memory_setup(void)
 	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
 	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+	xen_saved_max_mem_size = max_mem_size;
+#endif
+
 	op = xen_initial_domain() ?
 		XENMEM_machine_memory_map :
 		XENMEM_memory_map;
...
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
-obj-$(CONFIG_X86)		+= fallback.o
 obj-y	+= grant-table.o features.o balloon.o manage.o preempt.o time.o
 obj-y	+= mem-reservation.o
 obj-y	+= events/
...
@@ -54,7 +54,7 @@ static int vcpu_online(unsigned int cpu)
 }

 static void vcpu_hotplug(unsigned int cpu)
 {
-	if (!cpu_possible(cpu))
+	if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
 		return;

 	switch (vcpu_online(cpu)) {
...
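
The one-line change above matters because cpumask helpers index a fixed-size bitmap; an out-of-range vCPU number coming from Xenstore has to be rejected before it is used as a bit index. A hedged standalone illustration of that ordering (the names are invented, not kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPU_BITS 8UL			/* stand-in for nr_cpu_ids */
static unsigned long cpu_possible_mask;	/* stand-in for the kernel's cpumask */

/* Check the index first, then consult the bitmap - the order the patch enforces. */
static bool cpu_index_ok(unsigned long cpu)
{
	return cpu < NR_CPU_BITS && (cpu_possible_mask & (1UL << cpu));
}

int main(void)
{
	cpu_possible_mask = 0x0f;		/* CPUs 0-3 possible */
	printf("%d\n", cpu_index_ok(2));	/* 1 */
	printf("%d\n", cpu_index_ok(200));	/* 0, and no out-of-range shift */
	return 0;
}
```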
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/bug.h>
-#include <linux/export.h>
-#include <asm/hypervisor.h>
-#include <asm/xen/hypercall.h>
-
-int xen_event_channel_op_compat(int cmd, void *arg)
-{
-	struct evtchn_op op;
-	int rc;
-
-	op.cmd = cmd;
-	memcpy(&op.u, arg, sizeof(op.u));
-	rc = _hypercall1(int, event_channel_op_compat, &op);
-
-	switch (cmd) {
-	case EVTCHNOP_close:
-	case EVTCHNOP_send:
-	case EVTCHNOP_bind_vcpu:
-	case EVTCHNOP_unmask:
-		/* no output */
-		break;
-
-#define COPY_BACK(eop) \
-	case EVTCHNOP_##eop: \
-		memcpy(arg, &op.u.eop, sizeof(op.u.eop)); \
-		break
-
-	COPY_BACK(bind_interdomain);
-	COPY_BACK(bind_virq);
-	COPY_BACK(bind_pirq);
-	COPY_BACK(status);
-	COPY_BACK(alloc_unbound);
-	COPY_BACK(bind_ipi);
-#undef COPY_BACK
-
-	default:
-		WARN_ON(rc != -ENOSYS);
-		break;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
-
-int xen_physdev_op_compat(int cmd, void *arg)
-{
-	struct physdev_op op;
-	int rc;
-
-	op.cmd = cmd;
-	memcpy(&op.u, arg, sizeof(op.u));
-	rc = _hypercall1(int, physdev_op_compat, &op);
-
-	switch (cmd) {
-	case PHYSDEVOP_IRQ_UNMASK_NOTIFY:
-	case PHYSDEVOP_set_iopl:
-	case PHYSDEVOP_set_iobitmap:
-	case PHYSDEVOP_apic_write:
-		/* no output */
-		break;
-
-#define COPY_BACK(pop, fld) \
-	case PHYSDEVOP_##pop: \
-		memcpy(arg, &op.u.fld, sizeof(op.u.fld)); \
-		break
-
-	COPY_BACK(irq_status_query, irq_status_query);
-	COPY_BACK(apic_read, apic_op);
-	COPY_BACK(ASSIGN_VECTOR, irq_op);
-#undef COPY_BACK
-
-	default:
-		WARN_ON(rc != -ENOSYS);
-		break;
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
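
The COPY_BACK macro in the removed fallback file above is just token pasting: one invocation expands to a case label plus the matching copy-out of a union member. A minimal, self-contained version of the same trick (OP_foo, payload and copy_result are invented names for illustration):

```c
#include <stdio.h>
#include <string.h>

#define OP_foo 0
#define OP_bar 1

union payload {
	int foo;
	long bar;
};

/* Same trick as COPY_BACK above: paste the op name into both the case
 * label and the union member that gets copied back to the caller. */
#define COPY_BACK(op)					\
	case OP_##op:					\
		memcpy(arg, &in->op, sizeof(in->op));	\
		break

static void copy_result(int cmd, const union payload *in, void *arg)
{
	switch (cmd) {
	COPY_BACK(foo);
	COPY_BACK(bar);
	default:
		break;
	}
}
#undef COPY_BACK

int main(void)
{
	union payload p = { .foo = 42 };
	int out = 0;

	copy_result(OP_foo, &p, &out);
	printf("%d\n", out);	/* 42 */
	return 0;
}
```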
@@ -80,6 +80,12 @@ struct gntdev_dmabuf_priv {
 	struct list_head imp_list;
 	/* This is the lock which protects dma_buf_xxx lists. */
 	struct mutex lock;
+	/*
+	 * We reference this file while exporting dma-bufs, so
+	 * the grant device context is not destroyed while there are
+	 * external users alive.
+	 */
+	struct file *filp;
 };

 /* DMA buffer export support. */
@@ -311,6 +317,7 @@ static void dmabuf_exp_release(struct kref *kref)
 	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
 	list_del(&gntdev_dmabuf->next);
+	fput(gntdev_dmabuf->priv->filp);
 	kfree(gntdev_dmabuf);
 }
@@ -423,6 +430,7 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
 	mutex_lock(&args->dmabuf_priv->lock);
 	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
 	mutex_unlock(&args->dmabuf_priv->lock);
+	get_file(gntdev_dmabuf->priv->filp);
 	return 0;

 fail:
@@ -737,6 +745,14 @@ static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
 	return 0;
 }

+static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
+{
+	struct gntdev_dmabuf *q, *gntdev_dmabuf;
+
+	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
+		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
+}
+
 /* DMA buffer IOCTL support. */

 long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
@@ -834,7 +850,7 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
 	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
 }

-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
 {
 	struct gntdev_dmabuf_priv *priv;
@@ -847,10 +863,13 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
 	INIT_LIST_HEAD(&priv->exp_wait_list);
 	INIT_LIST_HEAD(&priv->imp_list);

+	priv->filp = filp;
+
 	return priv;
 }

 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
 {
+	dmabuf_imp_release_all(priv);
 	kfree(priv);
 }
@@ -14,7 +14,7 @@
 struct gntdev_dmabuf_priv;
 struct gntdev_priv;

-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
+struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);

 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
...
@@ -600,7 +600,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 	mutex_init(&priv->lock);

 #ifdef CONFIG_XEN_GNTDEV_DMABUF
-	priv->dmabuf_priv = gntdev_dmabuf_init();
+	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
 	if (IS_ERR(priv->dmabuf_priv)) {
 		ret = PTR_ERR(priv->dmabuf_priv);
 		kfree(priv);
...
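
Taken together, the gntdev hunks above take a get_file() reference on the gntdev character-device file for every exported dma-buf and drop it with fput() when that dma-buf is released, so the device's release path (and gntdev_dmabuf_fini()) cannot run while another driver that imported an exported buffer is still using it; buffers gntdev itself imported are additionally torn down in dmabuf_imp_release_all() on close. A hedged, generic sketch of the lifetime pattern, with exported_buf_create()/exported_buf_destroy() as invented names rather than gntdev functions:

```c
#include <linux/fs.h>
#include <linux/slab.h>

/* Invented example type, not part of gntdev. */
struct exported_buf {
	struct file *owner;	/* the device file that created this object */
};

static struct exported_buf *exported_buf_create(struct file *owner)
{
	struct exported_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->owner = owner;
	get_file(owner);	/* keep the device context alive while we exist */
	return buf;
}

static void exported_buf_destroy(struct exported_buf *buf)
{
	fput(buf->owner);	/* the device's release() may now run */
	kfree(buf);
}
```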
@@ -410,21 +410,21 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)

 	/* All online CPUs have been processed at this stage. Now verify
 	 * whether in fact "online CPUs" == physical CPUs.
 	 */
-	acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+	acpi_id_present = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
 	if (!acpi_id_present)
 		return -ENOMEM;

-	acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+	acpi_id_cst_present = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
 	if (!acpi_id_cst_present) {
-		kfree(acpi_id_present);
+		bitmap_free(acpi_id_present);
 		return -ENOMEM;
 	}

 	acpi_psd = kcalloc(nr_acpi_bits, sizeof(struct acpi_psd_package),
 			   GFP_KERNEL);
 	if (!acpi_psd) {
-		kfree(acpi_id_present);
-		kfree(acpi_id_cst_present);
+		bitmap_free(acpi_id_present);
+		bitmap_free(acpi_id_cst_present);
 		return -ENOMEM;
 	}
@@ -533,14 +533,14 @@ static int __init xen_acpi_processor_init(void)
 		return -ENODEV;

 	nr_acpi_bits = get_max_acpi_id() + 1;
-	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+	acpi_ids_done = bitmap_zalloc(nr_acpi_bits, GFP_KERNEL);
 	if (!acpi_ids_done)
 		return -ENOMEM;

 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
 	if (!acpi_perf_data) {
 		pr_debug("Memory allocation error for acpi_perf_data\n");
-		kfree(acpi_ids_done);
+		bitmap_free(acpi_ids_done);
 		return -ENOMEM;
 	}
 	for_each_possible_cpu(i) {
@@ -584,7 +584,7 @@ static int __init xen_acpi_processor_init(void)
 err_out:
 	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
 	free_acpi_perf_data();
-	kfree(acpi_ids_done);
+	bitmap_free(acpi_ids_done);
 	return rc;
 }

 static void __exit xen_acpi_processor_exit(void)
@@ -592,9 +592,9 @@ static void __exit xen_acpi_processor_exit(void)
 	int i;

 	unregister_syscore_ops(&xap_syscore_ops);
-	kfree(acpi_ids_done);
-	kfree(acpi_id_present);
-	kfree(acpi_id_cst_present);
+	bitmap_free(acpi_ids_done);
+	bitmap_free(acpi_id_present);
+	bitmap_free(acpi_id_cst_present);
 	kfree(acpi_psd);
 	for_each_possible_cpu(i)
 		acpi_processor_unregister_performance(i);
...
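
The hunks above are a mechanical conversion: the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long), ...) pattern becomes bitmap_zalloc(n, ...), paired with bitmap_free() instead of kfree(). A small usage sketch of those helpers (the function name and bit index are only illustrative):

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

static int bitmap_usage_example(unsigned int nbits)
{
	unsigned long *present = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!present)
		return -ENOMEM;

	__set_bit(3, present);		/* e.g. mark ACPI id 3 as seen */
	if (test_bit(3, present)) {
		/* ... act on it ... */
	}

	bitmap_free(present);		/* counterpart of bitmap_zalloc() */
	return 0;
}
```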
@@ -37,6 +37,7 @@
 #include <linux/mm_types.h>
 #include <linux/init.h>
 #include <linux/capability.h>
+#include <linux/memory_hotplug.h>

 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -50,6 +51,10 @@
 #define BALLOON_CLASS_NAME "xen_memory"

+#ifdef CONFIG_MEMORY_HOTPLUG
+u64 xen_saved_max_mem_size = 0;
+#endif
+
 static struct device balloon_dev;

 static int register_balloon(struct device *dev);
@@ -63,6 +68,12 @@ static void watch_target(struct xenbus_watch *watch,
 	static bool watch_fired;
 	static long target_diff;

+#ifdef CONFIG_MEMORY_HOTPLUG
+	/* The balloon driver will take care of adding memory now. */
+	if (xen_saved_max_mem_size)
+		max_mem_size = xen_saved_max_mem_size;
+#endif
+
 	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 	if (err != 1) {
 		/* This is ok (for domain0 at least) - so just return */
...
@@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
 		if (pci_is_enabled(dev))
 			pci_disable_device(dev);

-		pci_write_config_word(dev, PCI_COMMAND, 0);
-
 		dev->is_busmaster = 0;
 	} else {
 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
...
@@ -544,7 +544,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
 		xenbus_switch_state(xdev, XenbusStateClosed);
 		if (xenbus_dev_is_online(xdev))
 			break;
-		/* fall through if not online */
+		/* fall through - if not online */
 	case XenbusStateUnknown:
 		dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
 		device_unregister(&xdev->dev);
...
@@ -1184,7 +1184,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev,
 		xenbus_switch_state(dev, XenbusStateClosed);
 		if (xenbus_dev_is_online(dev))
 			break;
-		/* fall through if not online */
+		/* fall through - if not online */
 	case XenbusStateUnknown:
 		device_unregister(&dev->dev);
 		break;
...
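
The two comment tweaks above (here and in the xen-pciback hunk before it) exist for GCC's -Wimplicit-fallthrough: at the stricter warning levels the compiler only accepts a limited set of fall-through comments, and free text after "fall through" apparently needs a separator such as the added dash. A small self-contained illustration of the pattern (newer kernels use the fallthrough; macro instead):

```c
#include <stdio.h>

enum state { STATE_CLOSED, STATE_UNKNOWN };

/* Illustrative only: a comment form accepted between case labels by
 * GCC's implicit-fallthrough checking. */
static void handle_state(enum state s, int still_online)
{
	switch (s) {
	case STATE_CLOSED:
		if (still_online)
			break;
		/* fall through - not online, same cleanup as the unknown state */
	case STATE_UNKNOWN:
		puts("unregister device");
		break;
	}
}

int main(void)
{
	handle_state(STATE_CLOSED, 0);	/* falls through, prints the message */
	handle_state(STATE_CLOSED, 1);	/* stays registered, prints nothing */
	return 0;
}
```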
@@ -100,6 +100,8 @@ extern void __online_page_free(struct page *page);
 extern int try_online_node(int nid);

+extern u64 max_mem_size;
+
 extern bool memhp_auto_online;
 /* If movable_node boot option specified */
 extern bool movable_node_enabled;
...
@@ -46,4 +46,8 @@ struct bio_vec;
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 			       const struct bio_vec *vec2);

+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+extern u64 xen_saved_max_mem_size;
+#endif
+
 #endif	/* _XEN_XEN_H */
@@ -96,10 +96,16 @@ void mem_hotplug_done(void)
 	cpus_read_unlock();
 }

+u64 max_mem_size = U64_MAX;
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
 	struct resource *res, *conflict;
+
+	if (start + size > max_mem_size)
+		return ERR_PTR(-E2BIG);
+
 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 	if (!res)
 		return ERR_PTR(-ENOMEM);
...
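
To tie the max_mem_size pieces of this series together, here is a hedged, ordinary-C sketch of the intended lifecycle (helper names invented; the real code lives in the e820/Xen setup hunks earlier and the hotplug hunk above): boot lowers the cap to the top of the E820 map, the balloon driver later restores the saved value, and register_memory_resource() enforces whatever the cap currently is.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t max_mem_size = UINT64_MAX;	/* kernel default: no cap (U64_MAX) */
static uint64_t xen_saved_max_mem_size;

/* Boot path (xen_memory_setup()/xen_align_and_add_e820_region(), simplified). */
static void boot_set_cap(uint64_t e820_top)
{
	xen_saved_max_mem_size = max_mem_size;	/* remember the unrestricted value */
	max_mem_size = e820_top;		/* no hot-adds beyond the E820 map */
}

/* Balloon driver up (watch_target(), simplified): it handles additions now. */
static void balloon_lift_cap(void)
{
	if (xen_saved_max_mem_size)
		max_mem_size = xen_saved_max_mem_size;
}

/* The register_memory_resource() check from the hunk above. */
static bool hot_add_allowed(uint64_t start, uint64_t size)
{
	return start + size <= max_mem_size;	/* otherwise -E2BIG */
}

int main(void)
{
	boot_set_cap(4ULL << 30);
	printf("%d\n", hot_add_allowed(6ULL << 30, 1 << 20));	/* 0: refused during boot */
	balloon_lift_cap();
	printf("%d\n", hot_add_allowed(6ULL << 30, 1 << 20));	/* 1: allowed afterwards */
	return 0;
}
```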