Commit d7c02680 authored by Linus Torvalds

Merge tag 'for-linus-4.19c-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "This contains some minor cleanups and fixes:

   - a new knob for controlling scrubbing of pages returned by the Xen
     balloon driver to the Xen hypervisor to address a boot performance
     issue seen in large guests booted pre-ballooned

   - a fix of a regression in the gntdev driver which made it impossible
     to use fully virtualized guests (HVM guests) with a 4.19 based dom0

   - a fix in Xen cpu hotplug functionality which could be triggered by
     wrong admin commands (setting number of active vcpus to 0)

  One further note: the patches have all been under test for several
  days in another branch. This branch has been rebased in order to avoid
  merge conflicts"

* tag 'for-linus-4.19c-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/gntdev: fix up blockable calls to mn_invl_range_start
  xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage
  xen: avoid crash in disable_hotplug_cpu
  xen/balloon: add runtime control for scrubbing ballooned out pages
  xen/manage: don't complain about an empty value in control/sysrq node
parents eae4f885 58a57569
--- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -75,3 +75,12 @@ Contact:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
 		Amount (in KiB) of low (or normal) memory in the
 		balloon.
+
+What:		/sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:		September 2018
+KernelVersion:	4.20
+Contact:	xen-devel@lists.xenproject.org
+Description:
+		Control scrubbing pages before returning them to Xen for others domains
+		use. Can be set with xen_scrub_pages cmdline
+		parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5000,6 +5000,12 @@
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.
 
+	xen_scrub_pages=	[XEN]
+			Boolean option to control scrubbing pages before giving them back
+			to Xen, for use by other domains. Can be also changed at runtime
+			with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+			Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
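For illustration only (not part of the commit): once the documented sysfs node exists, the knob can be flipped at runtime from a trivial userspace helper such as the sketch below; it needs root and assumes a Xen guest with the balloon driver loaded.

    /* Minimal userspace sketch: toggle page scrubbing at runtime
     * via the sysfs node documented above. */
    #include <stdio.h>

    int main(void)
    {
            const char *node =
                    "/sys/devices/system/xen_memory/xen_memory0/scrub_pages";
            FILE *f = fopen(node, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* "1" enables scrubbing, "0" disables it. */
            fputs("1", f);
            fclose(f);
            return 0;
    }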
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
 	  This value is used to allocate enough space in internal
 	  tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-	bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+	bool "Scrub pages before returning them to system by default"
 	depends on XEN_BALLOON
 	default y
 	help
 	  Scrub pages before returning them to the system for reuse by
 	  other domains. This makes sure that any confidential data
 	  is not accidentally visible to other domains. Is it more
-	  secure, but slightly less efficient.
+	  secure, but slightly less efficient. This can be controlled with
+	  xen_scrub_pages=0 parameter and
+	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+	  This option only sets the default value.
+
 	  If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
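The _DEFAULT suffix is the point of the rename: the option no longer compiles scrubbing in or out, it only seeds a runtime boolean. A self-contained sketch of that pattern follows; the CONFIG_ macro is stubbed here since kbuild normally generates it, and the simplified IS_ENABLED() stands in for the kernel's more defensive version.

    /* Sketch: a Kconfig "default y" feeding a runtime variable. */
    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_XEN_SCRUB_PAGES_DEFAULT 1    /* pretend "default y" */
    #define IS_ENABLED(opt) opt                 /* simplified stand-in */

    static bool xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);

    int main(void)
    {
            /* A boot parameter or sysfs write can override this later. */
            printf("scrub by default: %d\n", xen_scrub_pages);
            return 0;
    }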
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
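The failure mode this guards against: the old code cleared the CPU's "present" state even when device_offline() had failed or the CPU (e.g. CPU0, after an admin wrote 0 to the vcpu count) was not hotpluggable at all. A reduced sketch of the fixed control flow, with stand-ins for the kernel helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers, for illustration only. */
    static bool online[2] = { true, true };
    static bool cpu_is_hotpluggable(int cpu) { return cpu != 0; }
    static bool cpu_online(int cpu) { return online[cpu]; }
    static void device_offline(int cpu) { online[cpu] = false; }

    static void disable_hotplug_cpu(int cpu)
    {
            if (!cpu_is_hotpluggable(cpu))
                    return;         /* e.g. CPU0: refuse, don't corrupt state */
            if (cpu_online(cpu))
                    device_offline(cpu);
            if (!cpu_online(cpu))   /* only unregister if really offline now */
                    printf("cpu %d unregistered, marked not present\n", cpu);
    }

    int main(void)
    {
            disable_hotplug_cpu(0); /* old code unregistered this live CPU */
            disable_hotplug_cpu(1);
            return 0;
    }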
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
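For context: evtchn_to_irq is a two-level table indexed by page-sized rows, and the removed line re-expanded the EVTCHN_ROW()/EVTCHN_COL() macros for values the function had already computed into locals. A self-contained sketch of the indexing scheme, with an illustrative row width (the kernel derives it from PAGE_SIZE and the entry size):

    #include <stdio.h>

    #define COLS_PER_ROW 1024               /* illustrative row width */
    #define EVTCHN_ROW(e)   ((e) / COLS_PER_ROW)
    #define EVTCHN_COL(e)   ((e) % COLS_PER_ROW)

    static int *evtchn_to_irq[8];           /* rows allocated on demand */

    int main(void)
    {
            unsigned evtchn = 1500;
            unsigned row = EVTCHN_ROW(evtchn);
            unsigned col = EVTCHN_COL(evtchn);

            static int row_storage[COLS_PER_ROW];
            evtchn_to_irq[row] = row_storage;

            /* The fix: index with the precomputed row/col instead of
             * expanding the macros a second time. */
            evtchn_to_irq[row][col] = 42;
            printf("row %u col %u irq %d\n", row, col, evtchn_to_irq[row][col]);
            return 0;
    }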
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
 	return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-			      unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+			      unsigned long start, unsigned long end,
+			      bool blockable)
 {
 	unsigned long mstart, mend;
 	int err;
 
+	if (!in_range(map, start, end))
+		return 0;
+
+	if (!blockable)
+		return -EAGAIN;
+
 	mstart = max(start, map->vma->vm_start);
 	mend = min(end, map->vma->vm_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
 			(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 			(mend - mstart) >> PAGE_SHIFT);
 	WARN_ON(err);
+
+	return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	/* TODO do we really need a mutex here? */
 	if (blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
 			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
 			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
 	}
 
 out_unlock:
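The shape of the fix is the general non-blocking mmu-notifier contract: when the caller cannot sleep, every path that might block has to back off with -EAGAIN rather than proceed. A reduced userspace sketch of that control flow (pthread mutex and a stub standing in for the kernel lock and the unmap work; build with -lpthread):

    #include <stdbool.h>
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    #define EAGAIN 11

    /* Stand-in for unmap_if_in_range(): must not sleep when !blockable. */
    static int unmap_one(bool in_range, bool blockable)
    {
            if (!in_range)
                    return 0;       /* nothing to do, never an error */
            if (!blockable)
                    return -EAGAIN; /* would have to wait: ask caller to retry */
            /* ... blocking unmap work would go here ... */
            return 0;
    }

    static int invalidate(bool blockable)
    {
            int ret;

            if (blockable)
                    pthread_mutex_lock(&lock);
            else if (pthread_mutex_trylock(&lock) != 0)
                    return -EAGAIN; /* can't take the lock without waiting */

            ret = unmap_one(true, blockable);
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            printf("non-blocking attempt: %d\n", invalidate(false));
            printf("blocking attempt:     %d\n", invalidate(true));
            return 0;
    }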
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''), this happens when we acknowledge the
+		 * request by writing '\0' below.
		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
			pr_err("Error %d reading sysrq code in control/sysrq\n",
			       err);
		xenbus_transaction_end(xbt, 1);
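Why ERANGE specifically: the handler reads the node with a %c conversion, and scanning an empty string matches zero items, which xenbus_scanf() surfaces as -ERANGE; the handler itself produces that empty value when it acknowledges the request by writing '\0'. A userspace analogue of the scan step (glibc's sscanf returns EOF rather than 0 here, but the "not one match" check is the same):

    #include <stdio.h>

    int main(void)
    {
            char sysrq_key = '\0';

            /* After the handler acknowledges by writing '\0', the watch
             * refires and the node reads back as an empty string: */
            const char *node_value = "";

            if (sscanf(node_value, "%c", &sysrq_key) != 1)
                    printf("empty value, nothing to do\n"); /* not an error */
            else
                    printf("got sysrq key '%c'\n", sysrq_key);
            return 0;
    }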
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
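core_param() registers xen_scrub_pages as an early, prefix-less kernel command-line parameter, and the 0 permission keeps it out of sysfs, which is why runtime control is wired up separately as a balloon device attribute below. A userspace sketch of what the boot-time half amounts to (a hand-rolled parser standing in for the kernel's parameter handling):

    /* Sketch: an early boot parameter overriding the built-in default. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool xen_scrub_pages = true;     /* CONFIG_..._DEFAULT=y */

    static void parse_cmdline(const char *cmdline)
    {
            const char *p = strstr(cmdline, "xen_scrub_pages=");

            if (p)
                    xen_scrub_pages = (p[strlen("xen_scrub_pages=")] != '0');
    }

    int main(void)
    {
            parse_cmdline("console=hvc0 xen_scrub_pages=0 quiet");
            printf("scrub pages: %d\n", xen_scrub_pages);   /* prints 0 */
            return 0;
    }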
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
 	&dev_attr_max_schedule_delay.attr.attr,
 	&dev_attr_retry_count.attr.attr,
 	&dev_attr_max_retry_count.attr.attr,
+	&dev_attr_scrub_pages.attr.attr,
 	NULL
 };
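DEVICE_BOOL_ATTR() binds the sysfs file directly to the exported bool with generic show/store helpers, so no bespoke handlers are needed here. A userspace analogue of the generated pair, for illustration only (the kernel's store side accepts the usual 0/1/y/n forms):

    #include <stdbool.h>
    #include <stdio.h>

    static bool scrub_pages = true;

    /* Reads format the current value... */
    static int show(char *buf, size_t len)
    {
            return snprintf(buf, len, "%d\n", scrub_pages);
    }

    /* ...writes parse the new one into the same variable. */
    static void store(const char *buf)
    {
            scrub_pages = (buf[0] == '1' || buf[0] == 'y' || buf[0] == 'Y');
    }

    int main(void)
    {
            char buf[8];

            store("0\n");
            show(buf, sizeof(buf));
            fputs(buf, stdout);     /* prints 0 */
            return 0;
    }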
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
+	if (xen_scrub_pages)
+		clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
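Net effect of the header change: the scrub decision moves from an #ifdef resolved at build time to a branch on one global, so a single kernel binary serves both preferences, and the cost with scrubbing off is one predictable branch per returned page. A tiny sketch of the gate, with memset standing in for clear_highpage():

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    static bool xen_scrub_pages = true;

    static void scrub_page(unsigned char *page)
    {
            if (xen_scrub_pages)    /* was: #ifdef CONFIG_XEN_SCRUB_PAGES */
                    memset(page, 0, PAGE_SIZE);
    }

    int main(void)
    {
            static unsigned char page[PAGE_SIZE] = { 0xaa };

            scrub_page(page);
            printf("first byte after scrub: %#x\n", page[0]);  /* 0 */
            return 0;
    }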