Commit c1d15f5c authored by Stefano Stabellini, committed by Konrad Rzeszutek Wilk

xen/balloon: Separate the auto-translate logic properly (v2)

Because of the auto-translate (auto-xlat) vs non-xlat split, auto-xlat guests
(such as PVH, HVM or ARM) do not need to:
 - use the P2M
 - use a scratch page.

However, increase_reservation currently does modify the p2m for
auto-translated guests, while decrease_reservation does not.

Fix that by avoiding any p2m modifications in both increase_reservation
and decrease_reservation for auto-translated guests.

Also avoid allocating or using scratch pages for auto-translated guests.

Lastly, since !auto-xlat is really just another way of saying 'xen_pv',
remove the redundant 'xen_pv_domain' check. (An illustrative sketch of this
guard follows the commit metadata below.)
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
[v2: Updated the description]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 380108d8
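
For orientation, here is a minimal, self-contained user-space sketch of the guard pattern this patch applies in both increase_reservation and decrease_reservation: only non-auto-translated (PV) guests touch the p2m or need a scratch page. It is illustrative only; the struct guest type, the balloon_one_page() helper and the guest table are assumptions made for this sketch, not code from the patch or the Xen API.

/*
 * Illustrative user-space sketch only -- models the decision this patch
 * enforces; it is not kernel code and not part of the commit.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical guest descriptor; "auto_translated" stands in for
 * xen_feature(XENFEAT_auto_translated_physmap). */
struct guest {
	const char *name;
	bool auto_translated;	/* PVH/HVM/ARM: true, classic PV: false */
};

/* Hypothetical helper: only non-auto-translated (PV) guests update the
 * p2m and use a scratch page when a page is ballooned in or out. */
static void balloon_one_page(const struct guest *g)
{
	if (!g->auto_translated)
		printf("%s: update p2m and use a scratch page\n", g->name);
	else
		printf("%s: hypervisor keeps the translation, nothing to do\n", g->name);
}

int main(void)
{
	const struct guest guests[] = {
		{ "PV",  false },
		{ "PVH", true },
		{ "HVM", true },
		{ "ARM", true },
	};

	for (size_t i = 0; i < sizeof(guests) / sizeof(guests[0]); i++)
		balloon_one_page(&guests[i]);

	return 0;
}

The same condition, !xen_feature(XENFEAT_auto_translated_physmap), is what replaces the xen_pv_domain() checks in the hunks below.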
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		pfn = page_to_pfn(page);
 
-		set_phys_to_machine(pfn, frame_list[i]);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
-		/* Link back into the page tables if not highmem. */
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			int ret;
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				mfn_pte(frame_list[i], PAGE_KERNEL),
-				0);
-			BUG_ON(ret);
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			set_phys_to_machine(pfn, frame_list[i]);
+
+			/* Link back into the page tables if not highmem. */
+			if (!PageHighMem(page)) {
+				int ret;
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frame_list[i], PAGE_KERNEL),
+					0);
+				BUG_ON(ret);
+			}
 		}
 #endif
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
-	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
 		/*
 		 * Ballooned out frames are effectively replaced with
 		 * a scratch frame.  Ensure direct mappings and the
 		 * p2m are consistent.
 		 */
-		scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		if (xen_pv_domain() && !PageHighMem(page)) {
-			ret = HYPERVISOR_update_va_mapping(
-				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(scratch_page),
-					PAGE_KERNEL_RO), 0);
-			BUG_ON(ret);
-		}
-#endif
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
+			struct page *scratch_page = get_balloon_scratch_page();
+
+			if (!PageHighMem(page)) {
+				ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					pfn_pte(page_to_pfn(scratch_page),
+						PAGE_KERNEL_RO), 0);
+				BUG_ON(ret);
+			}
 			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+			put_balloon_scratch_page();
 		}
-		put_balloon_scratch_page();
+#endif
 
 		balloon_append(pfn_to_page(pfn));
 	}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	for_each_online_cpu(cpu)
-	{
-		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-			return -ENOMEM;
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		for_each_online_cpu(cpu)
+		{
+			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+				return -ENOMEM;
+			}
 		}
+		register_cpu_notifier(&balloon_cpu_notifier);
 	}
-	register_cpu_notifier(&balloon_cpu_notifier);
 
 	pr_info("Initialising balloon driver\n");