Commit 88221399 authored by Juergen Gross

xen: fix p2m size in dom0 for disabled memory hotplug case

Since commit 9e2369c0 ("xen: add helpers to allocate unpopulated
memory") foreign mappings are using guest physical addresses allocated
via ZONE_DEVICE functionality.

This causes problems when no balloon memory hotplug is configured, because
the p2m list then only covers the initial memory size of the domain. Any
ZONE_DEVICE allocated address lies outside the p2m range, so no mapping can
be established for that memory address.
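
To make the failure mode concrete, here is a worked example; the 4 GiB domain
size is an assumption picked for illustration, not taken from the report:

  initial domain size       : 4 GiB  ->  nr_pages = 4 GiB / 4 KiB = 0x100000
  p2m coverage (no hotplug) : pfn 0 .. 0xfffff only
  ZONE_DEVICE allocated gfn : >= 0x100000  ->  no p2m slot, mapping fails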

Fix that by extending the p2m size for that case. At the same time, add a
check that a mapping to be created lies within the p2m limits, in order to
detect errors early.
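
Putting numbers on the extended p2m size (same assumed 4 GiB dom0 as above,
CONFIG_XEN_UNPOPULATED_ALLOC=y and no memory hotplug limit configured, so
p2m_limit would otherwise be zero):

  p2m_limit = nr_pages * XEN_EXTRA_MEM_RATIO
            = 0x100000 * 10 = 10485760 pfns   (~40 GiB of guest-physical space)
  p2m area  = 10485760 * sizeof(unsigned long)
            = 80 MiB of vmalloc address space on x86-64

Only virtual address space is reserved up front for the larger p2m; backing
pages are still allocated on demand.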

While changing a comment, remove some 32-bit leftovers.

This is XSA-369.

Fixes: 9e2369c0 ("xen: add helpers to allocate unpopulated memory")
Cc: <stable@vger.kernel.org> # 5.9
Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 2991397d
arch/x86/include/asm/xen/page.h
@@ -86,6 +86,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 #endif
 
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define XEN_EXTRA_MEM_RATIO (10)
+
 /*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.
arch/x86/xen/p2m.c
@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void)
         xen_p2m_last_pfn = xen_max_p2m_pfn;
 
         p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
+        if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
+                p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
+
         vm.flags = VM_ALLOC;
         vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                         PMD_SIZE * PMDS_PER_MID_PAGE);
@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
         pte_t *ptep;
         unsigned int level;
 
-        if (unlikely(pfn >= xen_p2m_size)) {
-                BUG_ON(mfn != INVALID_P2M_ENTRY);
-                return true;
-        }
+        /* Only invalid entries allowed above the highest p2m covered frame. */
+        if (unlikely(pfn >= xen_p2m_size))
+                return mfn == INVALID_P2M_ENTRY;
 
         /*
          * The interface requires atomic updates on p2m elements.
arch/x86/xen/setup.c
@@ -59,18 +59,6 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
-/*
- * The maximum amount of extra memory compared to the base size.  The
- * main scaling factor is the size of struct page.  At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define EXTRA_MEM_RATIO (10)
-
 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 
 static void __init xen_parse_512gb(void)
@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void)
                 extra_pages += max_pages - max_pfn;
 
         /*
-         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
-         * factor the base size.  On non-highmem systems, the base
-         * size is the full initial memory allocation; on highmem it
-         * is limited to the max size of lowmem, so that it doesn't
-         * get completely filled.
+         * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+         * factor the base size.
          *
          * Make sure we have no memory above max_pages, as this area
          * isn't handled by the p2m management.
-         *
-         * In principle there could be a problem in lowmem systems if
-         * the initial memory is also very large with respect to
-         * lowmem, but we won't try to deal with that here.
          */
-        extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+        extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                            extra_pages, max_pages - max_pfn);
         i = 0;
         addr = xen_e820_table.entries[0].addr;
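
For readers following the second p2m.c hunk, below is a minimal standalone
model (userspace C, illustrative only) of the reworked range check in
__set_phys_to_machine(). The p2m size and the pfn values are assumptions, and
the in-range update path is stubbed out; the point is the changed semantics:
a real mfn for a pfn beyond the covered range is now reported as a failure to
the caller instead of hitting the old BUG_ON().

  #include <stdbool.h>
  #include <stdio.h>

  #define INVALID_P2M_ENTRY (~0UL)        /* same value the Xen headers use */

  /* Assumed p2m coverage: 4 GiB dom0 with 4 KiB pages -> pfns 0 .. 0xfffff. */
  static const unsigned long xen_p2m_size = 0x100000;

  /* Model of the new check: above the covered range, only the invalid entry
   * is accepted; everything else is reported to the caller as a failure. */
  static bool model_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
  {
          if (pfn >= xen_p2m_size)
                  return mfn == INVALID_P2M_ENTRY;
          return true;    /* in-range updates are stubbed out as successful */
  }

  int main(void)
  {
          /* A ZONE_DEVICE-style pfn above the covered range is rejected: */
          printf("pfn 0x200000, real mfn    -> %s\n",
                 model_set_phys_to_machine(0x200000, 0x1234) ? "ok" : "rejected");
          /* Clearing such an entry is still allowed: */
          printf("pfn 0x200000, invalid mfn -> %s\n",
                 model_set_phys_to_machine(0x200000, INVALID_P2M_ENTRY) ? "ok" : "rejected");
          return 0;
  }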