Commit eb64c3c6 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.19-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull additional xen update from David Vrabel:
 "Xen: additional features for 3.19-rc0

   - Linear p2m for x86 PV guests which simplifies the p2m code,
     improves performance and will allow for > 512 GB PV guests in the
     future.

  A last-minute, configuration-specific issue was discovered with this
  change, which is why it was not included in my previous pull request.
  This has now been fixed and tested"

* tag 'stable/for-linus-3.19-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: switch to post-init routines in xen mmu.c earlier
  Revert "swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single"
  xen: annotate xen_set_identity_and_remap_chunk() with __init
  xen: introduce helper functions to do safe read and write accesses
  xen: Speed up set_phys_to_machine() by using read-only mappings
  xen: switch to linear virtual mapped sparse p2m list
  xen: Hide get_phys_to_machine() to be able to tune common path
  x86: Introduce function to get pmd entry pointer
  xen: Delay invalidating extra memory
  xen: Delay m2p_override initialization
  xen: Delay remapping memory of pv-domain
  xen: use common page allocation function in p2m.c
  xen: Make functions static
  xen: fix some style issues in p2m.c
parents 61de8e53 f1d04b23
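
The core of the series is the switch from a three-level p2m tree to a single, virtually contiguous p2m array (see __pfn_to_mfn() in the xen page.h diff below). The following standalone sketch illustrates the difference; the sizes and values are invented and the identifiers only loosely mirror the kernel's (p2m_top vs. xen_p2m_addr), so treat it as an illustration rather than kernel code.

/*
 * Standalone illustration (user space, invented sizes/values; not kernel
 * code).  Old scheme: a three-level tree of p2m pages.  New scheme: one
 * virtually mapped linear array, indexed directly by pfn.
 */
#include <stdio.h>

#define P2M_PER_PAGE     4   /* tiny on purpose; really PAGE_SIZE / sizeof(long) */
#define P2M_MID_PER_PAGE 4

static unsigned long p2m_leaf[P2M_PER_PAGE]     = { 100, 101, 102, 103 };
static unsigned long *p2m_mid[P2M_MID_PER_PAGE] = { p2m_leaf };
static unsigned long **p2m_top[]                = { p2m_mid };

/* Old lookup: three dependent pointer dereferences per translation. */
static unsigned long lookup_tree(unsigned long pfn)
{
        unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
        unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
        unsigned long idx    = pfn % P2M_PER_PAGE;

        return p2m_top[topidx][mididx][idx];
}

/* New lookup: bounds check plus one array access, as in __pfn_to_mfn(). */
static unsigned long linear_p2m[]     = { 100, 101, 102, 103 };
static unsigned long linear_p2m_size  = 4;

static unsigned long lookup_linear(unsigned long pfn)
{
        if (pfn < linear_p2m_size)
                return linear_p2m[pfn];
        return ~0UL;                    /* INVALID_P2M_ENTRY in the kernel */
}

int main(void)
{
        printf("tree:   pfn 2 -> mfn %lu\n", lookup_tree(2));
        printf("linear: pfn 2 -> mfn %lu\n", lookup_linear(2));
        return 0;
}

With the linear layout a lookup is a bounds check plus a single array access, which is what the p2m simplification and speed-up in the commits above builds on.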
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -452,6 +452,7 @@ static inline void update_page_count(int level, unsigned long pages) { }
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                                     unsigned int *level);
+extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                    unsigned numpages, unsigned long page_flags);
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -41,10 +41,12 @@ typedef struct xpaddr {
 extern unsigned long *machine_to_phys_mapping;
 extern unsigned long machine_to_phys_nr;
+extern unsigned long *xen_p2m_addr;
+extern unsigned long xen_p2m_size;
+extern unsigned long xen_max_p2m_pfn;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
                                              unsigned long pfn_e);
@@ -52,17 +54,52 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                    struct gnttab_map_grant_ref *kmap_ops,
                                    struct page **pages, unsigned int count);
-extern int m2p_add_override(unsigned long mfn, struct page *page,
-                            struct gnttab_map_grant_ref *kmap_op);
 extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                                      struct gnttab_map_grant_ref *kmap_ops,
                                      struct page **pages, unsigned int count);
-extern int m2p_remove_override(struct page *page,
-                               struct gnttab_map_grant_ref *kmap_op,
-                               unsigned long mfn);
-extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
+/*
+ * Helper functions to write or read unsigned long values to/from
+ * memory, when the access may fault.
+ */
+static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
+{
+        return __put_user(val, (unsigned long __user *)addr);
+}
+
+static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+{
+        return __get_user(*val, (unsigned long __user *)addr);
+}
+
+/*
+ * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
+ * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
+ *   bits (identity or foreign) are set.
+ * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
+ *   identity or foreign indicator will be still set. __pfn_to_mfn() is
+ *   encapsulating get_phys_to_machine() which is called in special cases only.
+ * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
+ *   cases needing an extended handling.
+ */
+static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+        unsigned long mfn;
+
+        if (pfn < xen_p2m_size)
+                mfn = xen_p2m_addr[pfn];
+        else if (unlikely(pfn < xen_max_p2m_pfn))
+                return get_phys_to_machine(pfn);
+        else
+                return IDENTITY_FRAME(pfn);
+
+        if (unlikely(mfn == INVALID_P2M_ENTRY))
+                return get_phys_to_machine(pfn);
+
+        return mfn;
+}
+
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
         unsigned long mfn;
@@ -70,7 +107,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
         if (xen_feature(XENFEAT_auto_translated_physmap))
                 return pfn;
 
-        mfn = get_phys_to_machine(pfn);
+        mfn = __pfn_to_mfn(pfn);
         if (mfn != INVALID_P2M_ENTRY)
                 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
@@ -83,7 +120,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
         if (xen_feature(XENFEAT_auto_translated_physmap))
                 return 1;
 
-        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
+        return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
 }
 
 static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
@@ -102,7 +139,7 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
          * In such cases it doesn't matter what we return (we return garbage),
          * but we must handle the fault without crashing!
          */
-        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+        ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
         if (ret < 0)
                 return ~0;
@@ -117,7 +154,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
                 return mfn;
 
         pfn = mfn_to_pfn_no_overrides(mfn);
-        if (get_phys_to_machine(pfn) != mfn) {
+        if (__pfn_to_mfn(pfn) != mfn) {
                 /*
                  * If this appears to be a foreign mfn (because the pfn
                  * doesn't map back to the mfn), then check the local override
@@ -133,8 +170,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
          * entry doesn't map back to the mfn and m2p_override doesn't have a
          * valid entry for it.
          */
-        if (pfn == ~0 &&
-            get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+        if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
                 pfn = mfn;
 
         return pfn;
@@ -180,7 +216,7 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
                 return mfn;
 
         pfn = mfn_to_pfn(mfn);
-        if (get_phys_to_machine(pfn) != mfn)
+        if (__pfn_to_mfn(pfn) != mfn)
                 return -1; /* force !pfn_valid() */
         return pfn;
 }
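
To make the indicator-bit convention described in the new comment block concrete, here is a small standalone sketch (user space, with __pfn_to_mfn() stubbed out and values invented): __pfn_to_mfn() may hand back an entry with the identity or foreign bit still set, while pfn_to_mfn() masks those bits off before callers see the mfn. The bit definitions mirror the kernel's; the auto-translated-guest check of the real pfn_to_mfn() is omitted.

#include <stdio.h>

#define BITS_PER_LONG           (8 * (int)sizeof(unsigned long))
#define FOREIGN_FRAME_BIT       (1UL << (BITS_PER_LONG - 1))
#define IDENTITY_FRAME_BIT      (1UL << (BITS_PER_LONG - 2))
#define IDENTITY_FRAME(m)       ((m) | IDENTITY_FRAME_BIT)
#define INVALID_P2M_ENTRY       (~0UL)

/* Stub standing in for the real __pfn_to_mfn(): pretend pfn 5 is an
 * identity-mapped frame and everything else is a plain mfn. */
static unsigned long __pfn_to_mfn(unsigned long pfn)
{
        return (pfn == 5) ? IDENTITY_FRAME(pfn) : pfn + 1000;
}

/* Mirrors the structure of pfn_to_mfn() in the diff: strip the indicator
 * bits so callers only ever see the raw mfn. */
static unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn = __pfn_to_mfn(pfn);

        if (mfn != INVALID_P2M_ENTRY)
                mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
        return mfn;
}

int main(void)
{
        printf("__pfn_to_mfn(5) = %#lx\n", __pfn_to_mfn(5));
        printf("pfn_to_mfn(5)   = %lu\n", pfn_to_mfn(5));
        return 0;
}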
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -383,6 +383,26 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
         return lookup_address(address, level);
 }
 
+/*
+ * Lookup the PMD entry for a virtual address. Return a pointer to the entry
+ * or NULL if not present.
+ */
+pmd_t *lookup_pmd_address(unsigned long address)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+
+        pgd = pgd_offset_k(address);
+        if (pgd_none(*pgd))
+                return NULL;
+
+        pud = pud_offset(pgd, address);
+        if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+                return NULL;
+
+        return pmd_offset(pud, address);
+}
+
 /*
  * This is necessary because __pa() does not work on some
  * kinds of memory, like vmalloc() or the alloc_remap()
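
A hypothetical caller of the new helper, for illustration only (this exact function is not part of the series): lookup_pmd_address() returns NULL when no PMD covers the address (or when the PUD level is itself a huge mapping), otherwise a pointer to the PMD entry, so a caller can for example check whether an address is backed by a 2M mapping.

#include <linux/types.h>
#include <asm/pgtable.h>

/* Illustrative only: report whether vaddr is currently mapped by a
 * large (2M) PMD entry. */
static bool vaddr_has_large_pmd(unsigned long vaddr)
{
        pmd_t *pmdp = lookup_pmd_address(vaddr);

        return pmdp && !pmd_none(*pmdp) && pmd_large(*pmdp);
}

The helper itself was introduced for the Xen p2m rework in this series (the "x86: Introduce function to get pmd entry pointer" commit in the list above).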
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -387,7 +387,7 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
                 unsigned long mfn;
 
                 if (!xen_feature(XENFEAT_auto_translated_physmap))
-                        mfn = get_phys_to_machine(pfn);
+                        mfn = __pfn_to_mfn(pfn);
                 else
                         mfn = pfn;
                 /*
@@ -1113,20 +1113,16 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
          * instead of somewhere later and be confusing. */
         xen_mc_flush();
 }
 
-static void __init xen_pagetable_p2m_copy(void)
+static void __init xen_pagetable_p2m_free(void)
 {
         unsigned long size;
         unsigned long addr;
-        unsigned long new_mfn_list;
-
-        if (xen_feature(XENFEAT_auto_translated_physmap))
-                return;
 
         size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 
-        new_mfn_list = xen_revector_p2m_tree();
         /* No memory or already called. */
-        if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
+        if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
                 return;
 
         /* using __ka address and sticking INVALID_P2M_ENTRY! */
@@ -1144,8 +1140,6 @@ static void __init xen_pagetable_p2m_copy(void)
         size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
         memblock_free(__pa(xen_start_info->mfn_list), size);
-        /* And revector! Bye bye old array */
-        xen_start_info->mfn_list = new_mfn_list;
 
         /* At this stage, cleanup_highmap has already cleaned __ka space
          * from _brk_limit way up to the max_pfn_mapped (which is the end of
@@ -1169,17 +1163,35 @@
 }
 #endif
 
-static void __init xen_pagetable_init(void)
+static void __init xen_pagetable_p2m_setup(void)
 {
-        paging_init();
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                return;
+
+        xen_vmalloc_p2m_tree();
+
 #ifdef CONFIG_X86_64
-        xen_pagetable_p2m_copy();
+        xen_pagetable_p2m_free();
 #endif
+        /* And revector! Bye bye old array */
+        xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+}
+
+static void __init xen_pagetable_init(void)
+{
+        paging_init();
+        xen_post_allocator_init();
+
+        xen_pagetable_p2m_setup();
+
         /* Allocate and initialize top and mid mfn levels for p2m structure */
         xen_build_mfn_list_list();
 
+        /* Remap memory freed due to conflicts with E820 map */
+        if (!xen_feature(XENFEAT_auto_translated_physmap))
+                xen_remap_memory();
+
         xen_setup_shared_info();
-        xen_post_allocator_init();
 }
 
 static void xen_write_cr2(unsigned long cr2)
 {
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -29,11 +29,13 @@ void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_reserve_top(void);
-extern unsigned long xen_max_p2m_pfn;
 
 void xen_mm_pin_all(void);
 void xen_mm_unpin_all(void);
 
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
+void __init xen_inv_extra_mem(void);
+void __init xen_remap_memory(void);
 char * __init xen_memory_setup(void);
 char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
@@ -46,7 +48,7 @@ void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
-unsigned long __init xen_revector_p2m_tree(void);
+void __init xen_vmalloc_p2m_tree(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);