Commit 45478336 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/mm/pat: Add 5-level paging support

Straightforward extension of the existing code to support the additional
page table level.
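For context, here is a minimal sketch (not part of the patch) of the walk
pattern the hunks below apply throughout pageattr.c: a p4d_offset() /
p4d_none() / p4d_large() step is inserted between the pgd and pud lookups,
and pud_offset() now takes the p4d entry. The helper name walk_to_pte() is
made up for this illustration only.

	/*
	 * Illustrative only -- not part of this patch.  Sketch of the
	 * 5-level walk: pgd -> p4d -> pud -> pmd -> pte.  On 4-level
	 * configurations the p4d level is folded into the pgd.
	 */
	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static pte_t *walk_to_pte(pgd_t *pgd, unsigned long address)
	{
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;

		p4d = p4d_offset(pgd, address);	/* new level */
		if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
			return NULL;

		pud = pud_offset(p4d, address);	/* now takes a p4d_t * */
		if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
			return NULL;

		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			return NULL;

		return pte_offset_kernel(pmd, address);
	}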
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170317185515.8636-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e981316f
arch/x86/mm/pageattr.c

@@ -346,6 +346,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 			     unsigned int *level)
 {
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
@@ -354,7 +355,15 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (pgd_none(*pgd))
 		return NULL;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d))
+		return NULL;
+
+	*level = PG_LEVEL_512G;
+	if (p4d_large(*p4d) || !p4d_present(*p4d))
+		return (pte_t *)p4d;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud))
 		return NULL;
@@ -406,13 +415,18 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
 pmd_t *lookup_pmd_address(unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 
 	pgd = pgd_offset_k(address);
 	if (pgd_none(*pgd))
 		return NULL;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
+		return NULL;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
 		return NULL;
@@ -477,11 +491,13 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
+			p4d_t *p4d;
 			pud_t *pud;
 			pmd_t *pmd;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			pud = pud_offset(pgd, address);
+			p4d = p4d_offset(pgd, address);
+			pud = pud_offset(p4d, address);
 			pmd = pmd_offset(pud, address);
 			set_pte_atomic((pte_t *)pmd, pte);
 		}
@@ -836,9 +852,9 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
 		pud_clear(pud);
 }
 
-static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 {
-	pud_t *pud = pud_offset(pgd, start);
+	pud_t *pud = pud_offset(p4d, start);
 
 	/*
 	 * Not on a GB page boundary?
@@ -1004,8 +1020,8 @@ static long populate_pmd(struct cpa_data *cpa,
 	return num_pages;
 }
 
-static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-			 pgprot_t pgprot)
+static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
+			pgprot_t pgprot)
 {
 	pud_t *pud;
 	unsigned long end;
@@ -1026,7 +1042,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 		cur_pages = (pre_end - start) >> PAGE_SHIFT;
 		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
 
-		pud = pud_offset(pgd, start);
+		pud = pud_offset(p4d, start);
 
 		/*
 		 * Need a PMD page?
@@ -1047,7 +1063,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	if (cpa->numpages == cur_pages)
 		return cur_pages;
 
-	pud = pud_offset(pgd, start);
+	pud = pud_offset(p4d, start);
 	pud_pgprot = pgprot_4k_2_large(pgprot);
 
 	/*
@@ -1067,7 +1083,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	if (start < end) {
 		long tmp;
 
-		pud = pud_offset(pgd, start);
+		pud = pud_offset(p4d, start);
 		if (pud_none(*pud))
 			if (alloc_pmd_page(pud))
 				return -1;
@@ -1090,33 +1106,43 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 {
 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
 	pud_t *pud = NULL;	/* shut up gcc */
+	p4d_t *p4d;
 	pgd_t *pgd_entry;
 	long ret;
 
 	pgd_entry = cpa->pgd + pgd_index(addr);
 
+	if (pgd_none(*pgd_entry)) {
+		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+		if (!p4d)
+			return -1;
+
+		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
+	}
+
 	/*
 	 * Allocate a PUD page and hand it down for mapping.
 	 */
-	if (pgd_none(*pgd_entry)) {
+	p4d = p4d_offset(pgd_entry, addr);
+	if (p4d_none(*p4d)) {
 		pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
 		if (!pud)
 			return -1;
 
-		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
 	}
 
 	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
 
-	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+	ret = populate_pud(cpa, addr, p4d, pgprot);
 	if (ret < 0) {
 		/*
 		 * Leave the PUD page in place in case some other CPU or thread
 		 * already found it, but remove any useless entries we just
 		 * added to it.
 		 */
-		unmap_pud_range(pgd_entry, addr,
+		unmap_pud_range(p4d, addr,
 				addr + (cpa->numpages << PAGE_SHIFT));
 		return ret;
 	}
...