Commit 3a19109e authored by Toshi Kani, committed by Thomas Gleixner

x86/mm: Fix try_preserve_large_page() to handle large PAT bit

try_preserve_large_page() is called from __change_page_attr() to
change the mapping attribute of a given large page.  This function
uses pte_pfn() and pte_pgprot() for PUD/PMD, which do not handle
the large PAT bit properly.
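
For background, in a 4K PTE the PAT bit lives at bit 7, whereas 2M/1G
entries use bit 7 for PSE and carry the PAT bit at bit 12, i.e. inside the
range that a 4K-style pfn mask treats as part of the pfn.  The following
minimal user-space sketch (plain C, not the kernel implementation; the
constants and the example 2M mapping at physical 0x40000000 are local to
this illustration) shows the effect:

#include <stdio.h>
#include <stdint.h>

#define PSE_BIT        (1ULL << 7)            /* large-page flag        */
#define PAT_LARGE_BIT  (1ULL << 12)           /* PAT bit in 2M/1G entry */
#define PTE_PFN_MASK   0x000ffffffffff000ULL  /* bits 12..51, 4K layout */
#define PMD_PFN_MASK   0x000fffffffe00000ULL  /* bits 21..51, 2M layout */

int main(void)
{
	/* 2M mapping of 0x40000000 with PSE, large PAT and P|RW|A|D set */
	uint64_t pmd = 0x40000000ULL | PSE_BIT | PAT_LARGE_BIT | 0x63;

	/* 4K-style extraction: the PAT bit leaks into the pfn -> 0x40001 */
	printf("pte-style pfn: 0x%llx\n",
	       (unsigned long long)((pmd & PTE_PFN_MASK) >> 12));
	/* large-page-aware extraction: correct base pfn -> 0x40000 */
	printf("pmd-style pfn: 0x%llx\n",
	       (unsigned long long)((pmd & PMD_PFN_MASK) >> 12));
	return 0;
}

A pfn derived the 4K way is off by one page, and the corresponding pgprot
silently loses the memory type selected by the PAT bit; the pmd/pud
accessors used in the patch below avoid both problems.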

Fix try_preserve_large_page() by using the corresponding pud/pmd
prot/pfn interfaces.

Also remove '#ifdef CONFIG_X86_64', which is not necessary.
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Robert Elliot <elliott@hpe.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1442514264-12475-10-git-send-email-toshi.kani@hpe.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent daf3e35c
@@ -468,7 +468,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
@@ -488,17 +488,21 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	switch (level) {
 	case PG_LEVEL_2M:
-#ifdef CONFIG_X86_64
+		old_prot = pmd_pgprot(*(pmd_t *)kpte);
+		old_pfn = pmd_pfn(*(pmd_t *)kpte);
+		break;
 	case PG_LEVEL_1G:
-#endif
-		psize = page_level_size(level);
-		pmask = page_level_mask(level);
+		old_prot = pud_pgprot(*(pud_t *)kpte);
+		old_pfn = pud_pfn(*(pud_t *)kpte);
 		break;
 	default:
 		do_split = -EINVAL;
 		goto out_unlock;
 	}
 
+	psize = page_level_size(level);
+	pmask = page_level_mask(level);
+
 	/*
 	 * Calculate the number of pages, which fit into this large
 	 * page starting at address:
@@ -514,7 +518,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * up accordingly.
 	 */
 	old_pte = *kpte;
-	old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
+	old_prot = req_prot = pgprot_large_2_4k(old_prot);
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -540,10 +544,10 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	req_prot = canon_pgprot(req_prot);
 
 	/*
-	 * old_pte points to the large page base address. So we need
+	 * old_pfn points to the large page base pfn. So we need
 	 * to add the offset of the virtual address:
 	 */
-	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
+	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
 	cpa->pfn = pfn;
 
 	new_prot = static_protections(req_prot, address, pfn);
@@ -554,7 +558,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * the pages in the range we try to preserve:
 	 */
 	addr = address & pmask;
-	pfn = pte_pfn(old_pte);
+	pfn = old_pfn;
 	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
 		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
 
@@ -584,7 +588,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * The address is aligned and the number of pages
 	 * covers the full page.
 	 */
-	new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
+	new_pte = pfn_pte(old_pfn, new_prot);
 	__set_pmd_pte(kpte, address, new_pte);
 	cpa->flags |= CPA_FLUSHTLB;
 	do_split = 0;
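
For readability, the level switch reads as follows after this patch
(assembled directly from the hunks above):

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);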