Commit 42a54772 authored by Borislav Petkov, committed by Matt Fleming

x86, pageattr: Export page unmapping interface

We will use it in the EFI code, so export it.
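For illustration only (not part of this patch), a minimal sketch of how a caller that manages its own page table root might pair the two interfaces; the function name, pgd, pfn, address and page flags below are hypothetical placeholders:

	/* Hypothetical caller, e.g. code that owns a private pgd. */
	static void example_map_unmap(pgd_t *my_pgd, u64 pfn, unsigned long va)
	{
		unsigned numpages = 4;	/* assumed: map four pages */

		/* Map the pages at 'va' into the private pgd ... */
		if (kernel_map_pages_in_pgd(my_pgd, pfn, va, numpages, _PAGE_RW))
			return;		/* mapping failed */

		/* ... and later tear the mapping down with the new helper. */
		kernel_unmap_pages_in_pgd(my_pgd, va, numpages);
	}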
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
parent 11cc8512
@@ -385,6 +385,8 @@ extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 				   unsigned numpages, unsigned long page_flags);
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+			       unsigned numpages);
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_DEFS_H */
@@ -692,6 +692,18 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
 	return true;
 }
 
+static bool try_to_free_pud_page(pud_t *pud)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		if (!pud_none(pud[i]))
+			return false;
+
+	free_page((unsigned long)pud);
+	return true;
+}
+
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, start);
@@ -805,6 +817,16 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 	 */
 }
 
+static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
+{
+	pgd_t *pgd_entry = root + pgd_index(addr);
+
+	unmap_pud_range(pgd_entry, addr, end);
+
+	if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
+		pgd_clear(pgd_entry);
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -999,9 +1021,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 {
 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
-	bool allocd_pgd = false;
-	pgd_t *pgd_entry;
 	pud_t *pud = NULL;	/* shut up gcc */
+	pgd_t *pgd_entry;
 	int ret;
 
 	pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1015,7 +1036,6 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 			return -1;
 
 		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
-		allocd_pgd = true;
 	}
 
 	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
@@ -1023,19 +1043,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 
 	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
 	if (ret < 0) {
-		unmap_pud_range(pgd_entry, addr,
+		unmap_pgd_range(cpa->pgd, addr,
 			        addr + (cpa->numpages << PAGE_SHIFT));
-
-		if (allocd_pgd) {
-			/*
-			 * If I allocated this PUD page, I can just as well
-			 * free it in this error path.
-			 */
-			pgd_clear(pgd_entry);
-			free_page((unsigned long)pud);
-		}
 		return ret;
 	}
+
 	cpa->numpages = ret;
 	return 0;
 }
@@ -1861,6 +1873,12 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 	return retval;
 }
 
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+			       unsigned numpages)
+{
+	unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
+}
+
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.