Commit 640710a3 authored by Chris Metcalf

tile: add virt_to_kpte() API and clean up and document behavior

We use virt_to_pte(NULL, va) a lot, which isn't very obvious.
I added virt_to_kpte(va) as a more obvious wrapper function
that also validates that the va is a kernel address.

I also fixed the semantics of virt_to_pte() so that we handle
the pud and pmd the same way, and we now document the fact that
we handle the final pte level differently.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 49cf78ef
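
For illustration, a minimal sketch of how the new wrapper reads at a call site. example_kva_home() is a hypothetical caller modeled on page_home() in the diff below, not part of this commit, and the headers are a guess:

#include <asm/page.h>		/* virt_to_kpte() declaration, per this commit */
#include <asm/homecache.h>	/* assumed home for pte_to_home() */

/* Hypothetical caller: report the cache home of a kernel VA. */
static int example_kva_home(unsigned long kva)
{
	/*
	 * Previously this would be virt_to_pte(NULL, kva), where the
	 * NULL mm implicitly meant "the kernel page table".  The
	 * wrapper makes that explicit, and BUG()s if kva is a
	 * user-space address (below PAGE_OFFSET).
	 */
	pte_t *ptep = virt_to_kpte(kva);

	return pte_to_home(*ptep);
}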
@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 static inline void install_page_table(pgd_t *pgdir, int asid)
 {
-	pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
+	pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
 	__install_page_table(pgdir, asid, *ptep);
 }
...
@@ -328,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn)
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+extern pte_t *virt_to_kpte(unsigned long kaddr);
 #endif /* !__ASSEMBLY__ */
...
@@ -1600,7 +1600,7 @@ void __init setup_per_cpu_areas(void)
 		/* Update the vmalloc mapping and page home. */
 		unsigned long addr = (unsigned long)ptr + i;
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1609,12 +1609,12 @@ void __init setup_per_cpu_areas(void)
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-		ptep = virt_to_pte(NULL, lowmem_va);
+		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
 			printk(KERN_DEBUG "early shatter of huge page"
 			       " at %#lx\n", lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
...
@@ -200,7 +200,7 @@ void homecache_finv_map_page(struct page *page, int home)
 #else
 	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
 #endif
-	ptep = virt_to_pte(NULL, (unsigned long)va);
+	ptep = virt_to_kpte(va);
 	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 	__set_pte(ptep, pte_set_home(pte, home));
 	homecache_finv_page_va((void *)va, home);
@@ -385,7 +385,7 @@ int page_home(struct page *page)
 		return initial_page_home();
 	} else {
 		unsigned long kva = (unsigned long)page_address(page);
-		return pte_to_home(*virt_to_pte(NULL, kva));
+		return pte_to_home(*virt_to_kpte(kva));
 	}
 }
 EXPORT_SYMBOL(page_home);
@@ -404,7 +404,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
 			 NULL, 0);
 	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
-		pte_t *ptep = virt_to_pte(NULL, kva);
+		pte_t *ptep = virt_to_kpte(kva);
 		pte_t pteval = *ptep;
 		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
 		__set_pte(ptep, pte_set_home(pteval, home));
...
@@ -951,7 +951,7 @@ static void mark_w1data_ro(void)
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		BUG_ON(pte_huge(*ptep));	/* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
 	}
@@ -997,7 +997,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		 */
 		int pfn = kaddr_to_pfn((void *)addr);
 		struct page *page = pfn_to_page(pfn);
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		if (!initfree) {
 			/*
 			 * If debugging page accesses then do not free
...
@@ -325,6 +325,17 @@ void ptep_set_wrprotect(struct mm_struct *mm,
 #endif
 
+/*
+ * Return a pointer to the PTE that corresponds to the given
+ * address in the given page table.  A NULL page table just uses
+ * the standard kernel page table; the preferred API in this case
+ * is virt_to_kpte().
+ *
+ * The returned pointer can point to a huge page in other levels
+ * of the page table than the bottom, if the huge page is present
+ * in the page table.  For bottom-level PTEs, the returned pointer
+ * can point to a PTE that is either present or not.
+ */
 pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -341,14 +352,21 @@ pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 	if (pud_huge_page(*pud))
 		return (pte_t *)pud;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_huge_page(*pmd))
-		return (pte_t *)pmd;
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_huge_page(*pmd))
+		return (pte_t *)pmd;
 	return pte_offset_kernel(pmd, addr);
 }
 EXPORT_SYMBOL(virt_to_pte);
+
+pte_t *virt_to_kpte(unsigned long kaddr)
+{
+	BUG_ON(kaddr < PAGE_OFFSET);
+	return virt_to_pte(NULL, kaddr);
+}
+EXPORT_SYMBOL(virt_to_kpte);
 
 pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
 {
 	unsigned int width = smp_width;
...
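
Given the contract documented in the new comment block, a hedged sketch of the three outcomes a caller has to be prepared for. example_classify_mapping() is hypothetical and not part of this commit:

/*
 * Hypothetical caller illustrating the documented return values of
 * virt_to_pte()/virt_to_kpte().
 */
static void example_classify_mapping(unsigned long kva)
{
	pte_t *ptep = virt_to_kpte(kva);

	if (ptep == NULL)
		return;		/* no present entry at the pud or pmd level */
	if (pte_huge(*ptep))
		return;		/* ptep actually points at a huge pud/pmd entry */
	if (!pte_present(*ptep))
		return;		/* bottom-level PTE exists but is not present */

	/* ... a normal, present, small-page PTE ... */
}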