Commit e88b3331 authored by Nanyong Sun, committed by Palmer Dabbelt

riscv: mm: add THP support on 64-bit

Bring Transparent HugePage support to riscv: a transparent
huge page is always represented as a single PMD entry.
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent c3b2d670
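
For context: on riscv's Sv39/Sv48 layouts a PMD-level leaf maps PMD_SIZE bytes, which with 4 KiB base pages and 9 index bits per level comes to 2 MiB. A minimal userspace sketch of that arithmetic, using the usual riscv constants as assumptions rather than anything defined in this patch:

#include <stdio.h>

/* Assumed riscv constants: 4 KiB base pages, 9 index bits per level. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	(PAGE_SHIFT + 9)	/* 21 */
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* bytes covered by one PMD leaf */

int main(void)
{
	/* One transparent huge page == one PMD leaf == 2 MiB here. */
	printf("PMD_SIZE = %lu bytes (%lu MiB)\n", PMD_SIZE, PMD_SIZE >> 20);
	return 0;
}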
@@ -103,6 +103,7 @@ config RISCV
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select UACCESS_MEMCPY if !MMU
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
...
@@ -172,10 +172,23 @@ extern pgd_t swapper_pg_dir[];
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
/*
 * Checking for _PAGE_LEAF is needed too because:
 * when splitting a THP, split_huge_page() will temporarily clear
 * the present bit; in this situation, pmd_present() and
 * pmd_trans_huge() still need to return true.
 */
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
static inline int pmd_none(pmd_t pmd)
{
@@ -369,6 +382,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
local_flush_tlb_page(address);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pte_t *ptep = (pte_t *)pmdp;
update_mmu_cache(vma, address, ptep);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -462,6 +483,141 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
/*
* THP functions
*/
static inline pmd_t pte_pmd(pte_t pte)
{
return __pmd(pte_val(pte));
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}
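
The two definitions above interact: pmd_mkinvalid() clears _PAGE_PRESENT and _PAGE_PROT_NONE, yet the THP version of pmd_present() keeps reporting the entry as present via _PAGE_LEAF, which is exactly the split_huge_page() situation described in the comment earlier. A toy demonstration with assumed bit values (they mimic the riscv PTE format but are placeholders of this sketch, not taken from the patch):

#include <stdio.h>

#define _PAGE_PRESENT	(1UL << 0)
#define _PAGE_READ	(1UL << 1)
#define _PAGE_WRITE	(1UL << 2)
#define _PAGE_EXEC	(1UL << 3)
#define _PAGE_PROT_NONE	(1UL << 5)	/* placeholder bit for this sketch */
#define _PAGE_LEAF	(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)

static int pmd_present(unsigned long pmd)
{
	return !!(pmd & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}

static unsigned long pmd_mkinvalid(unsigned long pmd)
{
	return pmd & ~(_PAGE_PRESENT | _PAGE_PROT_NONE);
}

int main(void)
{
	unsigned long pmd = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE;

	pmd = pmd_mkinvalid(pmd);	/* what a THP split does transiently */
	printf("still present: %d\n", pmd_present(pmd));	/* prints 1 */
	return 0;
}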
#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
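
A worked example of the pfn extraction above may help. The PPN field of a riscv PTE starts above the flag bits at bit 10 (_PAGE_PFN_SHIFT), so __pmd_to_phys() shifts the flags away and then scales up to a physical address; pmd_pfn() masks to a PMD boundary before converting back. A userspace sketch with assumed constants:

#include <stdio.h>

#define PAGE_SHIFT	12
#define _PAGE_PFN_SHIFT	10	/* PPN starts above the flag bits */
#define PMD_SHIFT	21
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

int main(void)
{
	unsigned long pfn = 0x80200;	/* a PMD-aligned pfn */
	unsigned long pmd_val = pfn << _PAGE_PFN_SHIFT;	/* PPN field, flags zero */

	/* __pmd_to_phys: drop the flags, scale up to a physical address. */
	unsigned long phys = pmd_val >> _PAGE_PFN_SHIFT << PAGE_SHIFT;
	/* pmd_pfn: mask to the huge-page boundary, back down to a pfn. */
	unsigned long back = (phys & PMD_MASK) >> PAGE_SHIFT;

	printf("phys = 0x%lx, pfn = 0x%lx\n", phys, back);	/* round-trips */
	return 0;
}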
static inline pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
return pfn_pmd(page_to_pfn(page), prot);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return pte_write(pmd_pte(pmd));
}
static inline int pmd_dirty(pmd_t pmd)
{
return pte_dirty(pmd_pte(pmd));
}
static inline int pmd_young(pmd_t pmd)
{
return pte_young(pmd_pte(pmd));
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
return pte_pmd(pte_mkold(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}
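
Nearly every helper in this block follows the same cast-reuse pattern: pte_t and pmd_t both wrap a single unsigned long on riscv, so the pmd operations (including set_pmd_at() here and update_mmu_cache_pmd() earlier) simply delegate to their pte counterparts. A minimal sketch of the idea with toy types, not the kernel's; in the kernel itself, building with -fno-strict-aliasing is what makes the pointer cast safe:

#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;

static void set_pte_entry(pte_t *ptep, pte_t pte)
{
	ptep->pte = pte.pte;	/* stand-in for the real set_pte_at() work */
}

static void set_pmd_entry(pmd_t *pmdp, pmd_t pmd)
{
	/* Identical layout lets the pmd path delegate via a cast. */
	set_pte_entry((pte_t *)pmdp, (pte_t){ pmd.pmd });
}

int main(void)
{
	pmd_t slot = { 0 };

	set_pmd_entry(&slot, (pmd_t){ 0xabc });
	printf("slot = 0x%lx\n", slot.pmd);
	return 0;
}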
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
return pmd_leaf(pmd);
}
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
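
pmdp_establish() has to publish the new entry and hand back the old one in a single atomic step, so a concurrent hardware walk never observes a torn pmd; the atomic_long_xchg() above provides exactly that. A userspace analogue using C11 atomics as an assumed stand-in:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned long pmd_slot = 0x1234;	/* current entry */

	/* Swap in a new value and receive the previous one atomically. */
	unsigned long old = atomic_exchange(&pmd_slot, 0x5678);

	printf("old = 0x%lx, now = 0x%lx\n", old,
	       (unsigned long)atomic_load(&pmd_slot));
	return 0;
}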
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Encode and decode a swap entry
*
...
@@ -54,3 +54,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
{
__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE);
}
#endif
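
The only difference from flush_tlb_range() above is the PMD_SIZE stride: for a range backed by 2 MiB leaves, the flush can conceptually step once per huge page instead of once per 4 KiB page. Illustrative arithmetic only, with the usual riscv sizes assumed:

#include <stdio.h>

#define PAGE_SIZE	(1UL << 12)
#define PMD_SIZE	(1UL << 21)

int main(void)
{
	unsigned long start = 0, end = 16UL << 20;	/* a 16 MiB range */

	/* One invalidation per stride-sized step across the range. */
	printf("PAGE_SIZE stride: %lu steps\n", (end - start) / PAGE_SIZE);
	printf("PMD_SIZE stride:  %lu steps\n", (end - start) / PMD_SIZE);
	return 0;
}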