Commit fe6c1b86 authored by Vineet Gupta

ARCv2: mm: THP support

MMUv4 in HS38x cores supports Super Pages, which are the basis for Linux THP
support.

Normal and Super pages can co-exist (though of course not overlap) in the TLB,
with a new bit "SZ" in the TLB page descriptor to distinguish between them.
The Super Page size is configurable in hardware (4K to 16M), but is fixed once
the RTL is built.

The exact THP size a Linux configuration will support is a function of:
 - MMU page size (typically 8K, fixed in RTL)
 - software page walker address split between PGD:PTE:PFN (typically
   11:8:13, but can be changed with a one-line change)

So for the above defaults, the supported THP size is 8K * 256 = 2M.
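A minimal arithmetic sketch of that sizing (the EXAMPLE_* names are
illustrative placeholders, not identifiers from this patch):

/*
 * With 8K base pages (PAGE_SHIFT = 13) and 8 bits of PTE index (the middle
 * level of the 11:8:13 split), one PMD entry maps 2^(13 + 8) bytes = 2M.
 */
#define EXAMPLE_PAGE_SHIFT	13
#define EXAMPLE_BITS_FOR_PTE	8
#define EXAMPLE_PMD_SHIFT	(EXAMPLE_PAGE_SHIFT + EXAMPLE_BITS_FOR_PTE)
#define EXAMPLE_HPAGE_SIZE	(1UL << EXAMPLE_PMD_SHIFT)	/* 0x200000 = 2M */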

The default page walker is 2 levels (PGD:PTE:PFN), which in the THP regime
reduces to 1 level (as the PTE is folded into the PGD and canonically referred
to as the PMD).

Thus the THP PMD accessors are implemented in terms of PTE (just like sparc).
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 55ad769f
@@ -76,6 +76,10 @@ config STACKTRACE_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
	def_bool y

config HAVE_ARCH_TRANSPARENT_HUGEPAGE
	def_bool y
	depends on ARC_MMU_V4

source "init/Kconfig"
source "kernel/Kconfig.freezer"
......
/*
* Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_HUGEPAGE_H
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_special(pmd) pte_special(pmd_pte(pmd))
#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
#define pmd_trans_splitting(pmd) (pmd_trans_huge(pmd) && pmd_special(pmd))
#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	/*
	 * open-coded pte_modify() with additional retaining of HW_SZ bit
	 * so that pmd_trans_huge() remains true for this PMD
	 */
	return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) |
		     pgprot_val(newprot));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
#define has_transparent_hugepage() 1
/* Generic variants assume pgtable_t is struct page *, hence need for these */
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
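For orientation, the generic THP fault path consumes the helpers above roughly
as sketched below; this is an illustrative example only (the function name and
the omission of locking and fallback handling are not from this patch):

#include <linux/mm.h>

/*
 * Hypothetical illustration: build a huge PMD from a compound page, mark it
 * huge and dirty (pmd_mkhuge() sets _PAGE_HW_SZ via pte_mkhuge()), deposit a
 * spare page table for a possible later split, then install the mapping.
 */
static void example_install_huge_pmd(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long haddr, pmd_t *pmd,
				     struct page *page, pgtable_t pgtable)
{
	pmd_t entry = mk_pmd(page, vma->vm_page_prot);

	entry = pmd_mkhuge(pmd_mkdirty(entry));
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	update_mmu_cache_pmd(vma, haddr, pmd);
}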
@@ -64,6 +64,7 @@ typedef unsigned long pgprot_t;
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#define pte_pgprot(x) (x)
......
@@ -83,11 +83,13 @@
#define _PAGE_PRESENT (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT (1<<12)
#endif

/* vmalloc permissions */
@@ -99,6 +101,10 @@
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ 0
#endif
/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

@@ -125,7 +131,7 @@
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)

@@ -299,6 +305,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));

@@ -308,6 +315,7 @@ PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
#define __HAVE_ARCH_PTE_SPECIAL

@@ -381,6 +389,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif
#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
......
@@ -256,6 +256,18 @@ noinline void local_flush_tlb_all(void)
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
@@ -580,6 +592,75 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
	}
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux
 * THP support.
 *
 * Normal and Super pages can co-exist (though of course not overlap) in the
 * TLB, with a new bit "SZ" in the TLB page descriptor to distinguish between
 * them. The Super Page size is configurable in hardware (4K to 16M), but is
 * fixed once the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typically 8K, fixed in RTL)
 *  - software page walker address split between PGD:PTE:PFN (typically
 *    11:8:13, but can be changed with a one-line change)
 * So for the above defaults, the supported THP size is 8K * 2^8 = 2M
 *
 * The default page walker is 2 levels (PGD:PTE:PFN), which in the THP regime
 * reduces to 1 level (as the PTE is folded into the PGD and canonically
 * referred to as the PMD).
 * Thus the THP PMD accessors are implemented in terms of PTE (just like sparc).
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif
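A side note on the deposit/withdraw pair above: on ARC, pgtable_t is a kernel
pointer to the PTE page rather than a struct page * (which is why the generic
variants cannot be used), so the list_head linking deposited tables is overlaid
on the first two PTE slots; that is what pgtable_trans_huge_withdraw() clears
before returning the table. A minimal sketch of that size assumption
(illustrative only, not code from this patch; it assumes 32-bit PTE slots):

#include <linux/bug.h>
#include <linux/list.h>

static inline void example_assert_list_head_fits_in_two_ptes(void)
{
	/* struct list_head (two pointers) must fit in pgtable[0]/pgtable[1] */
	BUILD_BUG_ON(sizeof(struct list_head) > 2 * sizeof(unsigned long));
}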
/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
......
@@ -205,10 +205,18 @@ ex_saved_reg1:
#endif

	lsr	r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as	r3, [r1, r0]		; PGD entry corresp to faulting addr
	tst	r3, r3
	bz	do_slow_path_pf		; if no Page Table, do page fault

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f	0, r3, _PAGE_HW_SZ	; Is this Huge PMD (thp)
	add2.nz	r1, r1, r0
	bnz.d	2f			; YES: PGD == PMD has THP PTE: stop pgd walk
	mov.nz	r0, r3
#endif

	and	r1, r3, PAGE_MASK
	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr

@@ -219,6 +227,9 @@ ex_saved_reg1:
	lsr	r0, r2, (PAGE_SHIFT - 2)
	and	r0, r0, ( (PTRS_PER_PTE - 1) << 2)
	ld.aw	r0, [r1, r0]		; get PTE and PTE ptr for fault addr
2:
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
	and.f	0, r0, _PAGE_PRESENT
	bz	1f
......
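In C terms, the fast-path lookup the handler above performs is roughly the
following; this is an illustrative sketch only (the hypothetical helper works
on flat unsigned longs and ignores the PD0/PD1 split and the pointer
write-back done by ld.aw):

#include <linux/kernel.h>
#include <asm/pgtable.h>

/*
 * Hypothetical C rendering of the walk: if the PGD entry itself carries
 * _PAGE_HW_SZ, it already is the huge PTE (the PMD), so the second-level
 * lookup is skipped -- exactly the "2f" short-circuit above.
 */
static unsigned long example_fast_lookup(unsigned long *pgd_base,
					 unsigned long faddr)
{
	unsigned long pgd_entry = pgd_base[faddr >> PGDIR_SHIFT];
	unsigned long *ptep;

	if (!pgd_entry)
		return 0;		/* no page table: take the slow path */

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && (pgd_entry & _PAGE_HW_SZ))
		return pgd_entry;	/* THP: PGD entry is the huge PTE */

	ptep = (unsigned long *)(pgd_entry & PAGE_MASK);
	return ptep[(faddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
}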