Commit 28c807e5 authored by Martin Schwidefsky

s390/mm: add guest ASCE TLB flush optimization

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 118bd31b
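
The hunks below extend the s390 TLB-flush helpers (__ptep_ipte, __pmdp_idte, __pudp_idte) with an ASCE operand: when the guest-TLB facility is available, the ASCE is handed to the IPTE/IDTE instruction together with the new IPTE_GUEST_ASCE/IDTE_GUEST_ASCE option so that guest TLB entries can be flushed more selectively. The selection logic the callers apply is easiest to see in isolation. The following is a minimal, standalone C sketch of what ptep_ipte_local()/ptep_ipte_global() do with mm->context.gmap_asce in this patch; struct ctx and pick_ipte_operands are illustrative names, not kernel code.

#include <stdio.h>

#define IPTE_NODAT	0x400UL		/* option values taken from the hunks below */
#define IPTE_GUEST_ASCE	0x800UL

/* Hypothetical stand-in for mm->context; illustration only. */
struct ctx {
	unsigned long asce;		/* the mm's own ASCE */
	unsigned long gmap_asce;	/* 0: no guest, -1UL: guest ASCE unknown, else guest ASCE */
};

/* Mirrors the option/ASCE selection added to ptep_ipte_local()/_global(). */
static void pick_ipte_operands(const struct ctx *c,
			       unsigned long *opt, unsigned long *asce)
{
	*opt = 0;
	*asce = c->gmap_asce;
	if (*asce == 0UL)			/* no guest mapping exists */
		*opt |= IPTE_NODAT;
	if (*asce != -1UL) {			/* a usable ASCE is known */
		if (*asce == 0UL)
			*asce = c->asce;	/* fall back to the mm's own ASCE */
		*opt |= IPTE_GUEST_ASCE;
	}
	/* the kernel code now issues __ptep_ipte(addr, ptep, opt, asce, ...) */
}

int main(void)
{
	struct ctx c = { .asce = 0x10000UL, .gmap_asce = 0UL };
	unsigned long opt, asce;

	pick_ipte_operands(&c, &opt, &asce);
	printf("opt=%#lx asce=%#lx\n", opt, asce);	/* NODAT | GUEST_ASCE, own ASCE */
	return 0;
}

In short: gmap_asce == 0 means no guest mapping exists (NODAT can be set), -1UL means a guest exists but its ASCE is not usable (no guest-ASCE optimization), and any other value is the guest ASCE to flush against.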
@@ -953,9 +953,11 @@ static inline pte_t pte_mkhuge(pte_t pte)
 #define IPTE_LOCAL	1
 #define IPTE_NODAT	0x400
+#define IPTE_GUEST_ASCE	0x800
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
-			       unsigned long opt, int local)
+			       unsigned long opt, unsigned long asce,
+			       int local)
 {
 	unsigned long pto = (unsigned long) ptep;
@@ -969,6 +971,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
 	}
 	/* Invalidate ptes with options + TLB flush of the ptes */
+	opt = opt | (asce & _ASCE_ORIGIN);
 	asm volatile(
 		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
 		: [r2] "+a" (address), [r3] "+a" (opt)
@@ -1355,34 +1358,59 @@ static inline void __pmdp_csp(pmd_t *pmdp)
 #define IDTE_PTOA	0x0800
 #define IDTE_NODAT	0x1000
+#define IDTE_GUEST_ASCE	0x2000
 
 static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
-			       unsigned long opt, int local)
+			       unsigned long opt, unsigned long asce,
+			       int local)
 {
 	unsigned long sto;
 	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
-		: "+m" (*pmdp)
-		: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
-		  [m4] "i" (local)
-		: "cc" );
+	if (__builtin_constant_p(opt) && opt == 0) {
+		/* flush without guest asce */
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+			: "+m" (*pmdp)
+			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
+			  [m4] "i" (local)
+			: "cc" );
+	} else {
+		/* flush with guest asce */
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+			: "+m" (*pmdp)
+			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
+			  [r3] "a" (asce), [m4] "i" (local)
+			: "cc" );
+	}
 }
 
 static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
-			       unsigned long opt, int local)
+			       unsigned long opt, unsigned long asce,
+			       int local)
 {
 	unsigned long r3o;
 	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
 	r3o |= _ASCE_TYPE_REGION3;
-	asm volatile(
-		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
-		: "+m" (*pudp)
-		: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
-		  [m4] "i" (local)
-		: "cc" );
+	if (__builtin_constant_p(opt) && opt == 0) {
+		/* flush without guest asce */
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+			: "+m" (*pudp)
+			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
+			  [m4] "i" (local)
+			: "cc");
+	} else {
+		/* flush with guest asce */
+		asm volatile(
+			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+			: "+m" (*pudp)
+			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
+			  [r3] "a" (asce), [m4] "i" (local)
+			: "cc" );
+	}
 }
 
 pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
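
The rewritten __pmdp_idte()/__pudp_idte() bodies branch on GCC's __builtin_constant_p(opt) && opt == 0, so callers that pass a literal 0 keep the old two-operand IDTE form (no extra register tied up for options or ASCE), while all other callers get the form that carries the options and the guest ASCE. A minimal standalone illustration of that compile-time dispatch pattern (illustrative names, not kernel code; the fold to the short form relies on the call being inlined with optimization enabled):

#include <stdio.h>

/* Picks a variant at compile time: a literal 0 selects the short form. */
static inline void idte_demo(unsigned long opt, unsigned long asce)
{
	if (__builtin_constant_p(opt) && opt == 0)
		printf("short form: no option/ASCE operand needed\n");
	else
		printf("long form: opt=%#lx asce=%#lx\n", opt, asce);
}

int main(void)
{
	idte_demo(0, 0);				/* folds to the short form when inlined */
	idte_demo(0x1000UL | 0x2000UL, 0x10000UL);	/* NODAT | GUEST_ASCE with an ASCE */
	return 0;
}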
@@ -23,6 +23,8 @@ static inline void __tlb_flush_idte(unsigned long asce)
 	unsigned long opt;
 	opt = IDTE_PTOA;
+	if (MACHINE_HAS_TLB_GUEST)
+		opt |= IDTE_GUEST_ASCE;
 	/* Global TLB flush for the mm */
 	asm volatile(
 		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
@@ -328,7 +328,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 		return;
 	}
 	for (i = 0; i < nr; i++) {
-		__ptep_ipte(address, pte, 0, IPTE_GLOBAL);
+		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
 		address += PAGE_SIZE;
 		pte++;
 	}
@@ -35,9 +35,13 @@ static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
 		asce = READ_ONCE(mm->context.gmap_asce);
 		if (asce == 0UL)
 			opt |= IPTE_NODAT;
-		__ptep_ipte(addr, ptep, opt, IPTE_LOCAL);
+		if (asce != -1UL) {
+			asce = asce ? : mm->context.asce;
+			opt |= IPTE_GUEST_ASCE;
+		}
+		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
 	} else {
-		__ptep_ipte(addr, ptep, 0, IPTE_LOCAL);
+		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
 	}
 }
@@ -51,9 +55,13 @@ static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
 		asce = READ_ONCE(mm->context.gmap_asce);
 		if (asce == 0UL)
 			opt |= IPTE_NODAT;
-		__ptep_ipte(addr, ptep, opt, IPTE_GLOBAL);
+		if (asce != -1UL) {
+			asce = asce ? : mm->context.asce;
+			opt |= IPTE_GUEST_ASCE;
+		}
+		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
 	} else {
-		__ptep_ipte(addr, ptep, 0, IPTE_GLOBAL);
+		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
 	}
 }
@@ -326,18 +334,20 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
 				    unsigned long addr, pmd_t *pmdp)
 {
 	if (MACHINE_HAS_TLB_GUEST)
-		__pmdp_idte(addr, pmdp, IDTE_NODAT, IDTE_LOCAL);
+		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
+			    mm->context.asce, IDTE_LOCAL);
 	else
-		__pmdp_idte(addr, pmdp, 0, IDTE_LOCAL);
+		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 }
 
 static inline void pmdp_idte_global(struct mm_struct *mm,
 				    unsigned long addr, pmd_t *pmdp)
 {
 	if (MACHINE_HAS_TLB_GUEST)
-		__pmdp_idte(addr, pmdp, IDTE_NODAT, IDTE_GLOBAL);
+		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
+			    mm->context.asce, IDTE_GLOBAL);
 	else if (MACHINE_HAS_IDTE)
-		__pmdp_idte(addr, pmdp, 0, IDTE_GLOBAL);
+		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 	else
 		__pmdp_csp(pmdp);
 }
@@ -410,18 +420,20 @@ static inline void pudp_idte_local(struct mm_struct *mm,
 				    unsigned long addr, pud_t *pudp)
 {
 	if (MACHINE_HAS_TLB_GUEST)
-		__pudp_idte(addr, pudp, IDTE_NODAT, IDTE_LOCAL);
+		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
+			    mm->context.asce, IDTE_LOCAL);
 	else
-		__pudp_idte(addr, pudp, 0, IDTE_LOCAL);
+		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 }
 
 static inline void pudp_idte_global(struct mm_struct *mm,
 				    unsigned long addr, pud_t *pudp)
 {
 	if (MACHINE_HAS_TLB_GUEST)
-		__pudp_idte(addr, pudp, IDTE_NODAT, IDTE_GLOBAL);
+		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
+			    mm->context.asce, IDTE_GLOBAL);
 	else if (MACHINE_HAS_IDTE)
-		__pudp_idte(addr, pudp, 0, IDTE_GLOBAL);
+		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 	else
 		/*
 		 * Invalid bit position is the same for pmd and pud, so we can