Commit 99161de3 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/radix: tidy up TLB flushing code

There should be no functional changes.

- Use calls to existing radix_tlb.c functions in flush_partition.

- Rename radix__flush_tlb_lpid to radix__flush_all_lpid and similar,
  because they flush everything, matching flush_all_mm rather than
  flush_tlb_mm for the lpid.

- Remove some unused radix_tlb.c flush primitives.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190902152931.17840-3-npiggin@gmail.com
parent ed6546bd
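
For orientation, the following is a minimal, self-contained C sketch of the flush_partition() structure after this patch, with the radix_tlb.c helpers stubbed out for userspace (the stubs and printf bodies are illustrative only, not kernel code). It shows how the radix branch now delegates to the renamed radix__flush_all_lpid()/radix__flush_all_lpid_guest() entry points, while the hash branch keeps its open-coded tlbie sequence.

    /*
     * Illustrative sketch only: userspace stubs stand in for the real
     * radix_tlb.c helpers so the control flow can be compiled and run.
     */
    #include <stdbool.h>
    #include <stdio.h>

    /* Renamed entry points (formerly radix__flush_tlb_lpid and
     * radix__local_flush_tlb_lpid_guest); stubbed for illustration. */
    static void radix__flush_all_lpid(unsigned int lpid)
    {
            printf("flush all partition scoped translations for LPID %u\n", lpid);
    }

    static void radix__flush_all_lpid_guest(unsigned int lpid)
    {
            printf("flush all process scoped (guest) translations for LPID %u\n", lpid);
    }

    static void flush_partition(unsigned int lpid, bool radix)
    {
            if (radix) {
                    /* Radix: reuse the existing radix_tlb.c flush routines. */
                    radix__flush_all_lpid(lpid);
                    radix__flush_all_lpid_guest(lpid);
            } else {
                    /* Hash: the open-coded tlbie sequence stays here (elided). */
                    printf("hash: open-coded tlbie sequence for LPID %u\n", lpid);
            }
    }

    int main(void)
    {
            flush_partition(1, true);
            return 0;
    }
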
@@ -17,8 +17,8 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
                                         unsigned long addr,
                                         unsigned long page_size);
 extern void radix__flush_pwc_lpid(unsigned int lpid);
-extern void radix__flush_tlb_lpid(unsigned int lpid);
-extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
+extern void radix__flush_all_lpid(unsigned int lpid);
+extern void radix__flush_all_lpid_guest(unsigned int lpid);
 #else
 static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
 static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
@@ -31,11 +31,7 @@ static inline void radix__flush_pwc_lpid(unsigned int lpid)
 {
         WARN_ON(1);
 }
-static inline void radix__flush_tlb_lpid(unsigned int lpid)
-{
-        WARN_ON(1);
-}
-static inline void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
+static inline void radix__flush_all_lpid(unsigned int lpid)
 {
         WARN_ON(1);
 }
@@ -73,6 +69,4 @@ extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
 extern void radix__flush_tlb_all(void);
 
-extern void radix__local_flush_tlb_lpid(unsigned int lpid);
-
 #endif
@@ -398,7 +398,7 @@ static void kvmhv_flush_lpid(unsigned int lpid)
         long rc;
 
         if (!kvmhv_on_pseries()) {
-                radix__flush_tlb_lpid(lpid);
+                radix__flush_all_lpid(lpid);
                 return;
         }
 
......
@@ -210,20 +210,17 @@ void __init mmu_partition_table_init(void)
 
 static void flush_partition(unsigned int lpid, bool radix)
 {
-        asm volatile("ptesync" : : : "memory");
         if (radix) {
-                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
-                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
-                asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
-                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
-                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
+                radix__flush_all_lpid(lpid);
+                radix__flush_all_lpid_guest(lpid);
         } else {
+                asm volatile("ptesync" : : : "memory");
                 asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+                /* do we need fixup here ?*/
+                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                 trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
         }
-        /* do we need fixup here ?*/
-        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
......
@@ -116,22 +116,6 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
         trace_tlbie(0, 0, rb, rs, ric, prs, r);
 }
 
-static __always_inline void __tlbiel_lpid(unsigned long lpid, int set,
-                                unsigned long ric)
-{
-        unsigned long rb,rs,prs,r;
-
-        rb = PPC_BIT(52); /* IS = 2 */
-        rb |= set << PPC_BITLSHIFT(51);
-        rs = 0; /* LPID comes from LPIDR */
-        prs = 0; /* partition scoped */
-        r = 1; /* radix format */
-
-        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
-                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-        trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
-}
-
 static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
 {
         unsigned long rb,rs,prs,r;
@@ -146,23 +130,20 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
         trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
 }
 
-static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
-                                unsigned long ric)
+static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
 {
         unsigned long rb,rs,prs,r;
 
         rb = PPC_BIT(52); /* IS = 2 */
-        rb |= set << PPC_BITLSHIFT(51);
-        rs = 0; /* LPID comes from LPIDR */
+        rs = lpid;
         prs = 1; /* process scoped */
         r = 1; /* radix format */
 
-        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                      : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
-        trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
+        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
 }
 
 static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
                                 unsigned long ap, unsigned long ric)
 {
@@ -285,34 +266,6 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
         asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
-static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
-{
-        int set;
-
-        VM_BUG_ON(mfspr(SPRN_LPID) != lpid);
-
-        asm volatile("ptesync": : :"memory");
-
-        /*
-         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
-         * also flush the entire Page Walk Cache.
-         */
-        __tlbiel_lpid(lpid, 0, ric);
-
-        /* For PWC, only one flush is needed */
-        if (ric == RIC_FLUSH_PWC) {
-                asm volatile("ptesync": : :"memory");
-                return;
-        }
-
-        /* For the remaining sets, just flush the TLB */
-        for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
-                __tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);
-
-        asm volatile("ptesync": : :"memory");
-        asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST "; isync" : : :"memory");
-}
-
 static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
 {
         asm volatile("ptesync": : :"memory");
@@ -337,35 +290,28 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
         asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
-static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
 {
-        int set;
-
-        VM_BUG_ON(mfspr(SPRN_LPID) != lpid);
-
-        asm volatile("ptesync": : :"memory");
-
         /*
-         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
-         * also flush the entire Page Walk Cache.
+         * Workaround the fact that the "ric" argument to __tlbie_pid
+         * must be a compile-time contraint to match the "i" constraint
+         * in the asm statement.
          */
-        __tlbiel_lpid_guest(lpid, 0, ric);
-
-        /* For PWC, only one flush is needed */
-        if (ric == RIC_FLUSH_PWC) {
-                asm volatile("ptesync": : :"memory");
-                return;
+        switch (ric) {
+        case RIC_FLUSH_TLB:
+                __tlbie_lpid_guest(lpid, RIC_FLUSH_TLB);
+                break;
+        case RIC_FLUSH_PWC:
+                __tlbie_lpid_guest(lpid, RIC_FLUSH_PWC);
+                break;
+        case RIC_FLUSH_ALL:
+        default:
+                __tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
         }
-
-        /* For the remaining sets, just flush the TLB */
-        for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
-                __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
-
-        asm volatile("ptesync": : :"memory");
-        asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
+        fixup_tlbie_lpid(lpid);
+        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 
 static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
                                 unsigned long pid, unsigned long page_size,
                                 unsigned long psize)
@@ -835,32 +781,19 @@ EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
 /*
  * Flush partition scoped translations from LPID (=LPIDR)
  */
-void radix__flush_tlb_lpid(unsigned int lpid)
+void radix__flush_all_lpid(unsigned int lpid)
 {
         _tlbie_lpid(lpid, RIC_FLUSH_ALL);
 }
-EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid);
+EXPORT_SYMBOL_GPL(radix__flush_all_lpid);
 
 /*
- * Flush partition scoped translations from LPID (=LPIDR)
+ * Flush process scoped translations from LPID (=LPIDR)
  */
-void radix__local_flush_tlb_lpid(unsigned int lpid)
+void radix__flush_all_lpid_guest(unsigned int lpid)
 {
-        _tlbiel_lpid(lpid, RIC_FLUSH_ALL);
+        _tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
 }
-EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid);
-
-/*
- * Flush process scoped translations from LPID (=LPIDR).
- * Important difference, the guest normally manages its own translations,
- * but some cases e.g., vCPU CPU migration require KVM to flush.
- */
-void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
-{
-        _tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL);
-}
-EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest);
 
 static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                 unsigned long end, int psize);
......
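
A note on the switch in the new _tlbie_lpid_guest() above: the "ric" operand is passed to an inline-asm "i" (immediate) constraint, so it must be a compile-time constant at the point the asm is expanded. The sketch below is a minimal userspace analogue of that pattern, not kernel code; it assumes x86-64 gcc or clang with optimization enabled (e.g. -O2) so the always_inline call in each switch arm collapses to a literal.

    /*
     * Userspace analogue of the compile-time-constant workaround: the "i"
     * constraint needs an integer constant, so a runtime value is mapped to
     * literals through a switch and an always_inline helper.
     * Build with optimization (e.g. gcc -O2) so the constant propagates.
     */
    #include <stdio.h>

    static inline __attribute__((always_inline)) void emit_imm(unsigned long val)
    {
            /* The template only emits an assembler comment; %0 must be an immediate. */
            asm volatile("# immediate operand %0" : : "i"(val));
    }

    static void emit(unsigned long val)
    {
            switch (val) {
            case 0:
                    emit_imm(0);
                    break;
            case 1:
                    emit_imm(1);
                    break;
            default:
                    emit_imm(2);
            }
    }

    int main(void)
    {
            emit(1);
            printf("ok\n");
            return 0;
    }
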