Commit 126b11b2 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/hash: Add SLB allocation status bitmaps

Add 32-entry bitmaps to track the allocation status of the first 32
SLB entries, and whether they are user or kernel entries. These are
used to allocate free SLB entries first, before resorting to the round
robin allocator.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 48e7b769
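
The allocation policy described in the message above, sketched as a standalone C program rather than kernel code (the helper names first_zero() and alloc_slb_slot(), and the fixed SLB_NUM_BOLTED/SLB_SIZE values, are illustrative assumptions; the kernel itself uses ffz(), local_paca->slb_used_bitmap/slb_kern_bitmap and stab_rr as in the diff below): a 32-bit bitmap records which of the first 32 SLB slots are in use, a free slot is claimed by taking the first zero bit, and once every slot has been used the allocator falls back to round-robin replacement of the non-bolted entries.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; the real values come from the powerpc MMU headers. */
#define SLB_NUM_BOLTED	2	/* bolted entries are never replaced */
#define SLB_SIZE	32	/* POWER7/8/9 have 32 SLB entries */

static uint32_t used_bitmap;	/* bit n set => SLB slot n is allocated */
static uint32_t kern_bitmap;	/* bit n set => slot n maps a kernel segment */
static unsigned int rr_next = SLB_NUM_BOLTED - 1;	/* round-robin cursor */

/* First zero bit, like the kernel's ffz(); map must not be all ones. */
static unsigned int first_zero(uint32_t map)
{
	unsigned int i = 0;

	while (map & (1U << i))
		i++;
	return i;
}

static unsigned int alloc_slb_slot(bool kernel)
{
	unsigned int index;

	if (used_bitmap != UINT32_MAX) {
		/* Fast path: hand out a slot that has never been used. */
		index = first_zero(used_bitmap);
		used_bitmap |= 1U << index;
	} else {
		/* All 32 slots taken: evict round-robin, skipping bolted slots. */
		index = (rr_next < SLB_SIZE - 1) ? rr_next + 1 : SLB_NUM_BOLTED;
		rr_next = index;
	}

	/* Record whether the slot now holds a kernel or a user segment. */
	if (kernel)
		kern_bitmap |= 1U << index;
	else
		kern_bitmap &= ~(1U << index);

	return index;
}

int main(void)
{
	/* Bolted (kernel) entries are marked allocated up front. */
	used_bitmap = kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;

	for (int i = 0; i < 35; i++)
		printf("allocation %2d -> slot %u\n", i, alloc_slb_slot(i % 2 == 0));
	return 0;
}

Running this, allocations fill slots 2..31 from the bitmap first and only then start cycling through the non-bolted slots again, which mirrors the behaviour the patch gives alloc_slb_index() below.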
arch/powerpc/include/asm/paca.h
@@ -113,7 +113,10 @@ struct paca_struct {
 				 * on the linear mapping */
 	/* SLB related definitions */
 	u16 vmalloc_sllp;
-	u16 slb_cache_ptr;
+	u8 slb_cache_ptr;
+	u8 stab_rr;			/* stab/slb round-robin counter */
+	u32 slb_used_bitmap;		/* Bitmaps for first 32 SLB entries. */
+	u32 slb_kern_bitmap;
 	u32 slb_cache[SLB_CACHE_ENTRIES];
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -160,7 +163,6 @@ struct paca_struct {
 	 */
 	struct task_struct *__current;	/* Pointer to current */
 	u64 kstack;			/* Saved Kernel stack addr */
-	u64 stab_rr;			/* stab/slb round-robin counter */
 	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u16 trap_save;			/* Used when bad stack is encountered */
arch/powerpc/kernel/asm-offsets.c
@@ -178,7 +178,6 @@ int main(void)
 	OFFSET(PACAKSAVE, paca_struct, kstack);
 	OFFSET(PACACURRENT, paca_struct, __current);
 	OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
-	OFFSET(PACASTABRR, paca_struct, stab_rr);
 	OFFSET(PACAR1, paca_struct, saved_r1);
 	OFFSET(PACATOC, paca_struct, kernel_toc);
 	OFFSET(PACAKBASE, paca_struct, kernelbase);
@@ -217,6 +216,7 @@ int main(void)
 #ifdef CONFIG_PPC_BOOK3S_64
 	OFFSET(PACASLBCACHE, paca_struct, slb_cache);
 	OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
+	OFFSET(PACASTABRR, paca_struct, stab_rr);
 	OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
 #ifdef CONFIG_PPC_MM_SLICES
 	OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
arch/powerpc/mm/slb.c
@@ -122,6 +122,9 @@ void slb_restore_bolted_realmode(void)
 {
 	__slb_restore_bolted_realmode();
 	get_paca()->slb_cache_ptr = 0;
+
+	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
 }
 
 /*
@@ -129,9 +132,6 @@ void slb_restore_bolted_realmode(void)
  */
 void slb_flush_all_realmode(void)
 {
-	/*
-	 * This flushes all SLB entries including 0, so it must be realmode.
-	 */
 	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
 }
@@ -177,6 +177,9 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 
 	get_paca()->slb_cache_ptr = 0;
+
+	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
 }
 
 void slb_save_contents(struct slb_entry *slb_ptr)
@@ -209,7 +212,7 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
 		return;
 
 	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
-	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);
+	pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);
 
 	for (i = 0; i < mmu_slb_size; i++) {
 		e = slb_ptr->esid;
@@ -342,10 +345,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 				     "isync"
 				     :: "r"(ksp_vsid_data),
 					"r"(ksp_esid_data));
+
+			get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
 		}
 
 		get_paca()->slb_cache_ptr = 0;
 	}
+	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
 
 	copy_mm_to_paca(mm);
@@ -402,6 +408,8 @@ void slb_initialize(void)
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
+	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -453,17 +461,47 @@ static void slb_cache_update(unsigned long esid_data)
 	}
 }
 
-static enum slb_index alloc_slb_index(void)
+static enum slb_index alloc_slb_index(bool kernel)
 {
 	enum slb_index index;
 
-	/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
-	index = get_paca()->stab_rr;
-	if (index < (mmu_slb_size - 1))
-		index++;
-	else
-		index = SLB_NUM_BOLTED;
-	get_paca()->stab_rr = index;
+	/*
+	 * The allocation bitmaps can become out of synch with the SLB
+	 * when the _switch code does slbie when bolting a new stack
+	 * segment and it must not be anywhere else in the SLB. This leaves
+	 * a kernel allocated entry that is unused in the SLB. With very
+	 * large systems or small segment sizes, the bitmaps could slowly
+	 * fill with these entries. They will eventually be cleared out
+	 * by the round robin allocator in that case, so it's probably not
+	 * worth accounting for.
+	 */
+
+	/*
+	 * SLBs beyond 32 entries are allocated with stab_rr only
+	 * POWER7/8/9 have 32 SLB entries, this could be expanded if a
+	 * future CPU has more.
+	 */
+	if (local_paca->slb_used_bitmap != U32_MAX) {
+		index = ffz(local_paca->slb_used_bitmap);
+		local_paca->slb_used_bitmap |= 1U << index;
+		if (kernel)
+			local_paca->slb_kern_bitmap |= 1U << index;
+	} else {
+		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
+		index = local_paca->stab_rr;
+		if (index < (mmu_slb_size - 1))
+			index++;
+		else
+			index = SLB_NUM_BOLTED;
+		local_paca->stab_rr = index;
+		if (index < 32) {
+			if (kernel)
+				local_paca->slb_kern_bitmap |= 1U << index;
+			else
+				local_paca->slb_kern_bitmap &= ~(1U << index);
+		}
+	}
+	BUG_ON(index < SLB_NUM_BOLTED);
 
 	return index;
 }
@@ -490,7 +528,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 */
 	barrier();
 
-	index = alloc_slb_index();
+	index = alloc_slb_index(kernel);
 
 	vsid_data = __mk_vsid_data(vsid, ssize, flags);
 	esid_data = mk_esid_data(ea, ssize, index);
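
A quick worked example of the bitmap seeding used in slb_initialize(), slb_flush_and_rebolt() and friends above: the mask (1U << SLB_NUM_BOLTED) - 1 sets the low SLB_NUM_BOLTED bits, so the bolted entries start out recorded as allocated kernel entries. The snippet below is a standalone sketch; SLB_NUM_BOLTED is assumed to be 2 purely for illustration, the real constant comes from the powerpc headers.

#include <stdio.h>

/* Assumed value, for illustration only. */
#define SLB_NUM_BOLTED 2

int main(void)
{
	unsigned int mask = (1U << SLB_NUM_BOLTED) - 1;

	/* 0x3: bits 0 and 1 set, i.e. the bolted slots are marked used and kernel. */
	printf("slb_used_bitmap = slb_kern_bitmap = %#x\n", mask);
	return 0;
}

With this seeding, ffz() on the used bitmap returns SLB_NUM_BOLTED, so the first dynamically allocated segment lands in the slot immediately after the bolted ones.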
arch/powerpc/xmon/xmon.c
@@ -2394,7 +2394,9 @@ static void dump_one_paca(int cpu)
 		}
 	}
 	DUMP(p, vmalloc_sllp, "%#-*x");
-	DUMP(p, stab_rr, "%#-*llx");
+	DUMP(p, stab_rr, "%#-*x");
+	DUMP(p, slb_used_bitmap, "%#-*x");
+	DUMP(p, slb_kern_bitmap, "%#-*x");
 
 	if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
 		DUMP(p, slb_cache_ptr, "%#-*x");