Commit 760573c1 authored by Michael Ellerman's avatar Michael Ellerman

powerpc/mm: Split radix vs hash mm context initialisation

Complete the split of the radix vs hash mm context initialisation.

This is mostly code movement, with the exception that we now limit the
context allocation to PRTB_ENTRIES - 1 on radix.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent c1ff840d
...@@ -65,6 +65,8 @@ extern struct patb_entry *partition_tb;
 * MAX_USER_CONTEXT * 16 bytes of space.
 */
#define PRTB_SIZE_SHIFT	(CONTEXT_BITS + 4)
#define PRTB_ENTRIES	(1ul << CONTEXT_BITS)
/*
 * Power9 currently only support 64K partition table size.
 */
......
...@@ -63,47 +63,66 @@ int hash__alloc_context_id(void)
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
static int radix__init_new_context(struct mm_struct *mm, int index) static int hash__init_new_context(struct mm_struct *mm)
{
int index;
index = hash__alloc_context_id();
if (index < 0)
return index;
/*
* The old code would re-promote on fork, we don't do that when using
* slices as it could cause problem promoting slices that have been
* forced down to 4K.
*
* For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
* initialize context slice details for newly allocated mm's (which will
* have id == 0) and don't alter context slice inherited via fork (which
* will have id != 0).
*
* We should not be calling init_new_context() on init_mm. Hence a
* check against 0 is OK.
*/
if (mm->context.id == 0)
slice_set_user_psize(mm, mmu_virtual_psize);
subpage_prot_init_new_context(mm);
return index;
}
/*
 * Allocate a radix-MMU context id for @mm and install the mm's page-table
 * root in the corresponding process table entry.
 *
 * Allocation is capped at PRTB_ENTRIES - 1 because the context id indexes
 * process_tb[] directly, which has PRTB_ENTRIES slots.
 *
 * Returns the allocated context id, or a negative error code on failure.
 */
static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index;

	index = alloc_context_id(1, PRTB_ENTRIES - 1);
	if (index < 0)
		return index;

	/*
	 * set the process table entry,
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	return index;
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm) int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{ {
int index; int index;
index = hash__alloc_context_id(); if (radix_enabled())
index = radix__init_new_context(mm);
else
index = hash__init_new_context(mm);
if (index < 0) if (index < 0)
return index; return index;
if (radix_enabled()) {
radix__init_new_context(mm, index);
} else {
/* The old code would re-promote on fork, we don't do that
* when using slices as it could cause problem promoting slices
* that have been forced down to 4K
*
* For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we
* properly initialize context slice details for newly allocated
* mm's (which will have id == 0) and don't alter context slice
* inherited via fork (which will have id != 0).
*
* We should not be calling init_new_context() on init_mm. Hence a
* check against 0 is ok.
*/
if (mm->context.id == 0)
slice_set_user_psize(mm, mmu_virtual_psize);
subpage_prot_init_new_context(mm);
}
mm->context.id = index; mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX #ifdef CONFIG_PPC_ICSWX
mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment