Commit 227ae625 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/book3s64/kuap/kuep: Add PPC_PKEY config on book3s64

The config CONFIG_PPC_PKEY is used to select the base support that is
required for PPC_MEM_KEYS, KUAP, and KUEP. Adding this dependency
reduces the code complexity (in terms of #ifdefs) and enables us to
move some of the initialization code to pkeys.c
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-4-aneesh.kumar@linux.ibm.com
parent 9f378b9f
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#ifdef CONFIG_PPC_KUAP #ifdef CONFIG_PPC_KUAP
BEGIN_MMU_FTR_SECTION_NESTED(67) BEGIN_MMU_FTR_SECTION_NESTED(67)
mfspr \gpr1, SPRN_AMR mfspr \gpr1, SPRN_AMR
ld \gpr2, STACK_REGS_KUAP(r1) ld \gpr2, STACK_REGS_AMR(r1)
cmpd \gpr1, \gpr2 cmpd \gpr1, \gpr2
beq 998f beq 998f
isync isync
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
bne \msr_pr_cr, 99f bne \msr_pr_cr, 99f
.endif .endif
mfspr \gpr1, SPRN_AMR mfspr \gpr1, SPRN_AMR
std \gpr1, STACK_REGS_KUAP(r1) std \gpr1, STACK_REGS_AMR(r1)
li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT) li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
sldi \gpr2, \gpr2, AMR_KUAP_SHIFT sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
cmpd \use_cr, \gpr1, \gpr2 cmpd \use_cr, \gpr1, \gpr2
......
...@@ -199,7 +199,7 @@ extern int mmu_io_psize; ...@@ -199,7 +199,7 @@ extern int mmu_io_psize;
void mmu_early_init_devtree(void); void mmu_early_init_devtree(void);
void hash__early_init_devtree(void); void hash__early_init_devtree(void);
void radix__early_init_devtree(void); void radix__early_init_devtree(void);
#ifdef CONFIG_PPC_MEM_KEYS #ifdef CONFIG_PPC_PKEY
void pkey_early_init_devtree(void); void pkey_early_init_devtree(void);
#else #else
static inline void pkey_early_init_devtree(void) {} static inline void pkey_early_init_devtree(void) {}
......
...@@ -53,9 +53,14 @@ struct pt_regs ...@@ -53,9 +53,14 @@ struct pt_regs
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
unsigned long ppr; unsigned long ppr;
#endif #endif
union {
#ifdef CONFIG_PPC_KUAP #ifdef CONFIG_PPC_KUAP
unsigned long kuap; unsigned long kuap;
#endif #endif
#ifdef CONFIG_PPC_PKEY
unsigned long amr;
#endif
};
}; };
unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */ unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
}; };
......
...@@ -356,6 +356,9 @@ int main(void) ...@@ -356,6 +356,9 @@ int main(void)
STACK_PT_REGS_OFFSET(_PPR, ppr); STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_PKEY
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
#endif
#ifdef CONFIG_PPC_KUAP #ifdef CONFIG_PPC_KUAP
STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap); STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
#endif #endif
......
...@@ -17,7 +17,7 @@ endif ...@@ -17,7 +17,7 @@ endif
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o
obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o obj-$(CONFIG_PPC_PKEY) += pkeys.o
# Instrumenting the SLB fault path can lead to duplicate SLB entries # Instrumenting the SLB fault path can lead to duplicate SLB entries
KCOV_INSTRUMENT_slb.o := n KCOV_INSTRUMENT_slb.o := n
...@@ -89,12 +89,14 @@ static int scan_pkey_feature(void) ...@@ -89,12 +89,14 @@ static int scan_pkey_feature(void)
} }
} }
#ifdef CONFIG_PPC_MEM_KEYS
/* /*
* Adjust the upper limit, based on the number of bits supported by * Adjust the upper limit, based on the number of bits supported by
* arch-neutral code. * arch-neutral code.
*/ */
pkeys_total = min_t(int, pkeys_total, pkeys_total = min_t(int, pkeys_total,
((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1)); ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
#endif
return pkeys_total; return pkeys_total;
} }
...@@ -102,6 +104,7 @@ void __init pkey_early_init_devtree(void) ...@@ -102,6 +104,7 @@ void __init pkey_early_init_devtree(void)
{ {
int pkeys_total, i; int pkeys_total, i;
#ifdef CONFIG_PPC_MEM_KEYS
/* /*
* We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral * We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
* generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE. * generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
...@@ -117,7 +120,7 @@ void __init pkey_early_init_devtree(void) ...@@ -117,7 +120,7 @@ void __init pkey_early_init_devtree(void)
BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
__builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) __builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
!= (sizeof(u64) * BITS_PER_BYTE)); != (sizeof(u64) * BITS_PER_BYTE));
#endif
/* /*
* Only P7 and above supports SPRN_AMR update with MSR[PR] = 1 * Only P7 and above supports SPRN_AMR update with MSR[PR] = 1
*/ */
...@@ -223,14 +226,6 @@ void __init pkey_early_init_devtree(void) ...@@ -223,14 +226,6 @@ void __init pkey_early_init_devtree(void)
return; return;
} }
void pkey_mm_init(struct mm_struct *mm)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
mm_pkey_allocation_map(mm) = initial_allocation_mask;
mm->context.execute_only_pkey = execute_only_key;
}
static inline u64 read_amr(void) static inline u64 read_amr(void)
{ {
return mfspr(SPRN_AMR); return mfspr(SPRN_AMR);
...@@ -257,6 +252,15 @@ static inline void write_iamr(u64 value) ...@@ -257,6 +252,15 @@ static inline void write_iamr(u64 value)
mtspr(SPRN_IAMR, value); mtspr(SPRN_IAMR, value);
} }
#ifdef CONFIG_PPC_MEM_KEYS
void pkey_mm_init(struct mm_struct *mm)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
mm_pkey_allocation_map(mm) = initial_allocation_mask;
mm->context.execute_only_pkey = execute_only_key;
}
static inline void init_amr(int pkey, u8 init_bits) static inline void init_amr(int pkey, u8 init_bits)
{ {
u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey)); u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
...@@ -445,3 +449,5 @@ void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm) ...@@ -445,3 +449,5 @@ void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm); mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
} }
#endif /* CONFIG_PPC_MEM_KEYS */
...@@ -406,6 +406,11 @@ config PPC_KUAP_DEBUG ...@@ -406,6 +406,11 @@ config PPC_KUAP_DEBUG
Add extra debugging for Kernel Userspace Access Protection (KUAP) Add extra debugging for Kernel Userspace Access Protection (KUAP)
If you're unsure, say N. If you're unsure, say N.
config PPC_PKEY
def_bool y
depends on PPC_BOOK3S_64
depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP
config ARCH_ENABLE_HUGEPAGE_MIGRATION config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y def_bool y
depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment