Commit 0a956d52 authored by Mike Rapoport (IBM), committed by Luis Chamberlain

powerpc: use CONFIG_EXECMEM instead of CONFIG_MODULES where appropriate

There are places where CONFIG_MODULES guards the code that depends on
memory allocation being done with module_alloc().

Replace CONFIG_MODULES with CONFIG_EXECMEM in such places.
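
For illustration only (not part of this patch): the kind of guard being converted looks like the sketch below. The helper name is hypothetical; the point is that the #ifdef should follow the allocator of executable memory (execmem, which now backs what module_alloc() used to provide) rather than module support itself.

#include <linux/types.h>
#include <linux/execmem.h>

/* Hypothetical helper, shown only to illustrate the guard conversion. */
static void *alloc_insn_page_sketch(size_t size)
{
#ifdef CONFIG_EXECMEM		/* was: #ifdef CONFIG_MODULES */
	/* executable memory comes from the execmem allocator */
	return execmem_alloc(EXECMEM_KPROBES, size);
#else
	return NULL;		/* no executable allocations are available */
#endif
}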
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
parent 14e56fb2
@@ -286,7 +286,7 @@ config PPC
 	select IOMMU_HELPER if PPC64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
-	select KASAN_VMALLOC if KASAN && MODULES
+	select KASAN_VMALLOC if KASAN && EXECMEM
 	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_PAGE_SIZE
 	select MMU_GATHER_RCU_TABLE_FREE
@@ -19,7 +19,7 @@
 #define KASAN_SHADOW_SCALE_SHIFT	3
-#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
+#if defined(CONFIG_EXECMEM) && defined(CONFIG_PPC32)
 #define KASAN_KERN_START	ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
 #else
 #define KASAN_KERN_START	PAGE_OFFSET
@@ -199,12 +199,12 @@ instruction_counter:
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 	INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
 	mtspr	SPRN_MD_EPN, r10
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	mfcr	r11
 	compare_to_kernel_boundary r10, r10
 #endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	blt+	3f
 	rlwinm	r10, r10, 0, 20, 31
 	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
@@ -419,14 +419,14 @@ InstructionTLBMiss:
 	 */
 	/* Get PTE (linux-style) and check access */
 	mfspr	r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SDR1
 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 	rlwinm	r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	li	r0, 3
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
@@ -442,7 +442,7 @@ InstructionTLBMiss:
 	andc.	r1,r1,r2		/* check access & ~permission */
 	bne-	InstructionAddressInvalid /* return if access not permitted */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_EXECMEM
 	rlwimi	r2, r0, 0, 31, 31	/* userspace ? -> PP lsb */
 #endif
 	ori	r1, r1, 0xe06		/* clear out reserved bits */
@@ -225,7 +225,7 @@ void __init poking_init(void)
 static unsigned long get_patch_pfn(void *addr)
 {
-	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+	if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
 		return vmalloc_to_pfn(addr);
 	else
 		return __pa_symbol(addr) >> PAGE_SHIFT;
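
A side note on the get_patch_pfn() hunk above (illustrative, not from the patch): IS_ENABLED() expands to a compile-time 0 or 1, so the check can stay in plain C; when CONFIG_EXECMEM is unset the compiler drops the vmalloc branch and only the kernel-image path survives, with no #ifdef required. A minimal sketch of the same idiom, using a hypothetical helper:

#include <linux/kconfig.h>
#include <linux/mm.h>

/* Hypothetical helper: folds to "return false;" when CONFIG_EXECMEM is unset. */
static bool addr_is_execmem_sketch(const void *addr)
{
	return IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr);
}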
@@ -184,7 +184,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 static bool is_module_segment(unsigned long addr)
 {
-	if (!IS_ENABLED(CONFIG_MODULES))
+	if (!IS_ENABLED(CONFIG_EXECMEM))
 		return false;
 	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
 		return false;