Commit b7415964 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'riscv-for-linus-5.13-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid over-allocating the kernel's mapping on !MMU systems,
   which could lead to up to 2MiB of lost memory

 - The SiFive address extension errata only manifest on rv64, they are
   now disabled on rv32 where they are unnecessary

 - A pair of late-landing cleanups

* tag 'riscv-for-linus-5.13-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: remove unused handle_exception symbol
  riscv: Consistify protect_kernel_linear_mapping_text_rodata() use
  riscv: enable SiFive errata CIP-453 and CIP-1200 Kconfig only if CONFIG_64BIT=y
  riscv: Only extend kernel reservation if mapped read-only
parents fec4d427 beaf5ae1
...@@ -21,7 +21,7 @@ config ERRATA_SIFIVE ...@@ -21,7 +21,7 @@ config ERRATA_SIFIVE
config ERRATA_SIFIVE_CIP_453 config ERRATA_SIFIVE_CIP_453
bool "Apply SiFive errata CIP-453" bool "Apply SiFive errata CIP-453"
depends on ERRATA_SIFIVE depends on ERRATA_SIFIVE && 64BIT
default y default y
help help
This will apply the SiFive CIP-453 errata to add sign extension This will apply the SiFive CIP-453 errata to add sign extension
...@@ -32,7 +32,7 @@ config ERRATA_SIFIVE_CIP_453 ...@@ -32,7 +32,7 @@ config ERRATA_SIFIVE_CIP_453
config ERRATA_SIFIVE_CIP_1200 config ERRATA_SIFIVE_CIP_1200
bool "Apply SiFive errata CIP-1200" bool "Apply SiFive errata CIP-1200"
depends on ERRATA_SIFIVE depends on ERRATA_SIFIVE && 64BIT
default y default y
help help
This will apply the SiFive CIP-1200 errata to replace all This will apply the SiFive CIP-1200 errata to replace all
......
...@@ -17,7 +17,6 @@ int set_memory_x(unsigned long addr, int numpages); ...@@ -17,7 +17,6 @@ int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages); int set_memory_rw_nx(unsigned long addr, int numpages);
void protect_kernel_text_data(void); void protect_kernel_text_data(void);
void protect_kernel_linear_mapping_text_rodata(void);
#else #else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
...@@ -27,6 +26,12 @@ static inline void protect_kernel_text_data(void) {} ...@@ -27,6 +26,12 @@ static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif #endif
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void protect_kernel_linear_mapping_text_rodata(void);
#else
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif
int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page); bool kernel_page_present(struct page *page);
......
...@@ -293,9 +293,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -293,9 +293,7 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
protect_kernel_text_data(); protect_kernel_text_data();
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) && !defined(CONFIG_XIP_KERNEL)
protect_kernel_linear_mapping_text_rodata(); protect_kernel_linear_mapping_text_rodata();
#endif
} }
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
int show_unhandled_signals = 1; int show_unhandled_signals = 1;
extern asmlinkage void handle_exception(void);
static DEFINE_SPINLOCK(die_lock); static DEFINE_SPINLOCK(die_lock);
void die(struct pt_regs *regs, const char *str) void die(struct pt_regs *regs, const char *str)
......
...@@ -135,11 +135,16 @@ void __init setup_bootmem(void) ...@@ -135,11 +135,16 @@ void __init setup_bootmem(void)
/* /*
* Reserve from the start of the kernel to the end of the kernel * Reserve from the start of the kernel to the end of the kernel
* and make sure we align the reservation on PMD_SIZE since we will */
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
/*
* Make sure we align the reservation on PMD_SIZE since we will
* map the kernel in the linear mapping as read-only: we do not want * map the kernel in the linear mapping as read-only: we do not want
* any allocation to happen between _end and the next pmd aligned page. * any allocation to happen between _end and the next pmd aligned page.
*/ */
memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK); vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
/* /*
* memblock allocator is not aware of the fact that last 4K bytes of * memblock allocator is not aware of the fact that last 4K bytes of
...@@ -640,7 +645,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) ...@@ -640,7 +645,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif #endif
} }
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL) #if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void protect_kernel_linear_mapping_text_rodata(void) void protect_kernel_linear_mapping_text_rodata(void)
{ {
unsigned long text_start = (unsigned long)lm_alias(_start); unsigned long text_start = (unsigned long)lm_alias(_start);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment