Commit 6bd33e1e authored by Christoph Hellwig, committed by Paul Walmsley

riscv: add nommu support

The kernel runs in M-mode without using page tables, and thus can run
bare metal without help from additional firmware.

Most of the patch is just stubbing out code not needed without page
tables, but there is an interesting detail in the signals implementation:

 - The normal RISC-V syscall ABI only implements rt_sigreturn as a VDSO
   entry point, but the ELF VDSO is not supported for nommu Linux.
   We instead copy the code that invokes the syscall onto the user
   stack.
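
   The trampoline itself (added to entry.S below) is just two
   instructions, which load the syscall number and trap into the
   kernel:

	li	a7, __NR_rt_sigreturn
	scall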

In addition to enabling the nommu code, a new defconfig for a small
kernel image that can run in nommu mode on qemu is also provided. To
run the kernel in qemu you can use the following command line:

qemu-system-riscv64 -smp 2 -m 64 -machine virt -nographic \
	-kernel arch/riscv/boot/loader \
	-drive file=rootfs.ext2,format=raw,id=hd0 \
	-device virtio-blk-device,drive=hd0
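
Note that the defconfig selects CONFIG_BINFMT_FLAT, so the root
filesystem must contain flat-format (bFLT) binaries rather than ELF
executables. As a rough sketch, assuming a nommu toolchain with elf2flt
support (the toolchain name below is illustrative, not part of this
patch), userspace can be built like:

	# hypothetical triple; any riscv64 toolchain with elf2flt works
	riscv64-linux-uclibc-gcc -Wl,-elf2flt=-r -o init init.c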

Contains contributions from Damien Le Moal <Damien.LeMoal@wdc.com>.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Anup Patel <anup@brainfault.org>
[paul.walmsley@sifive.com: updated to apply; add CONFIG_MMU guards
 around PCI_IOBASE definition to fix build issues; fixed checkpatch
 issues; move the PCI_IO_* and VMEMMAP address space macros along
 with the others; resolve sparse warning]
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
parent 9e806356
@@ -26,14 +26,14 @@ config RISCV
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
-	select GENERIC_STRNCPY_FROM_USER
-	select GENERIC_STRNLEN_USER
+	select GENERIC_STRNCPY_FROM_USER if MMU
+	select GENERIC_STRNLEN_USER if MMU
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ATOMIC64 if !64BIT
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_DMA_CONTIGUOUS
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
@@ -50,6 +50,7 @@ config RISCV
 	select PCI_DOMAINS_GENERIC if PCI
 	select PCI_MSI if PCI
 	select RISCV_TIMER
+	select UACCESS_MEMCPY if !MMU
 	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_ARCH_TOPOLOGY if SMP
 	select ARCH_HAS_PTE_SPECIAL
@@ -60,7 +61,7 @@ config RISCV
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
 	select SPARSEMEM_STATIC if 32BIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
-	select HAVE_ARCH_MMAP_RND_BITS
+	select HAVE_ARCH_MMAP_RND_BITS if MMU
 
 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
@@ -75,6 +76,7 @@ config ARCH_MMAP_RND_BITS_MAX
 # set if we run in machine mode, cleared if we run in supervisor mode
 config RISCV_M_MODE
 	bool
+	default !MMU
 
 # set if we are running in S-mode and can use SBI calls
 config RISCV_SBI
@@ -83,7 +85,11 @@ config RISCV_SBI
 	default y
 
 config MMU
-	def_bool y
+	bool "MMU-based Paged Memory Management Support"
+	default y
+	help
+	  Select if you want MMU-based virtualised addressing space
+	  support by paged memory management. If unsure, say 'Y'.
 
 config ZONE_DMA32
 	bool
@@ -102,6 +108,7 @@ config PA_BITS
 config PAGE_OFFSET
 	hex
 	default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+	default 0x80000000 if 64BIT && !MMU
 	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
 	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -145,7 +152,7 @@ config GENERIC_HWEIGHT
 	def_bool y
 
 config FIX_EARLYCON_MEM
-	def_bool y
+	def_bool CONFIG_MMU
 
 config PGTABLE_LEVELS
 	int
@@ -170,6 +177,7 @@ config ARCH_RV32I
 	select GENERIC_LIB_ASHRDI3
 	select GENERIC_LIB_LSHRDI3
 	select GENERIC_LIB_UCMPDI2
+	select MMU
 
 config ARCH_RV64I
 	bool "RV64I"
@@ -178,9 +186,9 @@ config ARCH_RV64I
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
-	select HAVE_DYNAMIC_FTRACE
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS
-	select SWIOTLB
+	select HAVE_DYNAMIC_FTRACE if MMU
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+	select SWIOTLB if MMU
 
 endchoice
......
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_MMU is not set
+CONFIG_MAXPHYSMEM_2GB=y
+CONFIG_SMP=y
+CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_SIFIVE_PLIC=y
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_LSM="[]"
+CONFIG_PRINTK_TIME=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
@@ -11,4 +11,12 @@
 
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN	16
+#endif
+
 #endif /* _ASM_RISCV_CACHE_H */
@@ -56,16 +56,16 @@ extern unsigned long elf_hwcap;
  */
 #define ELF_PLATFORM	(NULL)
 
+#ifdef CONFIG_MMU
 #define ARCH_DLINFO						\
 do {								\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
 		(elf_addr_t)current->mm->context.vdso);	\
 } while (0)
-
-
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 	int uses_interp);
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_ELF_H */
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
+#ifdef CONFIG_MMU
 /*
  * Here we define all the compile-time 'special' virtual addresses.
  * The point is to have a constant address at compile time, but to
@@ -42,4 +43,5 @@ extern void __set_fixmap(enum fixed_addresses idx,
 
 #include <asm-generic/fixmap.h>
 
+#endif /* CONFIG_MMU */
 #endif /* _ASM_RISCV_FIXMAP_H */
@@ -12,6 +12,12 @@
 #include <linux/errno.h>
 #include <asm/asm.h>
 
+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access()		do { } while (0)
+#define __disable_user_access()		do { } while (0)
+#endif
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 {								\
 	uintptr_t tmp;						\
......
@@ -24,8 +24,10 @@
 /*
  * I/O port access constants.
  */
+#ifdef CONFIG_MMU
 #define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
 #define PCI_IOBASE		((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */
 
 /*
  * Emulation routines for the port-mapped IO space used by some PCI drivers.
......
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <asm/mmiowb.h>
 
+#ifdef CONFIG_MMU
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 /*
@@ -26,6 +27,9 @@ void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wt(addr, size) ioremap((addr), (size))
 
 void iounmap(volatile void __iomem *addr);
+#else
+#define pgprot_noncached(x)	(x)
+#endif /* CONFIG_MMU */
 
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
......
@@ -10,6 +10,9 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
+#ifndef CONFIG_MMU
+	unsigned long	end_brk;
+#endif
 	void *vdso;
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
......
@@ -88,8 +88,14 @@ typedef struct page *pgtable_t;
 #define PTE_FMT "%08lx"
 #endif
 
+#ifdef CONFIG_MMU
 extern unsigned long va_pa_offset;
 extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET		(pfn_base)
+#else
+#define va_pa_offset		0
+#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
 
 extern unsigned long max_low_pfn;
 extern unsigned long min_low_pfn;
@@ -112,11 +118,9 @@ extern unsigned long min_low_pfn;
 
 #ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn) \
-	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+	(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
 #endif
 
-#define ARCH_PFN_OFFSET		(pfn_base)
-
 #endif /* __ASSEMBLY__ */
 
 #define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
......
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <asm/tlb.h>
 
+#ifdef CONFIG_MMU
 #include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -81,5 +82,6 @@ do {					\
 	pgtable_pte_page_dtor(pte);	\
 	tlb_remove_page((tlb), pte);	\
 } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_PGALLOC_H */
@@ -25,6 +25,7 @@
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */
 
+#ifdef CONFIG_MMU
 /* Number of entries in the page global directory */
 #define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
 /* Number of entries in the page table */
@@ -32,7 +33,6 @@
 
 /* Number of PGD entries that a user-mode program can use */
 #define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS  0
 
 /* Page protection bits */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
@@ -84,42 +84,6 @@ extern pgd_t swapper_pg_dir[];
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC
 
-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-#define PCI_IO_SIZE      SZ_16M
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END	(VMALLOC_START - 1)
-#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap		((struct page *)VMEMMAP_START)
-
-#define PCI_IO_END       VMEMMAP_START
-#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
-
-#define FIXADDR_TOP      PCI_IO_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero,
- * used for zero-mapped memory areas, etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 static inline int pmd_present(pmd_t pmd)
 {
 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -430,11 +394,34 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define kern_addr_valid(addr)   (1) /* FIXME */
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
-extern void *dtb_early_va;
-extern void setup_bootmem(void);
-extern void paging_init(void);
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END	(VMALLOC_START - 1)
+#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
+
+#define vmemmap		((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE      SZ_16M
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP      PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
 
 /*
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
@@ -446,6 +433,31 @@ extern void paging_init(void);
 #define TASK_SIZE FIXADDR_START
 #endif
 
+#else /* CONFIG_MMU */
+
+#define PAGE_KERNEL		__pgprot(0)
+#define swapper_pg_dir		NULL
+#define VMALLOC_START		0
+
+#define TASK_SIZE 0xffffffffUL
+
+#endif /* !CONFIG_MMU */
+
+#define kern_addr_valid(addr)   (1) /* FIXME */
+
+extern void *dtb_early_va;
+void setup_bootmem(void);
+void paging_init(void);
+
+#define FIRST_USER_ADDRESS  0
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !__ASSEMBLY__ */
......
@@ -10,6 +10,7 @@
 #include <linux/mm_types.h>
 #include <asm/smp.h>
 
+#ifdef CONFIG_MMU
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
@@ -20,14 +21,19 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#define local_flush_tlb_page(addr)		do { } while (0)
+#endif /* CONFIG_MMU */
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
-#else /* CONFIG_SMP */
+#else /* CONFIG_SMP && CONFIG_MMU */
 
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
@@ -38,7 +44,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
 static inline void flush_tlb_kernel_range(unsigned long start,
......
@@ -11,6 +11,7 @@
 /*
  * User space memory access functions
  */
+#ifdef CONFIG_MMU
 #include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
@@ -475,4 +476,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	__ret;							\
 })
 
+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
 #endif /* _ASM_RISCV_UACCESS_H */
@@ -25,9 +25,8 @@ obj-y	+= time.o
 obj-y	+= traps.o
 obj-y	+= riscv_ksyms.o
 obj-y	+= stacktrace.o
-obj-y	+= vdso.o
 obj-y	+= cacheinfo.o
-obj-y	+= vdso/
+obj-$(CONFIG_MMU)	+= vdso.o vdso/
 
 obj-$(CONFIG_RISCV_M_MODE)	+= clint.o
 obj-$(CONFIG_FPU)		+= fpu.o
......
@@ -398,6 +398,10 @@ ENTRY(__switch_to)
 	ret
 ENDPROC(__switch_to)
 
+#ifndef CONFIG_MMU
+#define do_page_fault do_trap_unknown
+#endif
+
 	.section ".rodata"
 	/* Exception vector table */
 ENTRY(excp_vect_table)
@@ -419,3 +423,10 @@ ENTRY(excp_vect_table)
 	RISCV_PTR do_page_fault   /* store page fault */
 excp_vect_table_end:
 END(excp_vect_table)
+
+#ifndef CONFIG_MMU
+ENTRY(__user_rt_sigreturn)
+	li a7, __NR_rt_sigreturn
+	scall
+END(__user_rt_sigreturn)
+#endif
@@ -109,8 +109,10 @@ clear_bss_done:
 	la sp, init_thread_union + THREAD_SIZE
 	mv a0, s1
 	call setup_vm
+#ifdef CONFIG_MMU
 	la a0, early_pg_dir
 	call relocate
+#endif /* CONFIG_MMU */
 
 	/* Restore C environment */
 	la tp, init_task
@@ -121,6 +123,7 @@ clear_bss_done:
 	call parse_dtb
 	tail start_kernel
 
+#ifdef CONFIG_MMU
 relocate:
 	/* Relocate return address */
 	li a1, PAGE_OFFSET
@@ -171,6 +174,7 @@ relocate:
 	sfence.vma
 	ret
+#endif /* CONFIG_MMU */
 
 .Lsecondary_start:
 #ifdef CONFIG_SMP
@@ -196,9 +200,11 @@ relocate:
 	beqz tp, .Lwait_for_cpu_up
 	fence
 
+#ifdef CONFIG_MMU
 	/* Enable virtual memory and relocate to virtual address */
 	la a0, swapper_pg_dir
 	call relocate
+#endif
 
 	tail smp_callin
 #endif
......
@@ -17,11 +17,16 @@
 #include <asm/switch_to.h>
 #include <asm/csr.h>
 
+extern u32 __user_rt_sigreturn[2];
+
 #define DEBUG_SIG 0
 
 struct rt_sigframe {
 	struct siginfo info;
 	struct ucontext uc;
+#ifndef CONFIG_MMU
+	u32 sigreturn_code[2];
+#endif
 };
 
 #ifdef CONFIG_FPU
@@ -166,7 +171,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
 	return (void __user *)sp;
 }
 
-
 static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	struct pt_regs *regs)
 {
@@ -189,8 +193,19 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 		return -EFAULT;
 
 	/* Set up to return from userspace. */
+#ifdef CONFIG_MMU
 	regs->ra = (unsigned long)VDSO_SYMBOL(
 		current->mm->context.vdso, rt_sigreturn);
+#else
+	/*
+	 * For the nommu case we don't have a VDSO.  Instead we push two
+	 * instructions to call the rt_sigreturn syscall onto the user stack.
+	 */
+	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
+			 sizeof(frame->sigreturn_code)))
+		return -EFAULT;
+	regs->ra = (unsigned long)&frame->sigreturn_code;
+#endif /* CONFIG_MMU */
 
 	/*
 	 * Set up registers for signal handler.
......
@@ -2,6 +2,5 @@
 lib-y	+= delay.o
 lib-y	+= memcpy.o
 lib-y	+= memset.o
-lib-y	+= uaccess.o
-
+lib-$(CONFIG_MMU)	+= uaccess.o
 lib-$(CONFIG_64BIT)	+= tishift.o
@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
 endif
 
 obj-y += init.o
-obj-y += fault.o
 obj-y += extable.o
-obj-y += ioremap.o
+obj-$(CONFIG_MMU) += fault.o ioremap.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
......
@@ -78,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
@@ -85,3 +86,4 @@ void flush_icache_pte(pte_t pte)
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
+#ifdef CONFIG_MMU
 	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
 	local_flush_tlb_all();
+#endif
 
 	flush_icache_deferred(next);
 }
@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 EXPORT_SYMBOL(empty_zero_page);
 
 extern char _start[];
+void *dtb_early_va;
 
 static void __init zone_sizes_init(void)
 {
@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
-void setup_zero_page(void)
+static void setup_zero_page(void)
 {
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 }
@@ -142,12 +143,12 @@ void __init setup_bootmem(void)
 	}
 }
 
+#ifdef CONFIG_MMU
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
-void *dtb_early_va;
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -449,6 +450,16 @@ static void __init setup_vm_final(void)
 	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
 	local_flush_tlb_all();
 }
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+	dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
 
 void __init paging_init(void)
 {
......