Commit 0593c1b4 authored by Linus Torvalds

Merge tag 'riscv-for-linus-5.10-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull more RISC-V updates from Palmer Dabbelt:
 "Just a single patch set: the remainder of Christoph's work to remove
  set_fs, including the RISC-V portion"

* tag 'riscv-for-linus-5.10-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: remove address space overrides using set_fs()
  riscv: implement __get_kernel_nofault and __put_user_nofault
  riscv: refactor __get_user and __put_user
  riscv: use memcpy based uaccess for nommu again
  asm-generic: make the set_fs implementation optional
  asm-generic: add nommu implementations of __{get,put}_kernel_nofault
  asm-generic: improve the nommu {get,put}_user handling
  uaccess: provide a generic TASK_SIZE_MAX definition
parents 45fe6058 e8d444d3
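
The conceptual core of the set_fs() removal shows up in the __access_ok() hunk further down: the user-pointer range check no longer consults a per-thread addr_limit (KERNEL_DS vs USER_DS) but compares against the fixed TASK_SIZE, while kernel-internal accesses move to the new __get_kernel_nofault()/__put_kernel_nofault() helpers. A minimal standalone sketch of that overflow-safe range check (plain userspace C; the TASK_SIZE value here is illustrative, not RISC-V's):

/* Standalone illustration of the range check __access_ok() uses once
 * set_fs()/addr_limit are gone; TASK_SIZE below is a made-up value. */
#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE 0x0000004000000000UL	/* illustrative only */

static bool access_ok_sketch(unsigned long addr, unsigned long size)
{
	/* checking size first keeps TASK_SIZE - size from wrapping around */
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}

int main(void)
{
	printf("%d\n", access_ok_sketch(0x1000, 64));		/* 1: in range */
	printf("%d\n", access_ok_sketch(TASK_SIZE - 16, 64));	/* 0: overruns the limit */
	return 0;
}
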
......@@ -88,7 +88,7 @@ config RISCV
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select SET_FS
select UACCESS_MEMCPY if !MMU
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
......
......@@ -24,10 +24,6 @@
#include <asm/processor.h>
#include <asm/csr.h>
typedef struct {
unsigned long seg;
} mm_segment_t;
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
......@@ -39,7 +35,6 @@ typedef struct {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0=>preemptible, <0=>BUG */
mm_segment_t addr_limit;
/*
* These stack pointers are overwritten on every system call or
* exception. SP is also saved to the stack it can be recovered when
......@@ -59,7 +54,6 @@ struct thread_info {
{ \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
}
#endif /* !__ASSEMBLY__ */
......
......@@ -13,24 +13,6 @@
/*
* User space memory access functions
*/
extern unsigned long __must_check __asm_copy_to_user(void __user *to,
const void *from, unsigned long n);
extern unsigned long __must_check __asm_copy_from_user(void *to,
const void __user *from, unsigned long n);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __asm_copy_from_user(to, from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __asm_copy_to_user(to, from, n);
}
#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
......@@ -44,29 +26,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define __disable_user_access() \
__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(~0UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
}
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define user_addr_max() (get_fs().seg)
/**
* access_ok: - Checks if a user space pointer is valid
* @addr: User space pointer to start of block to check
......@@ -94,9 +53,7 @@ static inline void set_fs(mm_segment_t fs)
*/
static inline int __access_ok(unsigned long addr, unsigned long size)
{
const mm_segment_t fs = get_fs();
return size <= fs.seg && addr <= fs.seg - size;
return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}
/*
......@@ -125,7 +82,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
do { \
uintptr_t __tmp; \
__typeof__(x) __x; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1:\n" \
" " insn " %1, %3\n" \
......@@ -143,7 +99,6 @@ do { \
" .previous" \
: "+r" (err), "=&r" (__x), "=r" (__tmp) \
: "m" (*(ptr)), "i" (-EFAULT)); \
__disable_user_access(); \
(x) = __x; \
} while (0)
......@@ -156,7 +111,6 @@ do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
uintptr_t __tmp; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1:\n" \
" lw %1, %4\n" \
......@@ -180,12 +134,30 @@ do { \
"=r" (__tmp) \
: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
"i" (-EFAULT)); \
__disable_user_access(); \
(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
#endif /* CONFIG_64BIT */
#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
do { \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__get_user_asm("lb", (x), __gu_ptr, __gu_err); \
break; \
case 2: \
__get_user_asm("lh", (x), __gu_ptr, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (x), __gu_ptr, __gu_err); \
break; \
case 8: \
__get_user_8((x), __gu_ptr, __gu_err); \
break; \
default: \
BUILD_BUG(); \
} \
} while (0)
/**
* __get_user: - Get a simple variable from user space, with less checking.
......@@ -209,25 +181,15 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
register long __gu_err = 0; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
long __gu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__get_user_asm("lb", (x), __gu_ptr, __gu_err); \
break; \
case 2: \
__get_user_asm("lh", (x), __gu_ptr, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (x), __gu_ptr, __gu_err); \
break; \
case 8: \
__get_user_8((x), __gu_ptr, __gu_err); \
break; \
default: \
BUILD_BUG(); \
} \
\
__enable_user_access(); \
__get_user_nocheck(x, __gu_ptr, __gu_err); \
__disable_user_access(); \
\
__gu_err; \
})
......@@ -261,7 +223,6 @@ do { \
do { \
uintptr_t __tmp; \
__typeof__(*(ptr)) __x = x; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1:\n" \
" " insn " %z3, %2\n" \
......@@ -278,7 +239,6 @@ do { \
" .previous" \
: "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
: "rJ" (__x), "i" (-EFAULT)); \
__disable_user_access(); \
} while (0)
#ifdef CONFIG_64BIT
......@@ -290,7 +250,6 @@ do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((x)-(x)))(x); \
uintptr_t __tmp; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1:\n" \
" sw %z4, %2\n" \
......@@ -312,10 +271,28 @@ do { \
"=m" (__ptr[__LSW]), \
"=m" (__ptr[__MSW]) \
: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
__disable_user_access(); \
} while (0)
#endif /* CONFIG_64BIT */
#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
do { \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__put_user_asm("sb", (x), __gu_ptr, __pu_err); \
break; \
case 2: \
__put_user_asm("sh", (x), __gu_ptr, __pu_err); \
break; \
case 4: \
__put_user_asm("sw", (x), __gu_ptr, __pu_err); \
break; \
case 8: \
__put_user_8((x), __gu_ptr, __pu_err); \
break; \
default: \
BUILD_BUG(); \
} \
} while (0)
/**
* __put_user: - Write a simple value into user space, with less checking.
......@@ -338,25 +315,15 @@ do { \
*/
#define __put_user(x, ptr) \
({ \
register long __pu_err = 0; \
__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
long __pu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
switch (sizeof(*__gu_ptr)) { \
case 1: \
__put_user_asm("sb", (x), __gu_ptr, __pu_err); \
break; \
case 2: \
__put_user_asm("sh", (x), __gu_ptr, __pu_err); \
break; \
case 4: \
__put_user_asm("sw", (x), __gu_ptr, __pu_err); \
break; \
case 8: \
__put_user_8((x), __gu_ptr, __pu_err); \
break; \
default: \
BUILD_BUG(); \
} \
\
__enable_user_access(); \
__put_user_nocheck(x, __gu_ptr, __pu_err); \
__disable_user_access(); \
\
__pu_err; \
})
......@@ -385,6 +352,24 @@ do { \
-EFAULT; \
})
unsigned long __must_check __asm_copy_to_user(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check __asm_copy_from_user(void *to,
const void __user *from, unsigned long n);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __asm_copy_from_user(to, from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __asm_copy_to_user(to, from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern long __must_check strlen_user(const char __user *str);
......@@ -476,6 +461,26 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__ret; \
})
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
long __kr_err; \
\
__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
if (unlikely(__kr_err)) \
goto err_label; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
long __kr_err; \
\
__put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
if (unlikely(__kr_err)) \
goto err_label; \
} while (0)
#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
......
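
For context on the __get_kernel_nofault()/__put_kernel_nofault() definitions added above: callers pass a local label and jump to it if the access faults, which is how the generic maybe-faulting copy helpers consume these macros. A hedged standalone sketch of that calling convention (the macro body is a stand-in that always succeeds and exists purely to show the control flow; names are illustrative, not the kernel's):

/* Standalone sketch of the err_label convention used by the
 * *_kernel_nofault() helpers; this stand-in never faults. */
#include <stdio.h>

#define sketch_get_kernel_nofault(dst, src, type, err_label)	\
do {								\
	*(type *)(dst) = *(const type *)(src);			\
	if (0)	/* keep err_label "used" on the success path */	\
		goto err_label;					\
} while (0)

static long read_long_nofault(const long *src, long *dst)
{
	sketch_get_kernel_nofault(dst, src, long, fault);
	return 0;
fault:
	return -14;	/* -EFAULT */
}

int main(void)
{
	long v = 42, out = 0;
	long err = read_long_nofault(&v, &out);

	printf("err=%ld out=%ld\n", err, out);	/* err=0 out=42 */
	return 0;
}
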
......@@ -84,7 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
}
regs->epc = pc;
regs->sp = sp;
set_fs(USER_DS);
}
void flush_thread(void)
......
......@@ -2,5 +2,5 @@
lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
......@@ -10,28 +10,76 @@
#include <linux/string.h>
#ifdef CONFIG_UACCESS_MEMCPY
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
#include <asm/unaligned.h>
static inline int __get_user_fn(size_t size, const void __user *from, void *to)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 *)to = *(u64 __force *)from;
return 0;
#endif
}
BUILD_BUG_ON(!__builtin_constant_p(size));
switch (size) {
case 1:
*(u8 *)to = get_unaligned((u8 __force *)from);
return 0;
case 2:
*(u16 *)to = get_unaligned((u16 __force *)from);
return 0;
case 4:
*(u32 *)to = get_unaligned((u32 __force *)from);
return 0;
case 8:
*(u64 *)to = get_unaligned((u64 __force *)from);
return 0;
default:
BUILD_BUG();
return 0;
}
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
static inline int __put_user_fn(size_t size, void __user *to, void *from)
{
BUILD_BUG_ON(!__builtin_constant_p(size));
switch (size) {
case 1:
put_unaligned(*(u8 *)from, (u8 __force *)to);
return 0;
case 2:
put_unaligned(*(u16 *)from, (u16 __force *)to);
return 0;
case 4:
put_unaligned(*(u32 *)from, (u32 __force *)to);
return 0;
case 8:
put_unaligned(*(u64 *)from, (u64 __force *)to);
return 0;
default:
BUILD_BUG();
return 0;
}
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
*((type *)dst) = get_unaligned((type *)(src)); \
if (0) /* make sure the label looks used to the compiler */ \
goto err_label; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
put_unaligned(*((type *)src), (type *)(dst)); \
if (0) /* make sure the label looks used to the compiler */ \
goto err_label; \
} while (0)
#define HAVE_GET_KERNEL_NOFAULT 1
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
memcpy(to, (const void __force *)from, n);
return 0;
}
......@@ -39,27 +87,6 @@ raw_copy_from_user(void *to, const void __user * from, unsigned long n)
static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
#endif
default:
break;
}
}
memcpy((void __force *)to, from, n);
return 0;
}
......@@ -67,6 +94,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
#ifdef CONFIG_SET_FS
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#ifndef KERNEL_DS
......@@ -89,6 +117,7 @@ static inline void set_fs(mm_segment_t fs)
#ifndef uaccess_kernel
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
#endif /* CONFIG_SET_FS */
#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
......
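
The nommu handlers above now go through get_unaligned()/put_unaligned() from <asm/unaligned.h>, so accesses work even when the user pointer is not naturally aligned. A hedged standalone sketch of the same idea using the common memcpy idiom (a portable stand-in, not the kernel's implementation):

/* Portable stand-in for an unaligned 32-bit load/store, the technique
 * the kernel's get_unaligned()/put_unaligned() helpers wrap. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* compiler lowers this to a safe load */
	return v;
}

static void put_unaligned_u32(uint32_t v, void *p)
{
	memcpy(p, &v, sizeof(v));	/* safe even if p is misaligned */
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	put_unaligned_u32(0x11223344u, buf + 1);	/* deliberately misaligned */
	printf("0x%08x\n", get_unaligned_u32(buf + 1));	/* 0x11223344 */
	return 0;
}
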
......@@ -33,6 +33,10 @@ typedef struct {
/* empty dummy */
} mm_segment_t;
#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX TASK_SIZE
#endif
#define uaccess_kernel() (false)
#define user_addr_max() (TASK_SIZE_MAX)
......