Commit db68ce10 authored by Al Viro

new helper: uaccess_kernel()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent aaa2e7ac
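For context, uaccess_kernel() is nothing more than a name for the segment_eq(get_fs(), KERNEL_DS) test that every call site below used to open-code. A minimal, self-contained sketch of the check (user-space stubs only; the types, segment values and set_fs() here are simplified stand-ins for the kernel's, purely for illustration):

#include <stdio.h>

/* Simplified stand-in for the kernel's mm_segment_t / address-limit machinery. */
typedef struct { unsigned long seg; } mm_segment_t;

static mm_segment_t current_fs = { 0x7fffffffUL };	/* pretend USER_DS */

#define USER_DS			((mm_segment_t){ 0x7fffffffUL })
#define KERNEL_DS		((mm_segment_t){ ~0UL })
#define get_fs()		(current_fs)
#define set_fs(x)		(current_fs = (x))
#define segment_eq(a, b)	((a).seg == (b).seg)

/* The new helper introduced by this commit. */
#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

int main(void)
{
	printf("user segment:   uaccess_kernel() = %d\n", uaccess_kernel());
	set_fs(KERNEL_DS);	/* e.g. around a kernel-internal I/O path */
	printf("kernel segment: uaccess_kernel() = %d\n", uaccess_kernel());
	set_fs(USER_DS);
	return 0;
}

The semantics are unchanged at every call site; checks against USER_DS simply become the negated form (!uaccess_kernel()), as in the MIPS r4kcache and rdma/ib.h hunks below.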
@@ -425,7 +425,7 @@ clear_user(void __user *to, long len)
 #undef __module_call
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
...
@@ -27,7 +27,7 @@
 #include <linux/string.h> /* for generic string functions */
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 /*
  * Algorithmically, for __user_ok() we want do:
...
@@ -266,7 +266,7 @@ static inline void set_fs(mm_segment_t fs)
 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
 #define user_addr_max() \
-	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
+	(uaccess_kernel() ? ~0UL : get_fs())
 /*
  * The "__xxx" versions of the user access functions do not verify the
...
@@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	unsigned long ua_flags;
 	int atomic;
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memcpy((void *)to, from, n);
 		return 0;
 	}
@@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 {
 	unsigned long ua_flags;
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+	if (uaccess_kernel()) {
 		memset((void *)addr, 0, n);
 		return 0;
 	}
...
@@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size)
 	/* Check that things do not wrap around */
 	if (addr > ULONG_MAX - size)
 		return 0;
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return 1;
 #ifdef CONFIG_MTD_UCLINUX
 	if (1)
...
@@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (!addr || addr > (0xffffffffUL - (size - 1)))
 		goto _bad_access;
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return 1;
 	if (memory_start <= addr && (addr + size - 1) < memory_end)
...
@@ -43,7 +43,7 @@
 #define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
...
@@ -375,7 +375,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define copy_to_user(to, from, n) __copy_to_user(to, from, n)
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
...
@@ -24,7 +24,7 @@
 #define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 /*
  * Explicitly allow NULL pointers here. Parts of the kernel such
  * as readv/writev use access_ok to validate pointers, but want
...
@@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 		__wsum sum, int *err_ptr)
 {
 	might_fault();
-	if (segment_eq(get_fs(), get_ds()))
+	if (uaccess_kernel())
 		return __csum_partial_copy_kernel((__force void *)src, dst,
 			len, sum, err_ptr);
 	else
@@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 {
 	might_fault();
 	if (access_ok(VERIFY_WRITE, dst, len)) {
-		if (segment_eq(get_fs(), get_ds()))
+		if (uaccess_kernel())
 			return __csum_partial_copy_kernel(src,
 				(__force void *)dst,
 				len, sum, err_ptr);
...
@@ -20,7 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
-#include <linux/uaccess.h> /* for segment_eq() */
+#include <linux/uaccess.h> /* for uaccess_kernel() */
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
@@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
 	\
 	__##pfx##flush_prologue \
 	\
-	if (segment_eq(get_fs(), USER_DS)) { \
+	if (!uaccess_kernel()) { \
 		while (1) { \
 			protected_cachee_op(hitop, addr); \
 			if (addr == aend) \
...
@@ -88,7 +88,7 @@ static inline bool eva_kernel_access(void)
 	if (!IS_ENABLED(CONFIG_EVA))
 		return false;
-	return segment_eq(get_fs(), get_ds());
+	return uaccess_kernel();
 }
 /*
...
@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHW(addr, value, res);
 			else
 				LoadHWE(addr, value, res);
@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadW(addr, value, res);
 			else
 				LoadWE(addr, value, res);
@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHWU(addr, value, res);
 			else
 				LoadHWUE(addr, value, res);
@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreHW(addr, value, res);
 			else
 				StoreHWE(addr, value, res);
@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreW(addr, value, res);
 			else
 				StoreWE(addr, value, res);
...
@@ -292,7 +292,7 @@ clear_user(void *addr, unsigned long size)
 }
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
...
@@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
 	 */
-	if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
+	if (uaccess_kernel() && !uaddr)
 		return -EFAULT;
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
...
@@ -76,7 +76,7 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
 		goto label; \
 	} while (0)
-#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
+#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
 #define get_kernel_space() (0)
 #define MERGE(w0, sh_1, w1, sh_2) ({ \
...
@@ -37,7 +37,7 @@
 static inline void set_fs(mm_segment_t fs)
 {
 	current->thread.mm_segment = fs;
-	if (segment_eq(fs, KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		set_cpu_flag(CIF_ASCE_SECONDARY);
 		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	} else {
...
@@ -7,7 +7,7 @@
 #endif
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
 long strncpy_from_user(char *dest, const char __user *src, long count);
...
@@ -36,7 +36,7 @@
  * large size and address near to PAGE_OFFSET - a fault will break his intentions.
  */
 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(type, addr, size) \
 	({ (void)(type); __access_ok((unsigned long)(addr), size); })
...
@@ -45,7 +45,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 	return __addr_range_nowrap(addr, size) &&
 		(__under_task_size(addr, size) ||
 		__access_ok_vsyscall(addr, size) ||
-		segment_eq(get_fs(), KERNEL_DS));
+		uaccess_kernel());
 }
 #endif
@@ -141,7 +141,7 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg)
 long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		memcpy(to, (__force void*)from, n);
 		return 0;
 	}
@@ -161,7 +161,7 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg)
 long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		memcpy((__force void *) to, from, n);
 		return 0;
 	}
@@ -189,7 +189,7 @@ long __strncpy_from_user(char *dst, const char __user *src, long count)
 	long n;
 	char *ptr = dst;
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		strncpy(dst, (__force void *) src, count);
 		return strnlen(dst, count);
 	}
@@ -210,7 +210,7 @@ static int clear_chunk(unsigned long addr, int len, void *unused)
 unsigned long __clear_user(void __user *mem, unsigned long len)
 {
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		memset((__force void*)mem, 0, len);
 		return 0;
 	}
@@ -235,7 +235,7 @@ long __strnlen_user(const void __user *str, long len)
 {
 	int count = 0, n;
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return strnlen((__force char*)str, len) + 1;
 	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
...
@@ -20,7 +20,7 @@
 #define __strnlen_user __strnlen_user
 #define __clear_user __clear_user
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) (((size) <= TASK_SIZE) \
 			&& ((addr) <= TASK_SIZE - (size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
...
@@ -178,7 +178,7 @@ void __show_regs(struct pt_regs *regs)
 		buf, interrupts_enabled(regs) ? "n" : "ff",
 		fast_interrupts_enabled(regs) ? "n" : "ff",
 		processor_modes[processor_mode(regs)],
-		segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
+		uaccess_kernel() ? "kernel" : "user");
 	{
 		unsigned int ctrl;
...
@@ -37,7 +37,7 @@
 #define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
 	(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
...
@@ -650,7 +650,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	dprintk("%s: write %zd bytes\n", bd->name, count);
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+	if (unlikely(uaccess_kernel()))
 		return -EINVAL;
 	bsg_set_block(bd, file);
...
@@ -581,7 +581,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 	sg_io_hdr_t *hp;
 	unsigned char cmnd[SG_MAX_CDB_SIZE];
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+	if (unlikely(uaccess_kernel()))
 		return -EINVAL;
 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
...
@@ -7,6 +7,8 @@
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
 #include <asm/uaccess.h>
 static __always_inline void pagefault_disabled_inc(void)
...
@@ -100,7 +100,7 @@ struct sockaddr_ib {
  */
 static inline bool ib_safe_file_access(struct file *filp)
 {
-	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+	return filp->f_cred == current_cred() && !uaccess_kernel();
 }
 #endif /* _RDMA_IB_H */
@@ -96,7 +96,7 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
 	if (unlikely(in_interrupt() ||
 		current->flags & (PF_KTHREAD | PF_EXITING)))
 		return -EPERM;
-	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+	if (unlikely(uaccess_kernel()))
 		return -EPERM;
 	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
 		return -EPERM;
...
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 			size_t count)
 {
 	/* It will get better. Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		direction |= ITER_KVEC;
 		i->type = direction;
 		i->kvec = (struct kvec *)iov;
...
@@ -4136,7 +4136,7 @@ void __might_fault(const char *file, int line)
 	 * get paged out, therefore we'll never actually fault, and the
 	 * below annotations will generate false positives.
 	 */
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (uaccess_kernel())
 		return;
 	if (pagefault_disabled())
 		return;
...
@@ -608,7 +608,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
 static bool tomoyo_kernel_service(void)
 {
 	/* Nothing to do if I am a kernel service. */
-	return segment_eq(get_fs(), KERNEL_DS);
+	return uaccess_kernel();
 }
 /**
...