Commit 2692a71b authored by Al Viro

Merge branch 'work.uaccess' into for-linus

parents 7041c577 b0654442
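This merge pulls Al Viro's uaccess hardening work into for-linus. The common theme of the hunks below: copy_from_user() must never leave uninitialized kernel memory in the destination buffer. If the access check fails or the copy faults partway through, the uncopied tail has to be zeroed, and that zeroing belongs in the generic C wrapper rather than in each architecture's assembly. A minimal userspace mock-up of the contract (access_ok_mock and raw_copy_mock are stand-ins, not kernel APIs):

#include <string.h>

/* Stand-ins for the kernel's annotations and primitives. */
#define __user
#define likely(x)	(x)
#define unlikely(x)	(x)

static int access_ok_mock(const void __user *p, unsigned long n)
{
	return p != NULL;		/* the real access_ok() checks the range */
}

/* Hypothetical raw copier: returns the number of bytes NOT copied. */
static unsigned long raw_copy_mock(void *to, const void __user *from,
				   unsigned long n)
{
	memcpy(to, from, n);		/* pretend nothing faulted */
	return 0;
}

static unsigned long copy_from_user_sketch(void *to, const void __user *from,
					   unsigned long n)
{
	unsigned long res = n;		/* worst case: nothing was copied */

	if (likely(access_ok_mock(from, n)))
		res = raw_copy_mock(to, from, n);
	if (unlikely(res))		/* zero exactly the uncopied tail */
		memset((char *)to + (n - res), 0, res);
	return res;
}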
arch/alpha/include/asm/uaccess.h
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
 extern inline long
 copy_from_user(void *to, const void __user *from, long n)
 {
+	long res = n;
 	if (likely(__access_ok((unsigned long)from, n, get_fs())))
-		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
-	else
-		memset(to, 0, n);
-	return n;
+		res = __copy_from_user_inatomic(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 extern void __do_clear_user(void);
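The arithmetic above recurs in every C hunk of this merge: res starts at n, so a rejected __access_ok() leaves res == n and the memset() wipes the entire buffer, while a partial fault wipes only the bytes the copy never wrote. A small self-contained check of that index math (the values are made up):

#include <assert.h>
#include <string.h>

int main(void)
{
	char to[16];
	unsigned long n = sizeof(to), res;

	memset(to, 0xAA, n);		/* stand-in for stale kernel data */
	res = 6;			/* pretend the copy faulted with 6 bytes left */
	memset(to + (n - res), 0, res);	/* zeroes to[10]..to[15] only */
	assert(to[9] == (char)0xAA && to[10] == 0);

	memset(to, 0xAA, n);
	res = n;			/* pretend __access_ok() failed outright */
	memset(to + (n - res), 0, res);	/* zeroes the whole buffer */
	assert(to[0] == 0 && to[15] == 0);
	return 0;
}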
arch/alpha/lib/copy_user.S
@@ -124,22 +124,8 @@ $65:
 	bis $31,$31,$0
 $41:
 $35:
-$exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere. */
-
-	mov $0,$1
-$101:
-	EXO ( ldq_u $2,0($6) )
-	subq $1,1,$1
-	mskbl $2,$6,$2
-	EXO ( stq_u $2,0($6) )
-	addq $6,1,$6
-	bgt $1,$101
-
 $exitout:
 	ret $31,($28),1
 
 	.end __copy_user
arch/alpha/lib/ev6-copy_user.S
@@ -227,33 +227,12 @@ $dirtyentry:
 	bgt $0,$onebyteloop	# U  .. .. ..	: U L U L
 
 $zerolength:
+$exitin:
 $exitout:			# Destination for exception recovery(?)
 	nop			# .. .. .. E
 	nop			# .. .. E  ..
 	nop			# .. E  .. ..
 	ret $31,($28),1		# L0 .. .. ..	: L U L U
 
-$exitin:
-	/* A stupid byte-by-byte zeroing of the rest of the output
-	   buffer.  This cures security holes by never leaving
-	   random kernel data around to be copied elsewhere. */
-
-	nop
-	nop
-	nop
-	mov $0,$1
-
-$101:
-	EXO ( stb $31,0($6) )	# L
-	subq $1,1,$1		# E
-	addq $6,1,$6		# E
-	bgt $1,$101		# U
-
-	nop
-	nop
-	nop
-	ret $31,($28),1		# L0
-
 	.end __copy_user
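Both alpha hunks delete the same idea: with the C wrapper now responsible for zeroing, the exception fixups only need to reach $exitout with the not-copied count in $0, and the byte-at-a-time $exitin path is dead code. What the deleted $101 loop did, as a rough C transliteration ($6 held the current destination pointer and $1 the remaining count; the helper name is mine):

/* Rough C equivalent of the removed $exitin/$101 fixup loop: zero the
 * destination bytes the faulted copy never wrote.  Redundant now that
 * copy_from_user() performs the same zeroing with one memset(). */
static void exitin_equivalent(char *dst, long remaining)
{
	while (remaining > 0) {		/* bgt $1,$101 */
		*dst = 0;		/* EXO-protected byte store */
		dst++;			/* addq $6,1,$6 */
		remaining--;		/* subq $1,1,$1 */
	}
}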
arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	struct user_regs_struct uregs;
 
 	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
-	if (!err)
-		set_current_blocked(&set);
 	err |= __copy_from_user(&uregs.scratch,
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 	if (err)
 		return err;
 
+	set_current_blocked(&set);
+
 	regs->bta	= uregs.scratch.bta;
 	regs->lp_start	= uregs.scratch.lp_start;
 	regs->lp_end	= uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	regs->r0	= uregs.scratch.r0;
 	regs->sp	= uregs.scratch.sp;
 
-	return err;
+	return 0;
 }
 
 static inline int is_do_ss_needed(unsigned int magic)
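The ARC change is about ordering as much as the return value: the old code called set_current_blocked() before the uc_mcontext reads had been validated, and then returned whatever was left in err. The new flow performs every __copy_from_user() first, bails out on any failure, and only then commits the new signal mask, returning an explicit 0. A sketch of that validate-then-commit shape (the helper names are illustrative, not the kernel's):

struct saved_ctx { unsigned long bta, lp_start, lp_end; };

static int read_user_ctx(struct saved_ctx *tmp)
{
	/* all the __copy_from_user() calls go here */
	return 0;
}

static void commit_ctx(const struct saved_ctx *tmp)
{
	/* set_current_blocked() and register writeback go here */
}

static int restore_sketch(void)
{
	struct saved_ctx tmp;
	int err = read_user_ctx(&tmp);

	if (err)
		return err;	/* no task state has been touched yet */
	commit_ctx(&tmp);	/* mutate state only after every read succeeded */
	return 0;		/* explicit success, not a recycled err */
}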
arch/arm/include/asm/uaccess.h
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
-		memset(to, 0, n);
-	return n;
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = __copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
arch/arm/lib/copy_from_user.S
@@ -98,12 +98,9 @@ ENDPROC(arm_copy_from_user)
 	.pushsection .fixup,"ax"
 	.align 0
 	copy_abort_preamble
-	ldmfd	sp!, {r1, r2}
-	sub	r3, r0, r1
-	rsb	r1, r3, r2
-	str	r1, [sp]
-	bl	__memzero
-	ldr	r0, [sp], #4
+	ldmfd	sp!, {r1, r2, r3}
+	sub	r0, r0, r1
+	rsb	r0, r0, r2
 	copy_abort_end
 	.popsection
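The new ARM fixup no longer zeroes anything in assembly; it only reconstructs the bytes-not-copied return value from the registers saved in the preamble. Reading the three instructions as C (the register roles are inferred from the save/restore, so treat the names as assumptions):

/* sub r0, r0, r1  =>  copied    = faulting_dst - original_dst
 * rsb r0, r0, r2  =>  remaining = n - copied   (returned in r0) */
static unsigned long arm_fixup_return(const char *faulting_dst,
				      const char *original_dst,
				      unsigned long n)
{
	unsigned long copied = (unsigned long)(faulting_dst - original_dst);

	return n - copied;	/* the C caller's memset() zeroes these bytes */
}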
arch/arm64/include/asm/uaccess.h
@@ -278,14 +278,16 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
+
 	kasan_check_write(to, n);
 
 	if (access_ok(VERIFY_READ, from, n)) {
 		check_object_size(to, n, false);
-		n = __arch_copy_from_user(to, from, n);
-	} else /* security hole - plug it */
-		memset(to, 0, n);
-
-	return n;
+		res = __arch_copy_from_user(to, from, n);
+	}
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
arch/arm64/lib/copy_from_user.S
@@ -79,11 +79,6 @@ ENDPROC(__arch_copy_from_user)
 	.section .fixup,"ax"
 	.align	2
-9998:
-	sub	x0, end, dst
-9999:
-	strb	wzr, [dst], #1			// zero remaining buffer space
-	cmp	dst, end
-	b.lo	9999b
+9998:	sub	x0, end, dst			// bytes not copied
 	ret
 	.previous
arch/blackfin/include/asm/uaccess.h
@@ -163,18 +163,29 @@ static inline int bad_user_access_length(void)
 		: "a" (__ptr(ptr)));		\
 	})
 
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
 static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	memcpy((void __force *)to, from, n);
+	SSYNC();
+	return 0;
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (likely(access_ok(VERIFY_READ, from, n))) {
-		memcpy(to, (const void __force *)from, n);
-		return 0;
-	}
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		return __copy_from_user(to, from, n);
 	memset(to, 0, n);
 	return n;
 }
@@ -182,12 +193,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		memcpy((void __force *)to, from, n);
-	else
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		return __copy_to_user(to, from, n);
 	return n;
-	SSYNC();
-	return 0;
 }
 
 /*
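On blackfin the refactor separates the unchecked primitives from the checked wrappers: __copy_from_user()/__copy_to_user() are now bare memcpy() wrappers (blackfin is a nommu port, so access_ok() is the only gate), while copy_from_user()/copy_to_user() add the range check and the failure behavior. An illustrative caller showing when each variant applies (the function and struct names are invented):

#include <linux/uaccess.h>
#include <linux/errno.h>

struct two_words { int a, b; };

static long fetch_two_words(struct two_words *k,
			    const struct two_words __user *u)
{
	/* validate the whole user range once up front */
	if (!access_ok(VERIFY_READ, u, sizeof(*u)))
		return -EFAULT;

	/* the range was just validated, so the unchecked primitives
	 * are the idiomatic fast path for the individual fields */
	if (__copy_from_user(&k->a, &u->a, sizeof(k->a)))
		return -EFAULT;
	if (__copy_from_user(&k->b, &u->b, sizeof(k->b)))
		return -EFAULT;
	return 0;
}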