Commit 9085b34d authored by Robin Murphy, committed by Catalin Marinas

arm64: uaccess: Formalise types for access_ok()

In converting __range_ok() into a static inline, I inadvertently made
it more type-safe, but without considering the ordering of the relevant
conversions. This leads to quite a lot of Sparse noise about the fact
that we use __chk_user_ptr() after addr has already been converted from
a user pointer to an unsigned long.

Rather than just adding another cast for the sake of shutting Sparse up,
it seems reasonable to rework the types to make logical sense (although
the resulting codegen for __range_ok() remains identical). The only
callers this affects directly are our compat traps where the inferred
"user-pointer-ness" of a register value now warrants explicit casting.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 04c49273
...@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs) ...@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
* This is equivalent to the following test: * This is equivalent to the following test:
* (u65)addr + (u65)size <= (u65)current->addr_limit + 1 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/ */
static inline unsigned long __range_ok(unsigned long addr, unsigned long size) static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{ {
unsigned long limit = current_thread_info()->addr_limit; unsigned long ret, limit = current_thread_info()->addr_limit;
__chk_user_ptr(addr); __chk_user_ptr(addr);
asm volatile( asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps: // A + B <= C + 1 for all A,B,C, in four easy steps:
// 1: X = A + B; X' = X % 2^64 // 1: X = A + B; X' = X % 2^64
" adds %0, %0, %2\n" " adds %0, %3, %2\n"
// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
" csel %1, xzr, %1, hi\n" " csel %1, xzr, %1, hi\n"
// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
...@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size) ...@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
// testing X' - C == 0, subject to the previous adjustments. // testing X' - C == 0, subject to the previous adjustments.
" sbcs xzr, %0, %1\n" " sbcs xzr, %0, %1\n"
" cset %0, ls\n" " cset %0, ls\n"
: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
return addr; return ret;
} }
/* /*
...@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size) ...@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
*/ */
#define untagged_addr(addr) sign_extend64(addr, 55) #define untagged_addr(addr) sign_extend64(addr, 55)
#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) #define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs #define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \ #define _ASM_EXTABLE(from, to) \
......
...@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) ...@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
static int swp_handler(struct pt_regs *regs, u32 instr) static int swp_handler(struct pt_regs *regs, u32 instr)
{ {
u32 destreg, data, type, address = 0; u32 destreg, data, type, address = 0;
const void __user *user_ptr;
int rn, rt2, res = 0; int rn, rt2, res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
...@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr) ...@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data); aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */ /* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) { user_ptr = (const void __user *)(unsigned long)(address & ~3);
if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n", pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
address); address);
goto fault; goto fault;
......
...@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) ...@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
if (end < start || flags) if (end < start || flags)
return -EINVAL; return -EINVAL;
if (!access_ok(VERIFY_READ, start, end - start)) if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
return -EFAULT; return -EFAULT;
return __do_compat_cache_op(start, end); return __do_compat_cache_op(start, end);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment