Commit 3fa3db32 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/align: Convert emulate_spe() to user_access_begin

This patch converts emulate_spe() to the user_access_begin() logic.

Since commit 662bbcb2 ("mm, sched: Allow uaccess in atomic with
pagefault_disable()"), might_fault() doesn't fire when called from
sections where pagefaults are disabled, which must be the case
when using _inatomic variants of __get_user and __put_user. So
the might_fault() in user_access_begin() is not a problem.
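
As a simplified sketch (not the verbatim kernel source), the short-circuit
behaves roughly like this:

void might_fault(void)
{
	if (pagefault_disabled())	/* set by pagefault_disable() */
		return;			/* no sleep-in-atomic warning */
	might_sleep();			/* may fault, hence may sleep */
}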

There was a verification of user_mode() together with the access_ok(), but
a second user_mode() verification just after it already leads to an
immediate return. The access_ok() is now part of user_access_begin(),
which is called after that second user_mode() check, so there is no need
to check user_mode() again.
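
The resulting construct is the usual user_access_begin() pattern: open the
access window, use unsafe_get_user()/unsafe_put_user(), which branch to an
error label on fault, and close the window on both the success and the
error path. A minimal sketch of the read side, where read_user_word() is an
illustrative helper rather than part of this patch:

static int read_user_word(u32 __user *p, u32 *val)
{
	u32 tmp;

	if (!user_read_access_begin(p, sizeof(*p)))
		return -EFAULT;
	unsafe_get_user(tmp, p, Efault);	/* branches to Efault on fault */
	user_read_access_end();

	*val = tmp;
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}
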
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c95a648fdf75992c9d88f3c73cc23e7537fcf2ad.1615555354.git.christophe.leroy@csgroup.eu
parent 9bd68dc5
arch/powerpc/kernel/align.c

@@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = {
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 		       struct ppc_inst ppc_instr)
 {
-	int ret;
 	union {
 		u64 ll;
 		u32 w[2];
@@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 	nb = spe_aligninfo[instr].len;
 	flags = spe_aligninfo[instr].flags;
 
-	/* Verify the address of the operand */
-	if (unlikely(user_mode(regs) &&
-		     !access_ok(addr, nb)))
-		return -EFAULT;
-
 	/* userland only */
 	if (unlikely(!user_mode(regs)))
 		return 0;
@@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 		}
 	} else {
 		temp.ll = data.ll = 0;
-		ret = 0;
 		p = addr;
 
+		if (!user_read_access_begin(addr, nb))
+			return -EFAULT;
+
 		switch (nb) {
 		case 8:
-			ret |= __get_user_inatomic(temp.v[0], p++);
-			ret |= __get_user_inatomic(temp.v[1], p++);
-			ret |= __get_user_inatomic(temp.v[2], p++);
-			ret |= __get_user_inatomic(temp.v[3], p++);
+			unsafe_get_user(temp.v[0], p++, Efault_read);
+			unsafe_get_user(temp.v[1], p++, Efault_read);
+			unsafe_get_user(temp.v[2], p++, Efault_read);
+			unsafe_get_user(temp.v[3], p++, Efault_read);
 			fallthrough;
 		case 4:
-			ret |= __get_user_inatomic(temp.v[4], p++);
-			ret |= __get_user_inatomic(temp.v[5], p++);
+			unsafe_get_user(temp.v[4], p++, Efault_read);
+			unsafe_get_user(temp.v[5], p++, Efault_read);
 			fallthrough;
 		case 2:
-			ret |= __get_user_inatomic(temp.v[6], p++);
-			ret |= __get_user_inatomic(temp.v[7], p++);
-			if (unlikely(ret))
-				return -EFAULT;
+			unsafe_get_user(temp.v[6], p++, Efault_read);
+			unsafe_get_user(temp.v[7], p++, Efault_read);
 		}
+		user_read_access_end();
 
 		switch (instr) {
 		case EVLDD:
@@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
 	/* Store result to memory or update registers */
 	if (flags & ST) {
-		ret = 0;
 		p = addr;
+
+		if (!user_write_access_begin(addr, nb))
+			return -EFAULT;
+
 		switch (nb) {
 		case 8:
-			ret |= __put_user_inatomic(data.v[0], p++);
-			ret |= __put_user_inatomic(data.v[1], p++);
-			ret |= __put_user_inatomic(data.v[2], p++);
-			ret |= __put_user_inatomic(data.v[3], p++);
+			unsafe_put_user(data.v[0], p++, Efault_write);
+			unsafe_put_user(data.v[1], p++, Efault_write);
+			unsafe_put_user(data.v[2], p++, Efault_write);
+			unsafe_put_user(data.v[3], p++, Efault_write);
 			fallthrough;
 		case 4:
-			ret |= __put_user_inatomic(data.v[4], p++);
-			ret |= __put_user_inatomic(data.v[5], p++);
+			unsafe_put_user(data.v[4], p++, Efault_write);
+			unsafe_put_user(data.v[5], p++, Efault_write);
 			fallthrough;
 		case 2:
-			ret |= __put_user_inatomic(data.v[6], p++);
-			ret |= __put_user_inatomic(data.v[7], p++);
+			unsafe_put_user(data.v[6], p++, Efault_write);
+			unsafe_put_user(data.v[7], p++, Efault_write);
 		}
-		if (unlikely(ret))
-			return -EFAULT;
+		user_write_access_end();
 	} else {
 		*evr = data.w[0];
 		regs->gpr[reg] = data.w[1];
 	}
 
 	return 1;
+
+Efault_read:
+	user_read_access_end();
+	return -EFAULT;
+
+Efault_write:
+	user_write_access_end();
+	return -EFAULT;
 }
 #endif /* CONFIG_SPE */