Commit a6e3cf70 authored by Tony Luck, committed by Borislav Petkov

x86/mce: Change to not send SIGBUS error during copy from user

Sending a SIGBUS for a copy from user is not the correct semantic.
System calls should return -EFAULT (or a short count for write(2)).
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210818002942.1607544-3-tony.luck@intel.com
parent e4e737bb
...@@ -1272,7 +1272,7 @@ static void kill_me_maybe(struct callback_head *cb) ...@@ -1272,7 +1272,7 @@ static void kill_me_maybe(struct callback_head *cb)
flags |= MF_MUST_KILL; flags |= MF_MUST_KILL;
ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags); ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) { if (!ret) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
sync_core(); sync_core();
return; return;
...@@ -1286,15 +1286,21 @@ static void kill_me_maybe(struct callback_head *cb) ...@@ -1286,15 +1286,21 @@ static void kill_me_maybe(struct callback_head *cb)
if (ret == -EHWPOISON) if (ret == -EHWPOISON)
return; return;
if (p->mce_vaddr != (void __user *)-1l) { pr_err("Memory error not recovered");
force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT); kill_me_now(cb);
} else { }
pr_err("Memory error not recovered");
kill_me_now(cb); static void kill_me_never(struct callback_head *cb)
} {
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
p->mce_count = 0;
pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
if (!memory_failure(p->mce_addr >> PAGE_SHIFT, 0))
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
} }
static void queue_task_work(struct mce *m, char *msg, int kill_current_task) static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
{ {
int count = ++current->mce_count; int count = ++current->mce_count;
...@@ -1304,11 +1310,7 @@ static void queue_task_work(struct mce *m, char *msg, int kill_current_task) ...@@ -1304,11 +1310,7 @@ static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
current->mce_kflags = m->kflags; current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m); current->mce_whole_page = whole_page(m);
current->mce_kill_me.func = func;
if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
} }
/* Ten is likely overkill. Don't expect more than two faults before task_work() */ /* Ten is likely overkill. Don't expect more than two faults before task_work() */
...@@ -1459,7 +1461,10 @@ noinstr void do_machine_check(struct pt_regs *regs) ...@@ -1459,7 +1461,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */ /* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs)); BUG_ON(!on_thread_stack() || !user_mode(regs));
queue_task_work(&m, msg, kill_current_task); if (kill_current_task)
queue_task_work(&m, msg, kill_me_now);
else
queue_task_work(&m, msg, kill_me_maybe);
} else { } else {
/* /*
...@@ -1477,7 +1482,7 @@ noinstr void do_machine_check(struct pt_regs *regs) ...@@ -1477,7 +1482,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
} }
if (m.kflags & MCE_IN_KERNEL_COPYIN) if (m.kflags & MCE_IN_KERNEL_COPYIN)
queue_task_work(&m, msg, kill_current_task); queue_task_work(&m, msg, kill_me_never);
} }
out: out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment