Commit ab29b33a authored by Linus Torvalds

Merge tag 'seccomp-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull seccomp fixes from Kees Cook:
 "Fix UM seccomp vs ptrace, after reordering landed"

* tag 'seccomp-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  seccomp: Remove 2-phase API documentation
  um/ptrace: Fix the syscall number update after a ptrace
  um/ptrace: Fix the syscall_trace_leave call
parents 08411a75 4fadd04d
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
 	    results in the system call being skipped immediately.
 	  - seccomp syscall wired up
 
-	  For best performance, an arch should use seccomp_phase1 and
-	  seccomp_phase2 directly. It should call seccomp_phase1 for all
-	  syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
-	  need to be called from a ptrace-safe context. It must then
-	  call seccomp_phase2 if seccomp_phase1 returns anything other
-	  than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
-	  As an additional optimization, an arch may provide seccomp_data
-	  directly to seccomp_phase1; this avoids multiple calls
-	  to the syscall_xyz helpers for every syscall.
-
 config SECCOMP_FILTER
 	def_bool y
 	depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
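With the two-phase seccomp_phase1()/seccomp_phase2() entry points gone, the only hook left to an architecture is the single secure_computing() call made on syscall entry (UML uses it with a NULL argument in the hunk below). A minimal sketch of that pattern follows, assuming the v4.8-era secure_computing(const struct seccomp_data *) signature; the helper name arch_syscall_entry_example() is hypothetical and not part of this commit.

	/*
	 * Minimal sketch, not from this tree: how an arch syscall-entry path
	 * hooks seccomp now that only the single-call API remains.
	 */
	#include <linux/seccomp.h>
	#include <linux/thread_info.h>

	static long arch_syscall_entry_example(long syscall_nr)
	{
		if (test_thread_flag(TIF_SECCOMP)) {
			/*
			 * Passing NULL lets the seccomp core populate
			 * seccomp_data itself via the syscall_get_*() helpers;
			 * an arch may pass a pre-filled struct seccomp_data
			 * instead to avoid those calls.
			 */
			if (secure_computing(NULL) == -1)
				return -1;	/* filter skipped or killed the syscall */
		}
		return syscall_nr;
	}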
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
 	if (syscall_trace_enter(regs))
-		return;
+		goto out;
 
 	/* Do the seccomp check after ptrace; failures should be fast. */
 	if (secure_computing(NULL) == -1)
-		return;
+		goto out;
 
-	/* Update the syscall number after orig_ax has potentially been updated
-	 * with ptrace.
-	 */
-	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	syscall = UPT_SYSCALL_NR(r);
-
 	if (syscall >= 0 && syscall <= __NR_syscall_max)
 		PT_REGS_SET_SYSCALL_RETURN(regs,
 				EXECUTE_SYSCALL(syscall, regs));
 
+out:
 	syscall_trace_leave(regs);
 }
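For reference, here is the resulting control flow reconstructed from the hunk above; the first two lines of the body are assumed from context and are not part of this diff. Both early exits now fall through to syscall_trace_leave(), so a ptrace tracer always gets its syscall-exit stop, and the syscall number is simply re-read from the register set, which the putreg() changes below keep up to date.

	void handle_syscall(struct uml_pt_regs *r)
	{
		struct pt_regs *regs = container_of(r, struct pt_regs, regs);
		int syscall;

		PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);

		if (syscall_trace_enter(regs))
			goto out;	/* the tracer cancelled the syscall */

		/* Do the seccomp check after ptrace; failures should be fast. */
		if (secure_computing(NULL) == -1)
			goto out;	/* seccomp skipped (or killed) the syscall */

		/* May have been rewritten by the tracer via putreg(), see below. */
		syscall = UPT_SYSCALL_NR(r);
		if (syscall >= 0 && syscall <= __NR_syscall_max)
			PT_REGS_SET_SYSCALL_RETURN(regs,
					EXECUTE_SYSCALL(syscall, regs));

	out:
		/* Always reached, so the tracer gets its syscall-exit stop. */
		syscall_trace_leave(regs);
	}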
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
 	case EAX:
 	case EIP:
 	case UESP:
+		break;
 	case ORIG_EAX:
+		/* Update the syscall number. */
+		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
 		break;
 	case FS:
 		if (value && (value & 3) != 3)
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
 	case RSI:
 	case RDI:
 	case RBP:
+		break;
+
 	case ORIG_RAX:
+		/* Update the syscall number. */
+		UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
 		break;
 	case FS:
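Taken together with the handle_syscall() change, these putreg() additions mean a tracer that rewrites ORIG_EAX/ORIG_RAX now really changes which syscall UML executes, or cancels it by writing -1. A userspace usage sketch follows, assuming an x86_64 host layout and the standard PTRACE_POKEUSER interface; cancel_current_syscall() is a hypothetical helper name, not part of this commit.

	/*
	 * Usage sketch: a tracer rewrites the syscall number of a stopped
	 * tracee through the orig_rax slot, which the putreg() change above
	 * now forwards into UML's UPT_SYSCALL_NR.
	 */
	#include <stddef.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/user.h>

	static void cancel_current_syscall(pid_t child)
	{
		/* This offset reaches putreg() as the ORIG_RAX register. */
		long off = offsetof(struct user, regs.orig_rax);

		/* Writing -1 makes handle_syscall() fail its range check and
		 * leave the prepared -ENOSYS return value in place. */
		if (ptrace(PTRACE_POKEUSER, child, (void *)off, (void *)-1L) == -1)
			perror("PTRACE_POKEUSER");
	}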