Commit ae4ea397 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull rseq fixes from Ingo Molnar:
 "Various rseq ABI fixes and cleanups: use get_user()/put_user(),
  validate parameters and use proper uapi types, etc"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  rseq/selftests: cleanup: Update comment above rseq_prepare_unload
  rseq: Remove unused types_32_64.h uapi header
  rseq: uapi: Declare rseq_cs field as union, update includes
  rseq: uapi: Update uapi comments
  rseq: Use get_user/put_user rather than __get_user/__put_user
  rseq: Use __u64 for rseq_cs fields, validate user inputs
parents 4659fc84 8a465801
...@@ -10,13 +10,8 @@ ...@@ -10,13 +10,8 @@
* Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/ */
#ifdef __KERNEL__ #include <linux/types.h>
# include <linux/types.h> #include <asm/byteorder.h>
#else
# include <stdint.h>
#endif
#include <linux/types_32_64.h>
enum rseq_cpu_id_state { enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1, RSEQ_CPU_ID_UNINITIALIZED = -1,
...@@ -52,10 +47,10 @@ struct rseq_cs { ...@@ -52,10 +47,10 @@ struct rseq_cs {
__u32 version; __u32 version;
/* enum rseq_cs_flags */ /* enum rseq_cs_flags */
__u32 flags; __u32 flags;
LINUX_FIELD_u32_u64(start_ip); __u64 start_ip;
/* Offset from start_ip. */ /* Offset from start_ip. */
LINUX_FIELD_u32_u64(post_commit_offset); __u64 post_commit_offset;
LINUX_FIELD_u32_u64(abort_ip); __u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64)))); } __attribute__((aligned(4 * sizeof(__u64))));
/* /*
...@@ -67,28 +62,30 @@ struct rseq_cs { ...@@ -67,28 +62,30 @@ struct rseq_cs {
struct rseq { struct rseq {
/* /*
* Restartable sequences cpu_id_start field. Updated by the * Restartable sequences cpu_id_start field. Updated by the
* kernel, and read by user-space with single-copy atomicity * kernel. Read by user-space with single-copy atomicity
* semantics. Aligned on 32-bit. Always contains a value in the * semantics. This field should only be read by the thread which
* range of possible CPUs, although the value may not be the * registered this data structure. Aligned on 32-bit. Always
* actual current CPU (e.g. if rseq is not initialized). This * contains a value in the range of possible CPUs, although the
* CPU number value should always be compared against the value * value may not be the actual current CPU (e.g. if rseq is not
* of the cpu_id field before performing a rseq commit or * initialized). This CPU number value should always be compared
* returning a value read from a data structure indexed using * against the value of the cpu_id field before performing a rseq
* the cpu_id_start value. * commit or returning a value read from a data structure indexed
* using the cpu_id_start value.
*/ */
__u32 cpu_id_start; __u32 cpu_id_start;
/* /*
* Restartable sequences cpu_id field. Updated by the kernel, * Restartable sequences cpu_id field. Updated by the kernel.
* and read by user-space with single-copy atomicity semantics. * Read by user-space with single-copy atomicity semantics. This
* Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and * field should only be read by the thread which registered this
* RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the * data structure. Aligned on 32-bit. Values
* former means "rseq uninitialized", and latter means "rseq * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
* initialization failed". This value is meant to be read within * have a special semantic: the former means "rseq uninitialized",
* rseq critical sections and compared with the cpu_id_start * and latter means "rseq initialization failed". This value is
* value previously read, before performing the commit instruction, * meant to be read within rseq critical sections and compared
* or read and compared with the cpu_id_start value before returning * with the cpu_id_start value previously read, before performing
* a value loaded from a data structure indexed using the * the commit instruction, or read and compared with the
* cpu_id_start value. * cpu_id_start value before returning a value loaded from a data
* structure indexed using the cpu_id_start value.
*/ */
__u32 cpu_id; __u32 cpu_id;
/* /*
...@@ -105,27 +102,44 @@ struct rseq { ...@@ -105,27 +102,44 @@ struct rseq {
* targeted by the rseq_cs. Also needs to be set to NULL by user-space * targeted by the rseq_cs. Also needs to be set to NULL by user-space
* before reclaiming memory that contains the targeted struct rseq_cs. * before reclaiming memory that contains the targeted struct rseq_cs.
* *
* Read and set by the kernel with single-copy atomicity semantics. * Read and set by the kernel. Set by user-space with single-copy
* Set by user-space with single-copy atomicity semantics. Aligned * atomicity semantics. This field should only be updated by the
* on 64-bit. * thread which registered this data structure. Aligned on 64-bit.
*/ */
LINUX_FIELD_u32_u64(rseq_cs); union {
__u64 ptr64;
#ifdef __LP64__
__u64 ptr;
#else
struct {
#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
__u32 padding; /* Initialized to zero. */
__u32 ptr32;
#else /* LITTLE */
__u32 ptr32;
__u32 padding; /* Initialized to zero. */
#endif /* ENDIAN */
} ptr;
#endif
} rseq_cs;
/* /*
* - RSEQ_DISABLE flag: * Restartable sequences flags field.
*
* This field should only be updated by the thread which
* registered this data structure. Read by the kernel.
* Mainly used for single-stepping through rseq critical sections
* with debuggers.
* *
* Fallback fast-track flag for single-stepping.
* Set by user-space if lack of progress is detected.
* Cleared by user-space after rseq finish.
* Read by the kernel.
* - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
* Inhibit instruction sequence block restart and event * Inhibit instruction sequence block restart on preemption
* counter increment on preemption for this thread. * for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
* Inhibit instruction sequence block restart and event * Inhibit instruction sequence block restart on signal
* counter increment on signal delivery for this thread. * delivery for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
* Inhibit instruction sequence block restart and event * Inhibit instruction sequence block restart on migration for
* counter increment on migration for this thread. * this thread.
*/ */
__u32 flags; __u32 flags;
} __attribute__((aligned(4 * sizeof(__u64)))); } __attribute__((aligned(4 * sizeof(__u64))));
......
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_TYPES_32_64_H
#define _UAPI_LINUX_TYPES_32_64_H
/*
 * linux/types_32_64.h
 *
 * Integer type declaration for pointers across 32-bit and 64-bit systems.
 *
 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
/*
 * Pick the integer-type header appropriate for the compilation context:
 * kernel builds use <linux/types.h> (__u32/__u64), userspace builds use
 * the standard <stdint.h>.
 */
#ifdef __KERNEL__
# include <linux/types.h>
#else
# include <stdint.h>
#endif
#include <asm/byteorder.h>
/*
 * Normalize endianness detection into a single pair of local macros.
 * Userspace toolchains typically define __BYTE_ORDER (glibc <endian.h>
 * convention); kernel uapi headers define __BIG_ENDIAN/__LITTLE_ENDIAN
 * directly via <asm/byteorder.h>. Handle both conventions here.
 */
#ifdef __BYTE_ORDER
# if (__BYTE_ORDER == __BIG_ENDIAN)
#  define LINUX_BYTE_ORDER_BIG_ENDIAN
# else
#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
# endif
#else
# ifdef __BIG_ENDIAN
#  define LINUX_BYTE_ORDER_BIG_ENDIAN
# else
#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
# endif
#endif
/*
 * LINUX_FIELD_u32_u64(field) declares a pointer-sized field that occupies
 * 64 bits of ABI space on every architecture:
 * - on LP64 targets it is simply a __u64;
 * - on 32-bit targets it is a __u32 value plus a __u32 zero padding word,
 *   with the padding placed so that the value word lands in the
 *   least-significant half of the 64-bit slot (padding first on
 *   big-endian, padding last on little-endian).
 * LINUX_FIELD_u32_u64_INIT_ONSTACK() provides the matching designated
 * initializer list, zeroing the padding word on 32-bit targets.
 */
#ifdef __LP64__
# define LINUX_FIELD_u32_u64(field) __u64 field
# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
#else
# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
field ## _padding = 0, field = (intptr_t)v
# else
# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
field = (intptr_t)v, field ## _padding = 0
# endif
#endif
#endif /* _UAPI_LINUX_TYPES_32_64_H */
...@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t) ...@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
{ {
u32 cpu_id = raw_smp_processor_id(); u32 cpu_id = raw_smp_processor_id();
if (__put_user(cpu_id, &t->rseq->cpu_id_start)) if (put_user(cpu_id, &t->rseq->cpu_id_start))
return -EFAULT; return -EFAULT;
if (__put_user(cpu_id, &t->rseq->cpu_id)) if (put_user(cpu_id, &t->rseq->cpu_id))
return -EFAULT; return -EFAULT;
trace_rseq_update(t); trace_rseq_update(t);
return 0; return 0;
...@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) ...@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
/* /*
* Reset cpu_id_start to its initial state (0). * Reset cpu_id_start to its initial state (0).
*/ */
if (__put_user(cpu_id_start, &t->rseq->cpu_id_start)) if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
return -EFAULT; return -EFAULT;
/* /*
* Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
* in after unregistration can figure out that rseq needs to be * in after unregistration can figure out that rseq needs to be
* registered again. * registered again.
*/ */
if (__put_user(cpu_id, &t->rseq->cpu_id)) if (put_user(cpu_id, &t->rseq->cpu_id))
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
...@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t) ...@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{ {
struct rseq_cs __user *urseq_cs; struct rseq_cs __user *urseq_cs;
unsigned long ptr; u64 ptr;
u32 __user *usig; u32 __user *usig;
u32 sig; u32 sig;
int ret; int ret;
ret = __get_user(ptr, &t->rseq->rseq_cs); if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
if (ret) return -EFAULT;
return ret;
if (!ptr) { if (!ptr) {
memset(rseq_cs, 0, sizeof(*rseq_cs)); memset(rseq_cs, 0, sizeof(*rseq_cs));
return 0; return 0;
} }
urseq_cs = (struct rseq_cs __user *)ptr; if (ptr >= TASK_SIZE)
return -EINVAL;
urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs))) if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
return -EFAULT; return -EFAULT;
if (rseq_cs->version > 0)
return -EINVAL;
if (rseq_cs->start_ip >= TASK_SIZE ||
rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
rseq_cs->abort_ip >= TASK_SIZE ||
rseq_cs->version > 0)
return -EINVAL;
/* Check for overflow. */
if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
return -EINVAL;
/* Ensure that abort_ip is not in the critical section. */ /* Ensure that abort_ip is not in the critical section. */
if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset) if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
return -EINVAL; return -EINVAL;
usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32)); usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
ret = get_user(sig, usig); ret = get_user(sig, usig);
if (ret) if (ret)
return ret; return ret;
...@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) ...@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
printk_ratelimited(KERN_WARNING printk_ratelimited(KERN_WARNING
"Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n", "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
sig, current->rseq_sig, current->pid, usig); sig, current->rseq_sig, current->pid, usig);
return -EPERM; return -EINVAL;
} }
return 0; return 0;
} }
...@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags) ...@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
int ret; int ret;
/* Get thread flags. */ /* Get thread flags. */
ret = __get_user(flags, &t->rseq->flags); ret = get_user(flags, &t->rseq->flags);
if (ret) if (ret)
return ret; return ret;
...@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t) ...@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t)
* of code outside of the rseq assembly block. This performs * of code outside of the rseq assembly block. This performs
* a lazy clear of the rseq_cs field. * a lazy clear of the rseq_cs field.
* *
* Set rseq_cs to NULL with single-copy atomicity. * Set rseq_cs to NULL.
*/ */
return __put_user(0UL, &t->rseq->rseq_cs); if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
return -EFAULT;
return 0;
} }
/* /*
......
...@@ -133,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void) ...@@ -133,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void)
return cpu; return cpu;
} }
static inline void rseq_clear_rseq_cs(void)
{
#ifdef __LP64__
__rseq_abi.rseq_cs.ptr = 0;
#else
__rseq_abi.rseq_cs.ptr.ptr32 = 0;
#endif
}
/* /*
* rseq_prepare_unload() should be invoked by each thread using rseq_finish*() * rseq_prepare_unload() should be invoked by each thread executing a rseq
* at least once between their last rseq_finish*() and library unload of the * critical section at least once between their last critical section and
* library defining the rseq critical section (struct rseq_cs). This also * library unload of the library defining the rseq critical section
* applies to use of rseq in code generated by JIT: rseq_prepare_unload() * (struct rseq_cs). This also applies to use of rseq in code generated by
* should be invoked at least once by each thread using rseq_finish*() before * JIT: rseq_prepare_unload() should be invoked at least once by each
* reclaim of the memory holding the struct rseq_cs. * thread executing a rseq critical section before reclaim of the memory
* holding the struct rseq_cs.
*/ */
static inline void rseq_prepare_unload(void) static inline void rseq_prepare_unload(void)
{ {
__rseq_abi.rseq_cs = 0; rseq_clear_rseq_cs();
} }
#endif /* RSEQ_H_ */ #endif /* RSEQ_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment