Commit 70216e18 authored by Mathieu Desnoyers, committed by Ingo Molnar

membarrier: Provide core serializing command, *_SYNC_CORE

Provide a core serializing membarrier command to support memory reclaim
by JITs.

Each architecture needs to explicitly opt into that support by
documenting in its architecture code how it provides the core
serializing instructions required when returning from the membarrier
IPI, and after the scheduler has updated the curr->mm pointer (before
going back to user-space). It should then select
ARCH_HAS_MEMBARRIER_SYNC_CORE to enable support for that command on
that architecture.

Architectures selecting this feature need to either document that
they issue core serializing instructions when returning to user-space,
or implement their architecture-specific sync_core_before_usermode().
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Andrew Hunter <ahh@google.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Avi Kivity <avi@scylladb.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Dave Watson <davejwatson@fb.com>
Cc: David Sehr <sehr@google.com>
Cc: Greg Hackmann <ghackmann@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maged Michael <maged.michael@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-api@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/20180129202020.8515-9-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ac1ab12a
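Before the diff itself, a minimal userspace sketch (not part of the commit) of how a JIT might drive the new commands: register once up front, then issue the sync-core barrier after rewriting code and before any thread may execute it. The membarrier() wrapper and the fallback #defines are assumptions for systems whose installed uapi headers predate this change; their values mirror the enum added below.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fallback definitions, matching the values this commit adds to
 * include/uapi/linux/membarrier.h, for older installed headers. */
#ifndef MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE		(1 << 5)
#define MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE	(1 << 6)
#endif

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Register once, e.g. at JIT start-up. -EINVAL here means the
	 * architecture does not select ARCH_HAS_MEMBARRIER_SYNC_CORE. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
		perror("membarrier register sync_core");
		return 1;
	}

	/* ... JIT rewrites or reclaims code here ... */

	/* Ensure every running sibling thread executes a core serializing
	 * instruction before it can run the modified code; non-running
	 * threads get one before resuming user-space execution. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
		perror("membarrier private expedited sync_core");
		return 1;
	}
	return 0;
}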
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/gfp.h>
+#include <linux/sync_core.h>
 
 /*
  * Routines for handling mm_structs
@@ -223,12 +224,26 @@ enum {
 	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
 	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
 	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
+	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
+};
+
+enum {
+	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
 };
 
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
 #include <asm/membarrier.h>
 #endif
 
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+	if (likely(!(atomic_read(&mm->membarrier_state) &
+		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
+		return;
+	sync_core_before_usermode();
+}
+
 static inline void membarrier_execve(struct task_struct *t)
 {
 	atomic_set(&t->mm->membarrier_state, 0);
@@ -244,6 +259,9 @@ static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
 static inline void membarrier_execve(struct task_struct *t)
 {
 }
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+}
 #endif
 
 #endif /* _LINUX_SCHED_MM_H */
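The commit message asks architectures whose return-to-user path is not already core serializing to supply an architecture-specific sync_core_before_usermode(), which the helper above calls. Purely as an illustration of that contract, and not code from this series, a hypothetical opt-in architecture's header could look like the sketch below (the serializing helper name is invented):

/*
 * Hypothetical <asm/sync_core.h> for an architecture that selects
 * ARCH_HAS_MEMBARRIER_SYNC_CORE but whose return-to-user path does not
 * already execute a core serializing instruction.
 */
#ifndef _ASM_EXAMPLE_SYNC_CORE_H
#define _ASM_EXAMPLE_SYNC_CORE_H

static inline void sync_core_before_usermode(void)
{
	/*
	 * Issue whatever instruction the ISA defines as core serializing
	 * (context synchronizing), so that instruction fetch after the
	 * return to user-space observes prior code modifications.
	 */
	example_arch_serialize_core();	/* hypothetical arch helper */
}

#endif /* _ASM_EXAMPLE_SYNC_CORE_H */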
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
* to and return from the system call * to and return from the system call
* (non-running threads are de facto in such a * (non-running threads are de facto in such a
* state). This only covers threads from the * state). This only covers threads from the
* same processes as the caller thread. This * same process as the caller thread. This
* command returns 0 on success. The * command returns 0 on success. The
* "expedited" commands complete faster than * "expedited" commands complete faster than
* the non-expedited ones, they never block, * the non-expedited ones, they never block,
...@@ -86,6 +86,34 @@ ...@@ -86,6 +86,34 @@
* Register the process intent to use * Register the process intent to use
* MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
* returns 0. * returns 0.
* @MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
* In addition to provide memory ordering
* guarantees described in
* MEMBARRIER_CMD_PRIVATE_EXPEDITED, ensure
* the caller thread, upon return from system
* call, that all its running threads siblings
* have executed a core serializing
* instruction. (architectures are required to
* guarantee that non-running threads issue
* core serializing instructions before they
* resume user-space execution). This only
* covers threads from the same process as the
* caller thread. This command returns 0 on
* success. The "expedited" commands complete
* faster than the non-expedited ones, they
* never block, but have the downside of
* causing extra overhead. If this command is
* not implemented by an architecture, -EINVAL
* is returned. A process needs to register its
* intent to use the private expedited sync
* core command prior to using it, otherwise
* this command returns -EPERM.
* @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
* Register the process intent to use
* MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.
* If this command is not implemented by an
* architecture, -EINVAL is returned.
* Returns 0 on success.
* @MEMBARRIER_CMD_SHARED: * @MEMBARRIER_CMD_SHARED:
* Alias to MEMBARRIER_CMD_GLOBAL. Provided for * Alias to MEMBARRIER_CMD_GLOBAL. Provided for
* header backward compatibility. * header backward compatibility.
...@@ -101,6 +129,8 @@ enum membarrier_cmd { ...@@ -101,6 +129,8 @@ enum membarrier_cmd {
MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2), MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4), MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
/* Alias for header backward compatibility. */ /* Alias for header backward compatibility. */
MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL, MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
......
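As the documentation above spells out, the SYNC_CORE commands return -EINVAL on architectures that do not implement them. A caller can also probe support up front with MEMBARRIER_CMD_QUERY, which returns the bitmask of supported commands. A small sketch, assuming uapi headers that already contain this commit (the helper name is made up):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/membarrier.h>

/*
 * Returns 1 if MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE is available,
 * 0 if the kernel or architecture does not support it, and -1 on error
 * (for example, kernels without the membarrier() system call).
 */
static int membarrier_sync_core_supported(void)
{
	long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0)
		return -1;
	return !!(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE);
}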
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1415,6 +1415,9 @@ config USERFAULTFD
 config ARCH_HAS_MEMBARRIER_CALLBACKS
 	bool
 
+config ARCH_HAS_MEMBARRIER_SYNC_CORE
+	bool
+
 config EMBEDDED
 	bool "Embedded system"
 	option allnoconfig_y
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2704,13 +2704,21 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 
 	fire_sched_in_preempt_notifiers(current);
 	/*
-	 * When transitioning from a kernel thread to a userspace
-	 * thread, mmdrop()'s implicit full barrier is required by the
-	 * membarrier system call, because the current ->active_mm can
-	 * become the current mm without going through switch_mm().
+	 * When switching through a kernel thread, the loop in
+	 * membarrier_{private,global}_expedited() may have observed that
+	 * kernel thread and not issued an IPI. It is therefore possible to
+	 * schedule between user->kernel->user threads without passing though
+	 * switch_mm(). Membarrier requires a barrier after storing to
+	 * rq->curr, before returning to userspace, so provide them here:
+	 *
+	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
+	 *   provided by mmdrop(),
+	 * - a sync_core for SYNC_CORE.
 	 */
-	if (mm)
+	if (mm) {
+		membarrier_mm_sync_core_before_usermode(mm);
 		mmdrop(mm);
+	}
 	if (unlikely(prev_state == TASK_DEAD)) {
 		if (prev->sched_class->task_dead)
 			prev->sched_class->task_dead(prev);
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -26,11 +26,20 @@
  * Bitmask made from a "or" of all commands within enum membarrier_cmd,
  * except MEMBARRIER_CMD_QUERY.
  */
+#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
+#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	\
+	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE	\
+	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
+#else
+#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
+#endif
+
 #define MEMBARRIER_CMD_BITMASK	\
 	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
 	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
 	| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
-	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
+	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
+	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
 
 static void ipi_mb(void *info)
 {
@@ -104,15 +113,23 @@ static int membarrier_global_expedited(void)
 	return 0;
 }
 
-static int membarrier_private_expedited(void)
+static int membarrier_private_expedited(int flags)
 {
 	int cpu;
 	bool fallback = false;
 	cpumask_var_t tmpmask;
 
-	if (!(atomic_read(&current->mm->membarrier_state)
-			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
-		return -EPERM;
+	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
+		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
+			return -EINVAL;
+		if (!(atomic_read(&current->mm->membarrier_state) &
+		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
+			return -EPERM;
+	} else {
+		if (!(atomic_read(&current->mm->membarrier_state) &
+		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+			return -EPERM;
+	}
 
 	if (num_online_cpus() == 1)
 		return 0;
@@ -205,20 +222,29 @@ static int membarrier_register_global_expedited(void)
 	return 0;
 }
 
-static int membarrier_register_private_expedited(void)
+static int membarrier_register_private_expedited(int flags)
 {
 	struct task_struct *p = current;
 	struct mm_struct *mm = p->mm;
+	int state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY;
+
+	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
+		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
+			return -EINVAL;
+		state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
+	}
 
 	/*
 	 * We need to consider threads belonging to different thread
 	 * groups, which use the same mm. (CLONE_VM but not
 	 * CLONE_THREAD).
 	 */
-	if (atomic_read(&mm->membarrier_state)
-			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
+	if (atomic_read(&mm->membarrier_state) & state)
 		return 0;
 	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
+	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
+		atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
+			  &mm->membarrier_state);
 	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
 		/*
 		 * Ensure all future scheduler executions will observe the
@@ -226,8 +252,7 @@ static int membarrier_register_private_expedited(void)
 		 */
 		synchronize_sched();
 	}
-	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
-			&mm->membarrier_state);
+	atomic_or(state, &mm->membarrier_state);
 	return 0;
 }
 
@@ -283,9 +308,13 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
 	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
 		return membarrier_register_global_expedited();
 	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
-		return membarrier_private_expedited();
+		return membarrier_private_expedited(0);
 	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
-		return membarrier_register_private_expedited();
+		return membarrier_register_private_expedited(0);
+	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
+		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
+	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
+		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
 	default:
 		return -EINVAL;
 	}