Commit c2b3496b authored by Peter Zijlstra, committed by Ingo Molnar

x86/ldt: Rework locking

The LDT is duplicated on fork() and on exec(), which is wrong as exec()
should start from a clean state, i.e. without an LDT. To fix this, the LDT
duplication code will be moved into arch_dup_mmap(), which is only called
for fork().

This introduces a locking problem. arch_dup_mmap() holds the mmap_sem of
the parent process, but the LDT duplication code needs to acquire
mm->context.lock to access the LDT data safely. That is the reverse lock
order of write_ldt(), where mmap_sem nests inside context.lock.
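
The inversion is easiest to see side by side. As a minimal userspace
analogue (plain pthread mutexes standing in for mmap_sem and context.lock;
an illustration, not kernel code), the two paths deadlock for the same
reason the fork path would:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t context_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ mm->context.lock */
static pthread_mutex_t mmap_sem     = PTHREAD_MUTEX_INITIALIZER; /* ~ mm->mmap_sem */

/* Models write_ldt(): context.lock taken first, mmap_sem nested inside. */
static void *write_ldt_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&context_lock);
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&context_lock);
	return NULL;
}

/* Models arch_dup_mmap(): mmap_sem already held, context.lock nested inside. */
static void *dup_mmap_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_sem);
	pthread_mutex_lock(&context_lock);	/* reverse order: ABBA */
	pthread_mutex_unlock(&context_lock);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, write_ldt_path, NULL);
	pthread_create(&t2, NULL, dup_mmap_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("survived this interleaving");	/* an unlucky schedule hangs instead */
	return 0;
}

Run it in a loop and an unlucky interleaving leaves each thread blocked on
the lock the other one holds.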

Solve this by introducing a new rw_semaphore which serializes the
read_ldt()/write_ldt() syscall operations, and use context.lock to protect
the actual installation of the LDT descriptor.

So context.lock stabilizes mm->context.ldt and can nest inside the new
semaphore or mmap_sem.
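
In condensed form the write side then becomes (a fragment simplified from
the write_ldt()/install_ldt() hunks below, not a complete function; new_ldt
stands for the freshly built table):

	/* Serialize against concurrent read_ldt()/write_ldt() callers. */
	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	/* ... allocate and fill new_ldt; no context.lock required here ... */

	/* context.lock only guards publication of the new table. */
	mutex_lock(&mm->context.lock);
	smp_store_release(&mm->context.ldt, new_ldt);
	mutex_unlock(&mm->context.lock);

	up_write(&mm->context.ldt_usr_sem);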
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: dan.j.williams@intel.com
Cc: hughd@google.com
Cc: keescook@google.com
Cc: kirill.shutemov@linux.intel.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c10e83f5
arch/x86/include/asm/mmu.h
@@ -3,6 +3,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -27,7 +28,8 @@ typedef struct {
 	atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
+	struct rw_semaphore	ldt_usr_sem;
+	struct ldt_struct	*ldt;
 #endif
 
 #ifdef CONFIG_X86_64

arch/x86/include/asm/mmu_context.h
@@ -132,6 +132,8 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mutex_init(&mm->context.lock);
+
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);

arch/x86/kernel/ldt.c
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *	contex.ldt_usr_sem
+ *	  mmap_sem
+ *	    context.lock
  */
 
 #include <linux/errno.h>
@@ -42,7 +47,7 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
 	struct mm_struct *mm = __mm;
@@ -99,15 +104,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-			struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+	mutex_lock(&mm->context.lock);
+
 	/* Synchronizes with READ_ONCE in load_mm_ldt. */
-	smp_store_release(&current_mm->context.ldt, ldt);
+	smp_store_release(&mm->context.ldt, ldt);
 
-	/* Activate the LDT for all CPUs using current_mm. */
-	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+	/* Activate the LDT for all CPUs using currents mm. */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -133,7 +140,8 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 	struct mm_struct *old_mm;
 	int retval = 0;
 
-	mutex_init(&mm->context.lock);
+	init_rwsem(&mm->context.ldt_usr_sem);
+
 	old_mm = current->mm;
 	if (!old_mm) {
 		mm->context.ldt = NULL;
@@ -180,7 +188,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	unsigned long entries_size;
 	int retval;
 
-	mutex_lock(&mm->context.lock);
+	down_read(&mm->context.ldt_usr_sem);
 
 	if (!mm->context.ldt) {
 		retval = 0;
@@ -209,7 +217,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	retval = bytecount;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_read(&mm->context.ldt_usr_sem);
 	return retval;
 }
 
@@ -269,7 +277,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		ldt.avl = 0;
 	}
 
-	mutex_lock(&mm->context.lock);
+	if (down_write_killable(&mm->context.ldt_usr_sem))
+		return -EINTR;
 
 	old_ldt = mm->context.ldt;
 	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -291,7 +300,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	error = 0;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_write(&mm->context.ldt_usr_sem);
 out:
 	return error;
 }
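
Both syscall paths touched here are reachable from userspace through
modify_ldt(2). A minimal x86 Linux sketch that first hits write_ldt() and
then read_ldt() (error handling kept minimal):

#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	unsigned char buf[16 * LDT_ENTRY_SIZE];
	long ret;

	/* Describe a small 32-bit data segment in LDT slot 0. */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.limit        = 0xfff;
	desc.seg_32bit    = 1;
	desc.useable      = 1;

	/* func == 1 installs an entry and ends up in write_ldt(). */
	ret = syscall(SYS_modify_ldt, 1, &desc, sizeof(desc));
	printf("write_ldt -> %ld\n", ret);

	/* func == 0 copies the table out and ends up in read_ldt(). */
	ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read_ldt  -> %ld bytes\n", ret);
	return 0;
}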