Commit bbf79d21 authored by Borislav Petkov, committed by Ingo Molnar

x86/ldt: Rename ldt_struct::size to ::nr_entries

... because this is exactly what it is: the number of entries in the
LDT. Calling it "size" is simply confusing and it is actually begging
to be called "nr_entries" or somesuch, especially if you see constructs
like:

	alloc_size = size * LDT_ENTRY_SIZE;

since LDT_ENTRY_SIZE is the size of a single entry.
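
To make the collision concrete, here is a quick illustration (an editor's sketch, not code from the patch itself; the real hunks follow below) of how the renamed field reads next to LDT_ENTRY_SIZE:

        struct ldt_struct {
                struct desc_struct *entries;
                unsigned int nr_entries;        /* was "size": a count of descriptors, not bytes */
        };

        /* old: alloc_size = size       * LDT_ENTRY_SIZE;   -- two unrelated "sizes" collide     */
        /* new: alloc_size = nr_entries * LDT_ENTRY_SIZE;   -- bytes = entries * bytes-per-entry */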

There should be no functionality change resulting from this patch, as
the before/after output from tools/testing/selftests/x86/ldt_gdt.c
shows.
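
For reference, here is a minimal, hypothetical sketch (an editor's illustration, not code taken from ldt_gdt.c or from the patch) of the kind of modify_ldt(2) round-trip that selftest exercises; the write lands in write_ldt() and the read in read_ldt(), both touched by the diff below:

        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <asm/ldt.h>            /* struct user_desc, LDT_ENTRY_SIZE */

        int main(void)
        {
                struct user_desc desc;
                unsigned char buf[LDT_ENTRY_SIZE * 4];
                long ret;

                /* Install one present 32-bit data segment as LDT entry 0. */
                memset(&desc, 0, sizeof(desc));
                desc.entry_number   = 0;
                desc.limit          = 0xfffff;
                desc.seg_32bit      = 1;
                desc.limit_in_pages = 1;

                /* func 0x11 is the "new mode" write, handled by write_ldt(). */
                ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
                if (ret)
                        perror("modify_ldt(write)");

                /* func 0 reads the LDT back; read_ldt() zero-fills up to bytecount. */
                ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
                printf("modify_ldt(read) returned %ld bytes\n", ret);

                return 0;
        }

Identical output from such a round-trip (and from the full selftest) before and after the patch is what backs the "no functionality change" claim.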
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170606173116.13977-1-bp@alien8.de
[ Renamed 'n_entries' to 'nr_entries' ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5dd0b16c
@@ -2333,7 +2333,7 @@ static unsigned long get_segment_base(unsigned int segment)
                /* IRQs are off, so this synchronizes with smp_store_release */
                ldt = lockless_dereference(current->active_mm->context.ldt);
-               if (!ldt || idx > ldt->size)
+               if (!ldt || idx > ldt->nr_entries)
                        return 0;
 
                desc = &ldt->entries[idx];
...
@@ -47,7 +47,7 @@ struct ldt_struct {
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
-       unsigned int size;
+       unsigned int nr_entries;
 };
 
 /*
@@ -87,7 +87,7 @@ static inline void load_mm_ldt(struct mm_struct *mm)
         */
 
        if (unlikely(ldt))
-               set_ldt(ldt->entries, ldt->size);
+               set_ldt(ldt->entries, ldt->nr_entries);
        else
                clear_LDT();
 #else
...
@@ -31,16 +31,16 @@ static void flush_ldt(void *__mm)
                return;
 
        pc = &mm->context;
-       set_ldt(pc->ldt->entries, pc->ldt->size);
+       set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
 }
 
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
-static struct ldt_struct *alloc_ldt_struct(unsigned int size)
+static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
        struct ldt_struct *new_ldt;
        unsigned int alloc_size;
 
-       if (size > LDT_ENTRIES)
+       if (num_entries > LDT_ENTRIES)
                return NULL;
 
        new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
@@ -48,7 +48,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
                return NULL;
 
        BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
-       alloc_size = size * LDT_ENTRY_SIZE;
+       alloc_size = num_entries * LDT_ENTRY_SIZE;
 
        /*
         * Xen is very picky: it requires a page-aligned LDT that has no
@@ -66,14 +66,14 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
                return NULL;
        }
 
-       new_ldt->size = size;
+       new_ldt->nr_entries = num_entries;
        return new_ldt;
 }
 
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-       paravirt_alloc_ldt(ldt->entries, ldt->size);
+       paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
 /* context.lock is held */
@@ -92,8 +92,8 @@ static void free_ldt_struct(struct ldt_struct *ldt)
        if (likely(!ldt))
                return;
 
-       paravirt_free_ldt(ldt->entries, ldt->size);
-       if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+       paravirt_free_ldt(ldt->entries, ldt->nr_entries);
+       if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
                vfree_atomic(ldt->entries);
        else
                free_page((unsigned long)ldt->entries);
@@ -123,14 +123,14 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
                goto out_unlock;
        }
 
-       new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+       new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
                retval = -ENOMEM;
                goto out_unlock;
        }
 
        memcpy(new_ldt->entries, old_mm->context.ldt->entries,
-              new_ldt->size * LDT_ENTRY_SIZE);
+              new_ldt->nr_entries * LDT_ENTRY_SIZE);
        finalize_ldt_struct(new_ldt);
 
        mm->context.ldt = new_ldt;
@@ -153,9 +153,9 @@ void destroy_context_ldt(struct mm_struct *mm)
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-       int retval;
-       unsigned long size;
        struct mm_struct *mm = current->mm;
+       unsigned long entries_size;
+       int retval;
 
        mutex_lock(&mm->context.lock);
@@ -167,18 +167,18 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
        if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-       size = mm->context.ldt->size * LDT_ENTRY_SIZE;
-       if (size > bytecount)
-               size = bytecount;
+       entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
+       if (entries_size > bytecount)
+               entries_size = bytecount;
 
-       if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+       if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
                retval = -EFAULT;
                goto out_unlock;
        }
 
-       if (size != bytecount) {
+       if (entries_size != bytecount) {
                /* Zero-fill the rest and pretend we read bytecount bytes. */
-               if (clear_user(ptr + size, bytecount - size)) {
+               if (clear_user(ptr + entries_size, bytecount - entries_size)) {
                        retval = -EFAULT;
                        goto out_unlock;
                }
@@ -209,7 +209,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 {
        struct mm_struct *mm = current->mm;
        struct ldt_struct *new_ldt, *old_ldt;
-       unsigned int oldsize, newsize;
+       unsigned int old_nr_entries, new_nr_entries;
        struct user_desc ldt_info;
        struct desc_struct ldt;
        int error;
@@ -248,17 +248,18 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        mutex_lock(&mm->context.lock);
 
        old_ldt = mm->context.ldt;
-       oldsize = old_ldt ? old_ldt->size : 0;
-       newsize = max(ldt_info.entry_number + 1, oldsize);
+       old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
+       new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
 
        error = -ENOMEM;
-       new_ldt = alloc_ldt_struct(newsize);
+       new_ldt = alloc_ldt_struct(new_nr_entries);
        if (!new_ldt)
                goto out_unlock;
 
        if (old_ldt)
-               memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+               memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
+
        new_ldt->entries[ldt_info.entry_number] = ldt;
        finalize_ldt_struct(new_ldt);
...
@@ -142,7 +142,7 @@ void release_thread(struct task_struct *dead_task)
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
-                               dead_task->mm->context.ldt->size);
+                               dead_task->mm->context.ldt->nr_entries);
                        BUG();
                }
 #endif
...
@@ -34,7 +34,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
-                            seg >= child->mm->context.ldt->size))
+                            seg >= child->mm->context.ldt->nr_entries))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
...
@@ -27,7 +27,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
        seg >>= 3;
        mutex_lock(&current->mm->context.lock);
-       if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+       if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
                ret = current->mm->context.ldt->entries[seg];
        mutex_unlock(&current->mm->context.lock);
 #endif
...