Commit e5e3a042 authored by Jeremy Fitzhardinge, committed by Andi Kleen

[PATCH] i386: remove default_ldt, and simplify ldt-setting.

This patch removes the default_ldt[] array, as it has been unused since
iBCS stopped being supported.  This means it is now possible to actually
set an empty LDT segment.

In order to deal with this, the set_ldt_desc/load_LDT pair has been
replaced with a single set_ldt() operation which is responsible for both
setting up the LDT descriptor in the GDT, and reloading the LDT register.
If there are no LDT entries, the LDT register is loaded with a NULL
descriptor.
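
For readers skimming the diff, here is a condensed sketch of the new helper; the full version is in the desc.h hunk below, and the comments are editorial additions, not part of the patch:

	/* Condensed from the desc.h hunk below; comments added for clarity. */
	static inline void set_ldt(void *addr, unsigned int entries)
	{
		if (likely(entries == 0))
			/* No LDT entries: load a null selector -> empty LDT. */
			__asm__ __volatile__("lldt %w0"::"q" (0));
		else {
			unsigned cpu = smp_processor_id();
			__u32 a, b;

			/* Build an LDT descriptor covering 'entries' descriptors... */
			pack_descriptor(&a, &b, (unsigned long)addr,
					entries * sizeof(struct desc_struct) - 1,
					DESCTYPE_LDT, 0);
			/* ...install it in this CPU's GDT and reload the LDT register. */
			write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
			__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
		}
	}

With this in place, clear_LDT() and load_LDT_nolock() collapse to single set_ldt() calls, as the later hunks show.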
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Acked-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
parent e2764a1e
@@ -160,16 +160,14 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount)
 {
 	int err;
 	unsigned long size;
-	void *address;
 
 	err = 0;
-	address = &default_ldt[0];
 	size = 5*sizeof(struct desc_struct);
 	if (size > bytecount)
 		size = bytecount;
 
 	err = size;
-	if (copy_to_user(ptr, address, size))
+	if (clear_user(ptr, size))
 		err = -EFAULT;
 
 	return err;
...
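Editorial aside: the clear_user() substitution above works because default_ldt[] (removed in the traps.c hunk below) held five all-zero descriptors, so copying it out to userspace only ever produced zero bytes. Restated compactly, for illustration only rather than as the literal patch text:

	/* Illustrative restatement: both the old copy_to_user(&default_ldt[0], ...)
	 * and the new clear_user() hand userspace at most
	 * 5 * sizeof(struct desc_struct) zero bytes. */
	static int read_default_ldt(void __user *ptr, unsigned long bytecount)
	{
		unsigned long size = 5 * sizeof(struct desc_struct);

		if (size > bytecount)
			size = bytecount;
		return clear_user(ptr, size) ? -EFAULT : (int)size;
	}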
@@ -61,9 +61,6 @@ int panic_on_unrecovered_nmi;
 
 asmlinkage int system_call(void);
 
-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-		{ 0, 0 }, { 0, 0 } };
-
 /* Do we ignore FPU interrupts ? */
 char ignore_fpu_irq = 0;
 
...
@@ -33,11 +33,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
 extern struct desc_struct idt_table[];
 extern void set_intr_gate(unsigned int irq, void * addr);
 
@@ -65,7 +60,6 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #define DESCTYPE_S	0x10	/* !system */
 
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -115,13 +109,20 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
 	write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+static inline void set_ldt(void *addr, unsigned int entries)
 {
-	__u32 a, b;
-	pack_descriptor(&a, &b, (unsigned long)addr,
-			entries * sizeof(struct desc_struct) - 1,
-			DESCTYPE_LDT, 0);
-	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+	if (likely(entries == 0))
+		__asm__ __volatile__("lldt %w0"::"q" (0));
+	else {
+		unsigned cpu = smp_processor_id();
+		__u32 a, b;
+
+		pack_descriptor(&a, &b, (unsigned long)addr,
+				entries * sizeof(struct desc_struct) - 1,
+				DESCTYPE_LDT, 0);
+		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+		__asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+	}
 }
 
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
@@ -153,35 +154,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
 
 static inline void clear_LDT(void)
 {
-	int cpu = get_cpu();
-
-	set_ldt_desc(cpu, &default_ldt[0], 5);
-	load_LDT_desc();
-	put_cpu();
+	set_ldt(NULL, 0);
 }
 
 /*
  * load one particular LDT into the current CPU
  */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock(mm_context_t *pc)
 {
-	void *segments = pc->ldt;
-	int count = pc->size;
-
-	if (likely(!count)) {
-		segments = &default_ldt[0];
-		count = 5;
-	}
-
-	set_ldt_desc(cpu, segments, count);
-	load_LDT_desc();
+	set_ldt(pc->ldt, pc->size);
 }
 
 static inline void load_LDT(mm_context_t *pc)
 {
-	int cpu = get_cpu();
-	load_LDT_nolock(pc, cpu);
-	put_cpu();
+	preempt_disable();
+	load_LDT_nolock(pc);
+	preempt_enable();
 }
 
 static inline unsigned long get_desc_base(unsigned long *desc)
...
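Design note: set_ldt() now looks up the CPU itself via smp_processor_id(), so the cpu parameter disappears from the callers. load_LDT() still keeps preemption disabled across the update, but with preempt_disable()/preempt_enable() instead of get_cpu()/put_cpu(), since the returned CPU number is no longer needed. A hypothetical caller (not from this patch) would convert along these lines:

	/* Hypothetical caller, for illustration only -- not part of the patch. */

	/* Before: the caller pinned the CPU and passed its number down. */
	static void reload_context_ldt_old(mm_context_t *ctx)
	{
		int cpu = get_cpu();

		load_LDT_nolock(ctx, cpu);
		put_cpu();
	}

	/* After: preemption is disabled inside load_LDT(), and set_ldt()
	 * picks up the CPU on its own, so only the context is passed. */
	static void reload_context_ldt_new(mm_context_t *ctx)
	{
		load_LDT(ctx);
	}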
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
 		 * load the LDT, if the LDT is different:
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
 	else {
@@ -56,7 +56,7 @@ static inline void switch_mm(struct mm_struct *prev,
 			 * tlb flush IPI delivery. We must reload %cr3.
 			 */
 			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context, cpu);
+			load_LDT_nolock(&next->context);
 		}
 	}
 #endif
...