Commit 34273f41 authored by H. Peter Anvin

x86, espfix: Make it possible to disable 16-bit support

Embedded systems, which may be very memory-size-sensitive, are
extremely unlikely to ever encounter any 16-bit software, so make it
a CONFIG_EXPERT option to turn off support for any 16-bit software
whatsoever.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
parent 197725de
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -909,14 +909,27 @@ config VM86
 	default y
 	depends on X86_32
 	---help---
-	  This option is required by programs like DOSEMU to run 16-bit legacy
-	  code on X86 processors. It also may be needed by software like
-	  XFree86 to initialize some video cards via BIOS. Disabling this
-	  option saves about 6k.
+	  This option is required by programs like DOSEMU to run
+	  16-bit real mode legacy code on x86 processors. It also may
+	  be needed by software like XFree86 to initialize some video
+	  cards via BIOS. Disabling this option saves about 6K.
+
+config X86_16BIT
+	bool "Enable support for 16-bit segments" if EXPERT
+	default y
+	---help---
+	  This option is required by programs like Wine to run 16-bit
+	  protected mode legacy code on x86 processors. Disabling
+	  this option saves about 300 bytes on i386, or around 6K text
+	  plus 16K runtime memory on x86-64.
+
+config X86_ESPFIX32
+	def_bool y
+	depends on X86_16BIT && X86_32
+
 config X86_ESPFIX64
 	def_bool y
-	depends on X86_64
+	depends on X86_16BIT && X86_64
 
 config TOSHIBA
 	tristate "Toshiba Laptop support"
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -527,6 +527,7 @@ syscall_exit:
 restore_all:
 	TRACE_IRQS_IRET
 restore_all_notrace:
+#ifdef CONFIG_X86_ESPFIX32
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
 	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 	# are returning to the kernel.
@@ -537,6 +538,7 @@ restore_all_notrace:
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
+#endif
 restore_nocheck:
 	RESTORE_REGS 4			# skip orig_eax/error_code
 irq_return:
@@ -549,6 +551,7 @@ ENTRY(iret_exc)
 .previous
 	_ASM_EXTABLE(irq_return,iret_exc)
 
+#ifdef CONFIG_X86_ESPFIX32
 	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
@@ -592,6 +595,7 @@ ldt_ss:
 	lss (%esp), %esp		/* switch to espfix segment */
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
+#endif
 	CFI_ENDPROC
 ENDPROC(system_call)
@@ -699,6 +703,7 @@ END(syscall_badsys)
  * the high word of the segment base from the GDT and swiches to the
  * normal stack and adjusts ESP with the matching offset.
  */
+#ifdef CONFIG_X86_ESPFIX32
 	/* fixup the stack */
 	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
 	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
@@ -708,8 +713,10 @@ END(syscall_badsys)
 	pushl_cfi %eax
 	lss (%esp), %esp		/* switch to the normal stack segment */
 	CFI_ADJUST_CFA_OFFSET -8
+#endif
 .endm
 .macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_ESPFIX32
 	movl %ss, %eax
 	/* see if on espfix stack */
 	cmpw $__ESPFIX_SS, %ax
@@ -720,6 +727,7 @@ END(syscall_badsys)
 	/* switch to normal stack */
 	FIXUP_ESPFIX_STACK
 27:
+#endif
 .endm
 
 /*
@@ -1350,11 +1358,13 @@ END(debug)
 ENTRY(nmi)
 	RING0_INT_FRAME
 	ASM_CLAC
+#ifdef CONFIG_X86_ESPFIX32
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
 	popl_cfi %eax
 	je nmi_espfix_stack
+#endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
 	pushl_cfi %eax
@@ -1394,6 +1404,7 @@ nmi_debug_stack_check:
 	FIX_STACK 24, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
+#ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
 	/* We have a RING0_INT_FRAME here.
 	 *
@@ -1415,6 +1426,7 @@ nmi_espfix_stack:
 	lss 12+4(%esp), %esp		# back to espfix stack
 	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
+#endif
 	CFI_ENDPROC
 END(nmi)
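All of the 32-bit blocks above guard one situation: an IRET back to ring 3 on a stack segment that lives in the LDT. A simplified C sketch of that condition follows; needs_espfix32() is a made-up name, and the EFLAGS.VM bit that the real entry code folds into the same compare is ignored here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An x86 segment selector is: bits 3..15 index, bit 2 TI (1 = LDT),
 * bits 0..1 RPL. The ldt_ss path is only needed when returning to
 * user mode on an LDT stack selector, because the CPU then leaks the
 * high 16 bits of the kernel ESP. */
static bool needs_espfix32(uint16_t ss, uint16_t cs)
{
    bool ss_in_ldt = ss & (1u << 2);   /* TI bit: selector is in the LDT */
    bool user_rpl  = (cs & 3) == 3;    /* returning to ring 3 */
    return ss_in_ldt && user_rpl;
}

int main(void)
{
    /* selector 0x0f: index 1, TI=1 (LDT), RPL=3; 0x23 is a typical
     * 32-bit user CS (GDT index 4, RPL 3) */
    printf("espfix needed: %d\n", needs_espfix32(0x0f, 0x23));
    return 0;
}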
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1045,8 +1045,10 @@ irq_return:
 	 * Are we returning to a stack segment from the LDT?  Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
 	 */
+#ifdef CONFIG_X86_ESPFIX64
 	testb $4,(SS-RIP)(%rsp)
 	jnz irq_return_ldt
+#endif
 
 irq_return_iret:
 	INTERRUPT_RETURN
@@ -1058,6 +1060,7 @@ ENTRY(native_iret)
 	_ASM_EXTABLE(native_iret, bad_iret)
 #endif
 
+#ifdef CONFIG_X86_ESPFIX64
 irq_return_ldt:
 	pushq_cfi %rax
 	pushq_cfi %rdi
@@ -1081,6 +1084,7 @@ irq_return_ldt:
 	movq %rax,%rsp
 	popq_cfi %rax
 	jmp irq_return_iret
+#endif
 
 	.section .fixup,"ax"
 bad_iret:
@@ -1152,6 +1156,7 @@ END(common_interrupt)
 	 * modify the stack to make it look like we just entered
 	 * the #GP handler from user space, similar to bad_iret.
 	 */
+#ifdef CONFIG_X86_ESPFIX64
 	ALIGN
 __do_double_fault:
 	XCPT_FRAME 1 RDI+8
@@ -1177,6 +1182,9 @@ __do_double_fault:
 	retq
 	CFI_ENDPROC
 END(__do_double_fault)
+#else
+# define __do_double_fault do_double_fault
+#endif
 
 /*
  * End of kprobes section
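The #else branch at the end of this hunk is the notable part: when espfix64 is configured out, the asm wrapper is not built and the __do_double_fault symbol is simply #defined to the ordinary handler, so the IDT setup code can refer to it unconditionally. A small C model of that pattern, with stand-in handler bodies:

#include <stdio.h>

static void do_double_fault(void)
{
    puts("plain #DF handler");
}

#ifdef CONFIG_X86_ESPFIX64
static void __do_double_fault(void)
{
    /* would first repair an espfix stack, then chain to the handler */
    do_double_fault();
}
#else
/* wrapper configured out: alias it to the plain handler */
# define __do_double_fault do_double_fault
#endif

int main(void)
{
    __do_double_fault();   /* resolves either way, no #ifdef at the call site */
    return 0;
}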
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -229,6 +229,11 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		}
 	}
 
+	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+
 	fill_ldt(&ldt, &ldt_info);
 	if (oldmode)
 		ldt.avl = 0;
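The user-visible effect of this check: with CONFIG_X86_16BIT=n, modify_ldt(2) now refuses 16-bit descriptors. A hypothetical test program for an x86 Linux host (assuming <asm/ldt.h> is available); on a kernel built with the option enabled, the same call should succeed instead:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>

int main(void)
{
    struct user_desc desc;

    memset(&desc, 0, sizeof(desc));
    desc.entry_number = 0;
    desc.base_addr    = 0;
    desc.limit        = 0xfff;
    desc.seg_32bit    = 0;     /* a 16-bit data segment */
    desc.contents     = 0;

    /* func=1 writes an LDT entry (the non-oldmode write_ldt() path) */
    if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) < 0)
        perror("modify_ldt");  /* EINVAL when 16-bit support is off */
    else
        puts("16-bit LDT descriptor installed");
    return 0;
}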