Commit a24ca997 authored by Thomas Gleixner

x86/iopl: Remove legacy IOPL option

The IOPL emulation via the I/O bitmap is sufficient. Remove the legacy
cruft dealing with the (e)flags based IOPL mechanism.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com> (Paravirt and Xen parts)
Acked-by: Andy Lutomirski <luto@kernel.org>
parent c8137ace
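
For context, here is a minimal user-space sketch of what the remaining emulation path still provides; the program below is illustrative only and not part of the commit. Under CONFIG_X86_IOPL_EMULATION, iopl(3) grants access to all 65536 I/O ports via the I/O bitmap, while CLI/STI from user space keep faulting because EFLAGS.IOPL stays at 0.

/* Illustrative only, not part of the commit: user-space view of the
 * emulated iopl(). Build with "gcc -O2 iopl-demo.c", run as root. */
#include <stdio.h>
#include <sys/io.h>		/* iopl(), outb() on x86 */

int main(void)
{
	if (iopl(3)) {			/* request full I/O port access */
		perror("iopl");
		return 1;
	}

	/* Port access works exactly as before ... */
	outb(0x00, 0x80);		/* 0x80 is the POST diagnostic port */

	/* ... but asm volatile("cli") would now fault instead of
	 * disabling interrupts, unlike the removed legacy mode. */
	return 0;
}
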
@@ -1254,12 +1254,9 @@ config X86_VSYSCALL_EMULATION
 	  Disabling this option saves about 7K of kernel size and
 	  possibly 4K of additional runtime pagetable memory.
 
-choice
-	prompt "IOPL"
-	default X86_IOPL_EMULATION
-
 config X86_IOPL_EMULATION
 	bool "IOPL Emulation"
+	default y
 	---help---
 	  Legacy IOPL support is an overbroad mechanism which allows user
 	  space aside of accessing all 65536 I/O ports also to disable
@@ -1269,22 +1266,8 @@ config X86_IOPL_EMULATION
 	  The emulation restricts the functionality of the syscall to
 	  only allowing the full range I/O port access, but prevents the
-	  ability to disable interrupts from user space.
-
-config X86_IOPL_LEGACY
-	bool "IOPL Legacy"
-	---help---
-	  Allow the full IOPL permissions, i.e. user space access to all
-	  65536 I/O ports and also the ability to disable interrupts, which
-	  is overbroad and can result in system lockups.
-
-config X86_IOPL_NONE
-	bool "IOPL None"
-	---help---
-	  Disable the IOPL permission syscall. That's the safest option as
-	  no sane application should depend on this functionality.
-
-endchoice
+	  ability to disable interrupts from user space which would be
+	  granted if the hardware IOPL mechanism would be used.
 
 config TOSHIBA
 	tristate "Toshiba Laptop support"
......
@@ -294,10 +294,6 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 {
 	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
 }
 
-static inline void set_iopl_mask(unsigned mask)
-{
-	PVOP_VCALL1(cpu.set_iopl_mask, mask);
-}
-
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
......
@@ -140,8 +140,6 @@ struct pv_cpu_ops {
 	void (*load_sp0)(unsigned long sp0);
 
-	void (*set_iopl_mask)(unsigned mask);
-
 	void (*wbinvd)(void);
 
 	/* cpuid emulation, mostly so that caps bits can be disabled */
......
@@ -516,10 +516,10 @@ struct thread_struct {
 	struct io_bitmap	*io_bitmap;
 
 	/*
-	 * IOPL. Priviledge level dependent I/O permission which includes
-	 * user space CLI/STI when granted.
+	 * IOPL. Priviledge level dependent I/O permission which is
+	 * emulated via the I/O bitmap to prevent user space from disabling
+	 * interrupts.
 	 */
-	unsigned long		iopl;
 	unsigned long		iopl_emul;
 
 	mm_segment_t		addr_limit;
@@ -552,25 +552,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
  */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void native_set_iopl_mask(unsigned mask)
-{
-#ifdef CONFIG_X86_32
-	unsigned int reg;
-
-	asm volatile ("pushfl;"
-		      "popl %0;"
-		      "andl %1, %0;"
-		      "orl %2, %0;"
-		      "pushl %0;"
-		      "popfl"
-		      : "=&r" (reg)
-		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-#endif
-}
-
 static inline void
 native_load_sp0(unsigned long sp0)
 {
@@ -610,7 +591,6 @@ static inline void load_sp0(unsigned long sp0)
 	native_load_sp0(sp0);
 }
 
-#define set_iopl_mask native_set_iopl_mask
 #endif /* CONFIG_PARAVIRT_XXL */
 
 /* Free all resources held by a thread. */
......
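
The removed native_set_iopl_mask() splices its mask argument into the IOPL field of EFLAGS (bits 12-13). Below is a small stand-alone sketch of the same bit manipulation, assuming the usual values X86_EFLAGS_IOPL_BIT == 12 and X86_EFLAGS_IOPL == 0x3000; it is an illustration, not kernel code.

#include <stdio.h>

#define X86_EFLAGS_IOPL_BIT	12
#define X86_EFLAGS_IOPL		(3UL << X86_EFLAGS_IOPL_BIT)	/* 0x3000 */

/* C equivalent of the removed pushfl/andl/orl/popfl sequence:
 * clear the two IOPL bits and OR in the new, already shifted mask. */
static unsigned long apply_iopl_mask(unsigned long eflags, unsigned long mask)
{
	return (eflags & ~X86_EFLAGS_IOPL) | mask;
}

int main(void)
{
	unsigned long mask = 3UL << X86_EFLAGS_IOPL_BIT;	/* level 3 */

	/* 0x246 is a typical EFLAGS value; the result sets bits 12-13. */
	printf("%#lx\n", apply_iopl_mask(0x246, mask));		/* -> 0x3246 */
	return 0;
}
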
@@ -62,6 +62,4 @@ void xen_arch_register_cpu(int num);
 void xen_arch_unregister_cpu(int num);
 #endif
 
-extern void xen_set_iopl_mask(unsigned mask);
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
@@ -153,28 +153,23 @@ SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 /*
  * The sys_iopl functionality depends on the level argument, which if
- * granted for the task is used by the CPU to check I/O instruction and
- * CLI/STI against the current priviledge level (CPL). If CPL is less than
- * or equal the tasks IOPL level the instructions take effect. If not a #GP
- * is raised. The default IOPL is 0, i.e. no permissions.
+ * granted for the task is used to enable access to all 65536 I/O ports.
  *
- * Setting IOPL to level 0-2 is disabling the userspace access. Only level
- * 3 enables it. If set it allows the user space thread:
+ * This does not use the IOPL mechanism provided by the CPU as that would
+ * also allow the user space task to use the CLI/STI instructions.
  *
- * - Unrestricted access to all 65535 I/O ports
- * - The usage of CLI/STI instructions
+ * Disabling interrupts in a user space task is dangerous as it might lock
+ * up the machine and the semantics vs. syscalls and exceptions is
+ * undefined.
  *
- * The advantage over ioperm is that the context switch does not require to
- * update the I/O bitmap which is especially true when a large number of
- * ports is accessed. But the allowance of CLI/STI in userspace is
- * considered a major problem.
+ * Setting IOPL to level 0-2 is disabling I/O permissions. Level 3
+ * enables them.
  *
  * IOPL is strictly per thread and inherited on fork.
  */
 SYSCALL_DEFINE1(iopl, unsigned int, level)
 {
 	struct thread_struct *t = &current->thread;
-	struct pt_regs *regs = current_pt_regs();
 	unsigned int old;
 
 	/*
@@ -187,10 +182,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
 	if (level > 3)
 		return -EINVAL;
 
-	if (IS_ENABLED(CONFIG_X86_IOPL_EMULATION))
-		old = t->iopl_emul;
-	else
-		old = t->iopl >> X86_EFLAGS_IOPL_BIT;
+	old = t->iopl_emul;
 
 	/* No point in going further if nothing changes */
 	if (level == old)
@@ -203,25 +195,8 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
 		return -EPERM;
 	}
 
-	if (IS_ENABLED(CONFIG_X86_IOPL_EMULATION)) {
-		t->iopl_emul = level;
-		task_update_io_bitmap();
-	} else {
-		/*
-		 * Change the flags value on the return stack, which has
-		 * been set up on system-call entry. See also the fork and
-		 * signal handling code how this is handled.
-		 */
-		regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
-			(level << X86_EFLAGS_IOPL_BIT);
-		/* Store the new level in the thread struct */
-		t->iopl = level << X86_EFLAGS_IOPL_BIT;
-		/*
-		 * X86_32 switches immediately and XEN handles it via
-		 * emulation.
-		 */
-		set_iopl_mask(t->iopl);
-	}
+	t->iopl_emul = level;
+	task_update_io_bitmap();
 
 	return 0;
 }
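
The removed comment above contrasted iopl() with ioperm(); with the CLI/STI side effect gone, applications that only need a handful of ports are still better served by the narrower ioperm() interface. A user-space sketch, again illustrative and not part of the commit:

/* Illustrative only: grant access to two ports instead of all 65536.
 * Requires CAP_SYS_RAWIO, e.g. run as root. */
#include <stdio.h>
#include <sys/io.h>		/* ioperm(), inb(), outb() on x86 */

int main(void)
{
	/* Ports 0x70/0x71 are the CMOS/RTC index and data registers. */
	if (ioperm(0x70, 2, 1)) {
		perror("ioperm");
		return 1;
	}

	outb(0x00, 0x70);			/* select CMOS register 0 (seconds) */
	printf("CMOS reg 0 = %#x\n", inb(0x71));

	ioperm(0x70, 2, 0);			/* drop the permission again */
	return 0;
}
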
@@ -341,8 +341,6 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.iret		= native_iret,
 	.cpu.swapgs		= native_swapgs,
 
-	.cpu.set_iopl_mask	= native_set_iopl_mask,
-
 	.cpu.start_context_switch	= paravirt_nop,
 	.cpu.end_context_switch		= paravirt_nop,
......
@@ -187,15 +187,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	load_TLS(next, cpu);
 
-	/*
-	 * Restore IOPL if needed. In normal use, the flags restore
-	 * in the switch assembly will handle this. But if the kernel
-	 * is running virtualized at a non-zero CPL, the popf will
-	 * not restore flags, so it must be done in a separate step.
-	 */
-	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
-		set_iopl_mask(next->iopl);
-
 	switch_to_extra(prev_p, next_p);
 
 	/*
......
@@ -497,17 +497,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	switch_to_extra(prev_p, next_p);
 
-#ifdef CONFIG_XEN_PV
-	/*
-	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
-	 * current_pt_regs()->flags may not match the current task's
-	 * intended IOPL. We need to switch it manually.
-	 */
-	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
-		     prev->iopl != next->iopl))
-		xen_set_iopl_mask(next->iopl);
-#endif
-
 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
 		/*
 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
......
@@ -837,15 +837,6 @@ static void xen_load_sp0(unsigned long sp0)
 	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
-void xen_set_iopl_mask(unsigned mask)
-{
-	struct physdev_set_iopl set_iopl;
-
-	/* Force the change at ring 0. */
-	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
-	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-}
-
 static void xen_io_delay(void)
 {
 }
@@ -1055,7 +1046,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.write_idt_entry = xen_write_idt_entry,
 	.load_sp0 = xen_load_sp0,
-	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
 
 	/* Xen takes care of %gs when switching to usermode for us */
......
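
For reference, the removed xen_set_iopl_mask() translated an EFLAGS-style mask back into the 0-3 level expected by PHYSDEVOP_set_iopl, forcing a mask of 0 to level 1 (the "Force the change at ring 0" comment in the removed code). Below is a stand-alone sketch of just that conversion, illustrative only:

#include <assert.h>

/* Same conversion as the removed Xen helper: bits 12-13 of the mask
 * hold the IOPL level; a mask of 0 is forced to 1. */
static unsigned int xen_iopl_from_mask(unsigned int mask)
{
	return (mask == 0) ? 1 : (mask >> 12) & 3;
}

int main(void)
{
	assert(xen_iopl_from_mask(0x0000) == 1);	/* no user space IOPL */
	assert(xen_iopl_from_mask(0x3000) == 3);	/* legacy iopl(3) */
	return 0;
}
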