Commit 077fbc21 authored by Luca Barbieri, committed by Linus Torvalds

[PATCH] Use %ebp rather than %ebx for thread_info pointer

This patch changes assembly code that accesses thread_info to use %ebp
rather than %ebx.

This allows me to take advantage of the fact that %ebp is restored by
user mode in the sysenter register pop removal patch.

vm86() direct return code updated to match [ Linus ]
parent a0e6cf38
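The change itself is mechanical, but the invariant behind GET_THREAD_INFO is worth keeping in mind while reading the hunks below. As a minimal sketch (not part of this patch, with a hypothetical helper name), assuming the 8 KiB per-task kernel stack (THREAD_SIZE = 8192) of this kernel series: struct thread_info sits at the base of the kernel stack, so the pointer is recovered by masking the stack pointer, which is exactly what the open-coded "andl $-8192  # GET_THREAD_INFO" does in the lcall7 path. The register that caches the result is whatever the entry code picks, %ebx before this patch and %ebp after it; %ebp is safe to clobber here because the companion sysenter change has user mode restore it.

/*
 * Sketch only, not part of the patch: how the thread_info pointer is
 * derived on i386 with an 8 KiB kernel stack.  The helper name is
 * hypothetical; the masking matches the "andl $-8192" in the lcall7 hunk.
 */
struct thread_info;

static inline struct thread_info *thread_info_from_esp(void)
{
	struct thread_info *ti;

	/* round %esp down to the 8 KiB stack base, where thread_info lives */
	__asm__("andl %%esp,%0" : "=r" (ti) : "0" (~8191UL));
	return ti;
}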
arch/i386/kernel/entry.S

@@ -145,16 +145,16 @@ ENTRY(lcall7)
 	# gates, which has to be cleaned up later..
 	pushl %eax
 	SAVE_ALL
-	movl %esp, %ebx
-	pushl %ebx
+	movl %esp, %ebp
+	pushl %ebp
 	pushl $0x7
 do_lcall:
-	movl EIP(%ebx), %eax	# due to call gates, this is eflags, not eip..
-	movl CS(%ebx), %edx	# this is eip..
-	movl EFLAGS(%ebx), %ecx	# and this is cs..
-	movl %eax,EFLAGS(%ebx)	#
-	movl %edx,EIP(%ebx)	# Now we move them to their "normal" places
-	movl %ecx,CS(%ebx)	#
+	movl EIP(%ebp), %eax	# due to call gates, this is eflags, not eip..
+	movl CS(%ebp), %edx	# this is eip..
+	movl EFLAGS(%ebp), %ecx	# and this is cs..
+	movl %eax,EFLAGS(%ebp)	#
+	movl %edx,EIP(%ebp)	# Now we move them to their "normal" places
+	movl %ecx,CS(%ebp)	#
 	#
 	# Call gates don't clear TF and NT in eflags like
@@ -166,8 +166,8 @@ do_lcall:
 	pushl %eax
 	popfl
-	andl $-8192, %ebx		# GET_THREAD_INFO
-	movl TI_EXEC_DOMAIN(%ebx), %edx	# Get the execution domain
+	andl $-8192, %ebp		# GET_THREAD_INFO
+	movl TI_EXEC_DOMAIN(%ebp), %edx	# Get the execution domain
 	call *4(%edx)			# Call the lcall7 handler for the domain
 	addl $4, %esp
 	popl %eax
@@ -178,8 +178,8 @@ ENTRY(lcall27)
 	# gates, which has to be cleaned up later..
 	pushl %eax
 	SAVE_ALL
-	movl %esp, %ebx
-	pushl %ebx
+	movl %esp, %ebp
+	pushl %ebp
 	pushl $0x27
 	jmp do_lcall
@@ -187,7 +187,7 @@ ENTRY(lcall27)
 ENTRY(ret_from_fork)
 	# NOTE: this function takes a parameter but it's unused on x86.
 	call schedule_tail
-	GET_THREAD_INFO(%ebx)
+	GET_THREAD_INFO(%ebp)
 	jmp syscall_exit
 /*
@@ -202,7 +202,7 @@ ENTRY(ret_from_fork)
 ret_from_exception:
 	preempt_stop
 ret_from_intr:
-	GET_THREAD_INFO(%ebx)
+	GET_THREAD_INFO(%ebp)
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
@@ -211,7 +211,7 @@ ENTRY(resume_userspace)
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
-	movl TI_FLAGS(%ebx), %ecx
+	movl TI_FLAGS(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
 					# int/exception return?
 	jne work_pending
@@ -219,18 +219,18 @@ ENTRY(resume_userspace)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	cmpl $0,TI_PRE_COUNT(%ebx)	# non-zero preempt_count ?
+	cmpl $0,TI_PRE_COUNT(%ebp)	# non-zero preempt_count ?
 	jnz restore_all
 need_resched:
-	movl TI_FLAGS(%ebx), %ecx	# need_resched set ?
+	movl TI_FLAGS(%ebp), %ecx	# need_resched set ?
 	testb $_TIF_NEED_RESCHED, %cl
 	jz restore_all
 	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (execption path) ?
 	jz restore_all
-	movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebx)
+	movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebp)
 	sti
 	call schedule
-	movl $0,TI_PRE_COUNT(%ebx)
+	movl $0,TI_PRE_COUNT(%ebp)
 	cli
 	jmp need_resched
 #endif
@@ -262,16 +262,16 @@ ENTRY(sysenter_entry)
 	pushl %eax
 	SAVE_ALL
-	GET_THREAD_INFO(%ebx)
+	GET_THREAD_INFO(%ebp)
 	cmpl $(NR_syscalls), %eax
 	jae syscall_badsys
-	testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
+	testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp)
 	jnz syscall_trace_entry
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
 	cli
-	movl TI_FLAGS(%ebx), %ecx
+	movl TI_FLAGS(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
 	RESTORE_INT_REGS
@@ -286,11 +286,11 @@ ENTRY(sysenter_entry)
 ENTRY(system_call)
 	pushl %eax			# save orig_eax
 	SAVE_ALL
-	GET_THREAD_INFO(%ebx)
+	GET_THREAD_INFO(%ebp)
 	cmpl $(NR_syscalls), %eax
 	jae syscall_badsys
 					# system call tracing in operation
-	testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
+	testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebp)
 	jnz syscall_trace_entry
 syscall_call:
 	call *sys_call_table(,%eax,4)
@@ -299,7 +299,7 @@ syscall_exit:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
-	movl TI_FLAGS(%ebx), %ecx
+	movl TI_FLAGS(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
 restore_all:
@@ -315,7 +315,7 @@ work_resched:
 	cli				# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
-	movl TI_FLAGS(%ebx), %ecx
+	movl TI_FLAGS(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
 	jz restore_all
@@ -370,7 +370,7 @@ syscall_exit_work:
 syscall_fault:
 	pushl %eax			# save orig_eax
 	SAVE_ALL
-	GET_THREAD_INFO(%ebx)
+	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,EAX(%esp)
 	jmp resume_userspace
arch/i386/kernel/vm86.c

@@ -298,9 +298,10 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	__asm__ __volatile__(
 		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
 		"movl %0,%%esp\n\t"
+		"movl %1,%%ebp\n\t"
 		"jmp resume_userspace"
 		: /* no outputs */
-		:"r" (&info->regs), "b" (tsk->thread_info) : "ax");
+		:"r" (&info->regs), "r" (tsk->thread_info) : "ax");
 	/* we never return here */
 }
@@ -311,8 +312,9 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
 	regs32 = save_v86_state(regs16);
 	regs32->eax = retval;
 	__asm__ __volatile__("movl %0,%%esp\n\t"
+		"movl %1,%%ebp\n\t"
 		"jmp resume_userspace"
-		: : "r" (regs32), "b" (current_thread_info()));
+		: : "r" (regs32), "r" (current_thread_info()));
 }
 static inline void set_IF(struct kernel_vm86_regs * regs)
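One detail of the two vm86.c hunks above: GCC's i386 machine constraints can pin an operand to %ebx ("b"), but there is no constraint letter for %ebp, so once the exit path expects the thread_info pointer in %ebp the constraint alone can no longer place it. Instead the pointer is passed through a plain "r" operand and moved into %ebp inside the asm, right before the jump to resume_userspace, which never returns. A hedged sketch of the resulting pattern follows; the wrapper name and the void * parameter are illustrative, not the kernel's.

/*
 * Sketch of the pattern used by do_sys_vm86()/return_to_32bit() above.
 * The asm block never returns (it jumps to resume_userspace), so
 * overwriting %esp and %ebp here is deliberate and safe.
 */
static void switch_to_frame_and_resume(void *frame)
{
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"	/* switch to the saved register frame    */
		"movl %1,%%ebp\n\t"	/* load thread_info into %ebp by hand,   */
					/* since no constraint can name %ebp     */
		"jmp resume_userspace"	/* exit path reads thread_info from %ebp */
		: /* no outputs */
		: "r" (frame), "r" (current_thread_info()));
}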