Commit 683f27ab authored by Alan Cox, committed by Linus Torvalds

[PATCH] x86-64 typo fixes

(Steven Cole)
parent 55bf1ffd
@@ -17,7 +17,7 @@ static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
 	return x;
 }
 
-/* Do not define swab16. Gcc is smart enought to recognize "C" version and
+/* Do not define swab16. Gcc is smart enough to recognize "C" version and
    convert it into rotation or exhange. */
 
 #define __arch__swab32(x) ___arch__swab32(x)
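The claim in this comment holds in practice: gcc pattern-matches a plain C byte swap and emits a rotate or byte exchange on x86. A minimal sketch, assuming nothing from the patched header (the helper name swab16_c is made up for illustration):

#include <stdint.h>

/* Plain C 16-bit byte swap. gcc recognizes the shift-and-or pattern
 * and compiles it to a single rotate ("rolw $8") or a byte exchange
 * on x86, which is why the header deliberately defines no swab16. */
static inline uint16_t swab16_c(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}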
...
@@ -66,7 +66,7 @@ extern void __this_fixmap_does_not_exist(void);
 
 /*
  * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
+ * directly without translation, we catch the bug with a NULL-deference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
extern inline unsigned long fix_to_virt(const unsigned int idx)
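For context, fix_to_virt() maps a fixmap index to a fixed virtual address, one page per slot counted down from the top of the fixmap area; an out-of-range compile-time-constant index calls a function that is declared but never defined, so the build fails at link time instead of oopsing at runtime. A sketch of the scheme as it appeared in kernels of this era (details may differ slightly from the exact patched file):

/* Slot idx lives (idx + 1) pages below FIXADDR_TOP. */
#define __fix_to_virt(idx)	(FIXADDR_TOP - ((idx) << PAGE_SHIFT))

extern inline unsigned long fix_to_virt(const unsigned int idx)
{
	/* For a constant idx that is out of range, this call survives
	 * to link time and fails the build, because
	 * __this_fixmap_does_not_exist() has no definition anywhere. */
	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}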
...
 /* K8 NUMA support */
 /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
-/* 2.5 Version losely based on the NUMAQ Code by Pat Gaughen. */
+/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
 #ifndef _ASM_X86_64_MMZONE_H
 #define _ASM_X86_64_MMZONE_H 1
...
@@ -26,7 +26,7 @@
  * This should be totally fair - if anything is waiting, a process that wants a
  * lock will go to the back of the queue. When the currently active lock is
  * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
+ * that will be woken up; if there's a bunch of consecutive readers at the
  * front, then they'll all be woken up, but no other readers will be.
  */
...
@@ -40,7 +40,7 @@ struct save_context_frame {
 /* It would be more efficient to let the compiler clobber most of these registers.
    Clobbering all is not possible because that lets reload freak out. Even just
    clobbering six generates wrong code with gcc 3.1 for me so do it this way for now.
-   rbp needs to be always explicitely saved because gcc cannot clobber the
+   rbp needs to be always explicitly saved because gcc cannot clobber the
    frame pointer and the scheduler is compiled with frame pointers. -AK */
 #define SAVE_CONTEXT \
 	__PUSH(rsi) __PUSH(rdi) \
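The rbp restriction is a general gcc property: when code is built with frame pointers, the frame-pointer register may not appear in an asm clobber list, so it has to be saved and restored by hand. A hypothetical illustration of the pattern, not the real SAVE_CONTEXT expansion:

static inline void clobber_demo(void)
{
	asm volatile(
		"pushq %%rbp\n\t"	/* save the frame pointer by hand */
		/* ... asm body that may scribble on rbp ... */
		"popq %%rbp"		/* restore it before returning */
		: /* no outputs */
		: /* no inputs */
		: "rax", "rcx", "memory");	/* other registers can simply
						   be listed as clobbers */
}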
...