Commit 676b1855 authored by Simon Arlott, committed by Adrian Bunk

spelling fixes: arch/x86_64/

Spelling fixes in arch/x86_64/.
Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
parent 5b20311e
@@ -25,7 +25,7 @@
 /*
  * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
  * Background information:
  *
  * The file layout is:
@@ -94,7 +94,7 @@
  * Adding 32768 instead of 32767 just makes for round numbers.
  * Adding the decompressor_size is necessary as it musht live after all
  * of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
  *
  */
...
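The margin this comment reasons about can be written down directly. A minimal sketch, assuming only what the comment itself states (the 32K deflate window plus roughly 14K of decompressor); the macro names are illustrative, not the kernel's:

/* Worst-case slack needed past the compressed data for safe in-place
 * decompression, per the comment above: the full 32K lookbehind window
 * (32768 rather than 32767, for round numbers) plus the decompressor
 * itself, which must also live after the data. */
#define WINDOW_SLACK   32768          /* max deflate window, rounded up */
#define DECOMP_DATA    (10 * 1024)    /* ~10K of actual decompressor data */
#define DECOMP_BSS     (4 * 1024)     /* ~4K of decompressor bss */
#define SAFETY_MARGIN  (WINDOW_SLACK + DECOMP_DATA + DECOMP_BSS)  /* ~46K */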
@@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck);
 /*
  *
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
  * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
  *   Linux doesn't really care, as it's not actually used
  *   for any interrupt handling anyway.
@@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq)
 }
 /*
- * MSI mesage composition
+ * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
...
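For context on the fixed comment: "composing" an MSI message means filling in the (address, data) pair a device writes to signal its interrupt. A simplified sketch of that idea, not the kernel's msi_compose_msg(); the struct and field layout are abbreviated, and 0xfee00000 is the architectural x86 MSI doorbell base:

#include <stdint.h>

/* Sketch: the address selects the target APIC, the data carries the vector. */
struct msi_msg_sketch {
        uint32_t address_lo;    /* low 32 bits of the doorbell address */
        uint32_t address_hi;    /* high 32 bits (zero below 4GB) */
        uint32_t data;          /* delivery mode, trigger mode, vector */
};

static void msi_compose_sketch(struct msi_msg_sketch *msg,
                               unsigned int dest_apic_id, unsigned int vector)
{
        msg->address_hi = 0;
        msg->address_lo = 0xfee00000u | (dest_apic_id << 12); /* target APIC ID */
        msg->data = vector;     /* fixed delivery mode, edge triggered */
}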
@@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 #ifdef CONFIG_X86_MCE_INTEL
 /***
  * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occured.
+ * @cpu: The CPU on which the event occurred.
  * @status: Event status information
  *
  * This function should be called by the thermal interrupt after the
@@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str)
 	return 1;
 }
-/* mce=off disables machine check. Note you can reenable it later
+/* mce=off disables machine check. Note you can re-enable it later
    using sysfs.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
...
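The options documented in this comment are handled by an early boot-parameter parser. A simplified, self-contained sketch of the dispatch logic (variable names are illustrative; the kernel registers its parser with __setup()):

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

static int mce_disabled, mce_bootlog, tolerant = 1;     /* illustrative knobs */

/* Sketch: dispatch one "mce=..." boot option string. */
static void mce_parse_sketch(char *str)
{
        if (!strcmp(str, "off"))
                mce_disabled = 1;               /* mce=off: disable machine checks */
        else if (!strcmp(str, "bootlog"))
                mce_bootlog = 1;                /* mce=bootlog: keep pre-boot MCEs */
        else if (isdigit((unsigned char)str[0]))
                tolerant = atoi(str);           /* mce=N: tolerance level */
}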
@@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs)
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		/* Reenable any watchpoints before delivering the
+		/* Re-enable any watchpoints before delivering the
 		 * signal to user space. The processor register will
 		 * have been cleared if the watchpoint triggered
 		 * inside the kernel.
...
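"Re-enabling" here means restoring the debug control register from the per-thread saved copy, since the hardware register is cleared when a watchpoint fires in kernel mode. The code following this comment amounts to the pattern below (a sketch; treat the field and helper names as era-specific rather than authoritative):

/* Restore DR7 from the thread's saved value so user watchpoints
 * are live again before the signal handler runs. */
if (current->thread.debugreg7)
        set_debugreg(current->thread.debugreg7, 7);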
@@ -350,7 +350,7 @@ void __cpuinit start_secondary(void)
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
-	 * IPI receipients, and the time when the determination is made
+	 * IPI recipients, and the time when the determination is made
 	 * for which cpus receive the IPI in genapic_flat.c. Holding this
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
...
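The locking pattern this comment describes: the booting CPU takes the same lock smp_call_function() holds while counting recipients, marks itself online under it, then drops it. A sketch under the assumption that the era's helper names apply (treat them as illustrative):

/* Become visible to IPI broadcasts only while holding the lock that
 * smp_call_function() uses to count its recipients. */
lock_ipi_call_lock();
cpu_set(smp_processor_id(), cpu_online_map);
unlock_ipi_call_lock();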
@@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
 #define MSG(txt) ops->warning(data, txt)
 /*
- * x86-64 can have upto three kernel stacks:
+ * x86-64 can have up to three kernel stacks:
  * process stack
  * interrupt stack
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
...
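The function this hunk sits in decides which of those stacks an address belongs to; at its core that is a half-open range test. A minimal self-contained sketch (names are illustrative):

#include <stdbool.h>

/* Does 'sp' point into the stack occupying [stack_end - size, stack_end)? */
static bool in_stack_range_sketch(unsigned long sp,
                                  unsigned long stack_end,
                                  unsigned long size)
{
        return sp >= stack_end - size && sp < stack_end;
}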
@@ -53,7 +53,7 @@
 /*
  * vsyscall_gtod_data contains data that is :
  * - readonly from vsyscalls
- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
  * Try to keep this structure as small as possible to avoid cache line ping pongs
  */
 int __vgetcpu_mode __section_vgetcpu_mode;
...
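The "readonly from vsyscalls / written by timer interrupt" split works because readers use a sequence-counter retry loop instead of a lock. A sketch of the read side (read_seqbegin()/read_seqretry() are the kernel's seqlock primitives; the field name 'lock' is an assumption):

/* Retry the copy if the timer interrupt updated the data mid-read. */
unsigned seq;
do {
        seq = read_seqbegin(&vsyscall_gtod_data.lock);
        /* ... copy out the timekeeping fields gettimeofday() needs ... */
} while (read_seqretry(&vsyscall_gtod_data.lock, seq));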
@@ -378,7 +378,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 again:
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
-	 * kernel and should generate an OOPS. Unfortunatly, in the case of an
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
 	 * erroneous fault occurring in a code path which already holds mmap_sem
 	 * we will deadlock attempting to validate the fault against the
 	 * address space. Luckily the kernel only validly references user
@@ -386,7 +386,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * exceptions table.
 	 *
 	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibilty of a deadlock.
+	 * the source reference check when there is a possibility of a deadlock.
 	 * Attempt to lock the address space, if we cannot we then validate the
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
...
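The deadlock-avoidance strategy spelled out in this comment is a trylock-then-validate pattern. A condensed sketch of the logic it describes (not the verbatim source; user_mode() stands in for the raw error-code check):

if (!down_read_trylock(&mm->mmap_sem)) {
        /* Contended: a kernel-mode fault is only legitimate here if the
         * faulting instruction appears in the exception table. */
        if (!user_mode(regs) && !search_exception_tables(regs->rip))
                goto bad_area_nosemaphore;      /* kernel bug: don't risk deadlock */
        down_read(&mm->mmap_sem);               /* valid fault: safe to block */
}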
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
 /*
  * Update nodes_add and decide if to include add are in the zone.
  * Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
  */
 static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 {
...
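"One contiguous hot add area per node" means a new range can only be adopted by an empty node entry or merged onto either end of the existing one. A self-contained sketch of that invariant (struct and function names are illustrative):

struct range_sketch { unsigned long start, end; };

/* Grow the node's single hot-add range, rejecting discontiguous adds. */
static int extend_range_sketch(struct range_sketch *nd,
                               unsigned long start, unsigned long end)
{
        if (nd->start == nd->end) {             /* empty: adopt the new range */
                nd->start = start;
                nd->end = end;
        } else if (end == nd->start) {          /* extends downward */
                nd->start = start;
        } else if (nd->end == start) {          /* extends upward */
                nd->end = end;
        } else {
                return -1;                      /* not contiguous: reject */
        }
        return 0;
}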