Commit 7423cc0c authored by Adam Buchbinder's avatar Adam Buchbinder Committed by Vineet Gupta

ARC: Fix misspellings in comments.

Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent fc77dbd3
...@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, ...@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* Since xchg() doesn't always do that, it would seem that following definition * Since xchg() doesn't always do that, it would seem that following definition
* is incorrect. But here's the rationale: * is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK. * SMP : Even xchg() takes the atomic_ops_lock, so OK.
* LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
* is natively "SMP safe", no serialization required). * is natively "SMP safe", no serialization required).
* UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg() * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
* could clobber them. atomic_xchg() itself would be 1 insn, so it * could clobber them. atomic_xchg() itself would be 1 insn, so it
......
...@@ -231,7 +231,7 @@ ...@@ -231,7 +231,7 @@
/* free up r9 as scratchpad */ /* free up r9 as scratchpad */
PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
/* Which mode (user/kernel) was the system in when intr occured */ /* Which mode (user/kernel) was the system in when intr occurred */
lr r9, [status32_l\LVL\()] lr r9, [status32_l\LVL\()]
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
* - Utilise some unused free bits to confine PTE flags to 12 bits * - Utilise some unused free bits to confine PTE flags to 12 bits
* This is a must for 4k pg-sz * This is a must for 4k pg-sz
* *
* vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
* -TLB Locking never really existed, except for initial specs * -TLB Locking never really existed, except for initial specs
* -SILENT_xxx not needed for our port * -SILENT_xxx not needed for our port
* -Per my request, MMU V3 changes the layout of some of the bits * -Per my request, MMU V3 changes the layout of some of the bits
......
...@@ -232,7 +232,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) ...@@ -232,7 +232,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
} }
/* Another API expected by scheduler, shows up in "ps" as Wait Channel /* Another API expected by scheduler, shows up in "ps" as Wait Channel
* Ofcourse just returning schedule( ) would be pointless so unwind until * Of course just returning schedule( ) would be pointless so unwind until
 * the function is not in scheduler code * the function is not in scheduler code
*/ */
unsigned int get_wchan(struct task_struct *tsk) unsigned int get_wchan(struct task_struct *tsk)
......
...@@ -55,8 +55,8 @@ ...@@ -55,8 +55,8 @@
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */ #define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */ #define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */ #define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */ #define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMER_MAX 0xFFFFFFFF #define ARC_TIMER_MAX 0xFFFFFFFF
......
...@@ -650,7 +650,7 @@ static void __dma_cache_wback_l1(unsigned long start, unsigned long sz) ...@@ -650,7 +650,7 @@ static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
/* /*
* DMA ops for systems with both L1 and L2 caches, but without IOC * DMA ops for systems with both L1 and L2 caches, but without IOC
* Both L1 and L2 lines need to be explicity flushed/invalidated * Both L1 and L2 lines need to be explicitly flushed/invalidated
*/ */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz) static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{ {
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
/* /*
* HIGHMEM API: * HIGHMEM API:
* *
* kmap() API provides sleep semantics hence refered to as "permanent maps" * kmap() API provides sleep semantics hence referred to as "permanent maps"
* It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
* for book-keeping * for book-keeping
* *
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
* in interrupt-safe region. * in interrupt-safe region.
* *
* Vineetg: April 23rd Bug #93131 * Vineetg: April 23rd Bug #93131
* Problem: tlb_flush_kernel_range() doesnt do anything if the range to * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
* flush is more than the size of TLB itself. * flush is more than the size of TLB itself.
* *
* Rahul Trivedi : Codito Technologies 2004 * Rahul Trivedi : Codito Technologies 2004
...@@ -167,7 +167,7 @@ static void utlb_invalidate(void) ...@@ -167,7 +167,7 @@ static void utlb_invalidate(void)
/* MMU v2 introduced the uTLB Flush command. /* MMU v2 introduced the uTLB Flush command.
* There was however an obscure hardware bug, where uTLB flush would * There was however an obscure hardware bug, where uTLB flush would
* fail when a prior probe for J-TLB (both totally unrelated) would * fail when a prior probe for J-TLB (both totally unrelated) would
* return lkup err - because the entry didnt exist in MMU. * return lkup err - because the entry didn't exist in MMU.
 * The Workaround was to set Index reg with some valid value, prior to * The Workaround was to set Index reg with some valid value, prior to
* flush. This was fixed in MMU v3 hence not needed any more * flush. This was fixed in MMU v3 hence not needed any more
*/ */
...@@ -210,7 +210,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1) ...@@ -210,7 +210,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
/* /*
* Commit the Entry to MMU * Commit the Entry to MMU
* It doesnt sound safe to use the TLBWriteNI cmd here * It doesn't sound safe to use the TLBWriteNI cmd here
* which doesn't flush uTLBs. I'd rather be safe than sorry. * which doesn't flush uTLBs. I'd rather be safe than sorry.
*/ */
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
...@@ -636,7 +636,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, ...@@ -636,7 +636,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* support. * support.
* *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a * Normal and Super pages can co-exist (of course not overlap) in TLB with a
* new bit "SZ" in TLB page desciptor to distinguish between them. * new bit "SZ" in TLB page descriptor to distinguish between them.
* Super Page size is configurable in hardware (4K to 16M), but fixed once * Super Page size is configurable in hardware (4K to 16M), but fixed once
* RTL builds. * RTL builds.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment