Commit 837371c9 authored by Michael Hayes, committed by Linus Torvalds

[PATCH] Spelling fixes: boundary

    boundry -> boundary
    boundries -> boundaries
parent 4acd5d44
@@ -119,7 +119,7 @@ void __init sun3_bootmem_alloc(unsigned long memory_start, unsigned long memory_
 {
 	unsigned long start_page;
-	/* align start/end to page boundries */
+	/* align start/end to page boundaries */
 	memory_start = ((memory_start + (PAGE_SIZE-1)) & PAGE_MASK);
 	memory_end = memory_end & PAGE_MASK;
......
@@ -12,7 +12,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
 struct task_struct init_task = INIT_TASK(init_task);
-/* .text section in head.S is aligned at 8k boundry and this gets linked
+/* .text section in head.S is aligned at 8k boundary and this gets linked
  * right after that so that the init_thread_union is aligned properly as well.
  * If this is not aligned on a 8k boundry, then you should change code
  * in etrap.S which assumes it.
......
@@ -38,7 +38,7 @@
  * and (2 * PAGE_SIZE) (for kernel stacks)
  * and with a second arg of zero. We assume in
  * all of these cases that the buffer is aligned
- * on at least an 8 byte boundry.
+ * on at least an 8 byte boundary.
  *
  * Therefore we special case them to make them
  * as fast as possible.
......
@@ -336,7 +336,7 @@ C_LABEL(__csum_partial_copy_sparc_generic):
 	bne	cc_dword_align		! yes, we check for short lengths there
 	 andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
 3:	be	3f			! nope, less than one loop remains
-	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundry?
+	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
 	be	ccdbl + 4		! 8 byte aligned, kick ass
 5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
......
@@ -533,7 +533,7 @@ static inline void sun4c_init_ss2_cache_bug(void)
 	}
 }
-/* Addr is always aligned on a page boundry for us already. */
+/* Addr is always aligned on a page boundary for us already. */
 static void sun4c_map_dma_area(unsigned long va, u32 addr, int len)
 {
 	unsigned long page, end;
......
@@ -12,7 +12,7 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
-/* .text section in head.S is aligned at 2 page boundry and this gets linked
+/* .text section in head.S is aligned at 2 page boundary and this gets linked
  * right after that so that the init_thread_union is aligned properly as well.
  * We really don't need this special alignment like the Intel does, but
  * I do it anyways for completeness.
......
@@ -40,7 +40,7 @@ extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int
 /* Two addresses are "virtually contiguous" if and only if:
  * 1) They are equal, or...
- * 2) They are both on a page boundry
+ * 2) They are both on a page boundary
  */
 #define VCONTIG(__X, __Y)	(((__X) == (__Y)) || \
 				 (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
......
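The VCONTIG test in the hunk above uses a shift rather than a mask: left-shifting by (64UL - PAGE_SHIFT) discards everything except the low PAGE_SHIFT bits, so the result is zero exactly when both addresses sit on a page boundary. A minimal standalone sketch of the same check (the PAGE_SHIFT value and test addresses are hypothetical, and a 64-bit unsigned long is assumed):

#include <stdio.h>

#define PAGE_SHIFT 13	/* assumed 8K pages, purely illustrative */

/* "Virtually contiguous": equal, or both on a page boundary. */
static int vcontig(unsigned long x, unsigned long y)
{
	/* the shift keeps only the low PAGE_SHIFT bits of (x | y);
	 * assumes unsigned long is 64 bits wide */
	return (x == y) ||
	       (((x | y) << (64UL - PAGE_SHIFT)) == 0UL);
}

int main(void)
{
	printf("%d\n", vcontig(0x4000, 0x6000));	/* 1: both 8K aligned */
	printf("%d\n", vcontig(0x4000, 0x6004));	/* 0: second is not */
	return 0;
}

The same trick appears again in the sba_fill_pdir hunk further down, whose comment spells it out: "shift left is a quick trick to mask off upper bits".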
@@ -24,7 +24,7 @@
 #include "iommu_common.h"
 /* These should be allocated on an SMP_CACHE_BYTES
- * aligned boundry for optimal performance.
+ * aligned boundary for optimal performance.
  *
  * On SYSIO, using an 8K page size we have 1GB of SBUS
  * DMA space mapped. We divide this space into equally
......
@@ -215,7 +215,7 @@ static ssize_t flash_write(struct file *file, const char *buf, size_t size, loff
 	temp = ((int) (p + count) >> 16) - nBlock + 1;
 	/*
-	 * write ends at exactly 64k boundry?
+	 * write ends at exactly 64k boundary?
 	 */
 	if (((int) (p + count) & 0xFFFF) == 0)
 		temp -= 1;
......
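The arithmetic in the hunk above counts how many 64K flash blocks a write touches: the 64K block index of the last address, minus the index of the first block, plus one; when the write ends exactly on a 64K boundary, p + count names the first byte of the next block, so one block is subtracted back out. A self-contained sketch of that calculation (function name and values are hypothetical):

#include <stdio.h>

/* Number of 64K blocks touched by a write of `count` bytes at offset `p`,
 * mirroring the flash_write logic quoted above. */
static unsigned long blocks_touched(unsigned long p, unsigned long count)
{
	unsigned long nBlock = p >> 16;
	unsigned long temp = ((p + count) >> 16) - nBlock + 1;

	/* write ends at exactly 64k boundary? */
	if (((p + count) & 0xFFFF) == 0)
		temp -= 1;
	return temp;
}

int main(void)
{
	printf("%lu\n", blocks_touched(0x0000, 0x10000));	/* 1: exactly one block */
	printf("%lu\n", blocks_touched(0x0000, 0x10001));	/* 2: spills into the next */
	printf("%lu\n", blocks_touched(0xFFFF, 0x2));		/* 2: straddles 0x10000 */
	return 0;
}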
@@ -1378,7 +1378,7 @@ static int __init ace_init(struct net_device *dev)
 	 * On this platform, we know what the best dma settings
 	 * are. We use 64-byte maximum bursts, because if we
 	 * burst larger than the cache line size (or even cross
-	 * a 64byte boundry in a single burst) the UltraSparc
+	 * a 64byte boundary in a single burst) the UltraSparc
 	 * PCI controller will disconnect at 64-byte multiples.
 	 *
 	 * Read-multiple will be properly enabled above, and when
......
@@ -424,7 +424,7 @@ struct dma_desc {
 };
 /* There are only 12 bits in the DMA engine for descriptor offsetting
- * so the table must be aligned on a boundry of this.
+ * so the table must be aligned on a boundary of this.
  */
 #define DMA_TABLE_BYTES 4096
......
@@ -499,7 +499,7 @@ u_char *addr;
 	fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;
 	/* Allocate memory for indices.
-	 * Indices should be aligned on 32 byte boundries.
+	 * Indices should be aligned on 32 byte boundaries.
 	 */
 	fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
 	if (fi->q.host_ocq_cons_indx == NULL){
......
@@ -1198,7 +1198,7 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, unsigned long tr
  * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
  *
  * First our alloc_skb() routine aligns the data base to a 64 byte
- * boundry. We now have 0xf001b040 as our skb data address. We
+ * boundary. We now have 0xf001b040 as our skb data address. We
  * plug this into the receive descriptor address.
  *
  * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
......
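The walkthrough in the hunk above chains two placement steps: round the skb data base up to a 64 byte boundary (giving the quoted 0xf001b040), then reserve 2 bytes for the Happy Meal offset the comment mentions, which also leaves the header that follows on a neatly aligned address. A quick sketch of that arithmetic (the starting address is hypothetical, chosen so the round-up lands on the value quoted in the comment):

#include <stdio.h>

int main(void)
{
	unsigned long base = 0xf001b005UL;	/* hypothetical unaligned skb data */

	/* round up to a 64 byte boundary, as alloc_skb() is said to do */
	unsigned long aligned = (base + 63UL) & ~63UL;	/* 0xf001b040 */

	/* skb_reserve(skb, 2): packet data then starts at ...42, so the
	 * 14-byte Ethernet header ends at ...50, a 16 byte boundary */
	unsigned long data = aligned + 2;	/* 0xf001b042 */

	printf("aligned=%#lx data=%#lx ip=%#lx\n", aligned, data, data + 14);
	return 0;
}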
@@ -298,7 +298,7 @@
 #define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */
 /* Happy Meal descriptor rings and such.
- * All descriptor rings must be aligned on a 2K boundry.
+ * All descriptor rings must be aligned on a 2K boundary.
  * All receive buffers must be 64 byte aligned.
  * Always write the address first before setting the ownership
  * bits to avoid races with the hardware scanning the ring.
......
@@ -647,7 +647,7 @@ static void lance_piocopy_to_skb(struct sk_buff *skb, volatile void *piobuf, int
 	u8 *p8;
 	unsigned long pbuf = (unsigned long) piobuf;
-	/* We know here that both src and dest are on a 16bit boundry. */
+	/* We know here that both src and dest are on a 16bit boundary. */
 	*p16++ = sbus_readw(pbuf);
 	p32 = (u32 *) p16;
 	pbuf += 2;
......
@@ -5941,7 +5941,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	/* Force memory write invalidate off. If we leave it on,
 	 * then on 5700_BX chips we have to enable a workaround.
-	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundry
+	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 	 * to match the cacheline size. The Broadcom driver have this
 	 * workaround but turns MWI off all the times so never uses
 	 * it. This seems to suggest that the workaround is insufficient.
......
@@ -417,7 +417,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
 	tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
 	/* Allocate MAC receive data buffers.
-	 * MAC Rx buffer doesn't have to be on a 256 byte boundry.
+	 * MAC Rx buffer doesn't have to be on a 256 byte boundary.
 	 */
 	tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
 		RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
@@ -438,7 +438,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
 	 * To guarantee a minimum of 256 contigous memory to
 	 * UM_Receive_Packet's lookahead pointer, before a page
 	 * change or ring end is encountered, place each rx buffer on
-	 * a 256 byte boundry.
+	 * a 256 byte boundary.
 	 */
 	smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
 	tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
@@ -1331,7 +1331,7 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
 	mem_used += tp->tx_buff_size[BUG_QUEUE];
 	/* Allocate MAC receive data buffers.
-	 * MAC receive buffers don't have to be on a 256 byte boundry.
+	 * MAC receive buffers don't have to be on a 256 byte boundary.
 	 */
 	mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
@@ -1348,7 +1348,7 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
 	 *
 	 * Make sure the mem_used offset at this point is the
 	 * same as in allocate_shared memory or the following
-	 * boundry adjustment will be incorrect (i.e. not allocating
+	 * boundary adjustment will be incorrect (i.e. not allocating
 	 * the non-mac receive buffers above cannot change the 256
 	 * byte offset).
 	 *
@@ -3930,7 +3930,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 	return (err);
 }
-/* Adapter RAM test. Incremental word ODD boundry data test. */
+/* Adapter RAM test. Incremental word ODD boundary data test. */
 static int smctr_ram_memory_test(struct net_device *dev)
 {
 	struct net_local *tp = (struct net_local *)dev->priv;
@@ -3947,7 +3947,7 @@ static int smctr_ram_memory_test(struct net_device *dev)
 	pages_of_ram = tp->ram_size / tp->ram_usable;
 	pword = tp->ram_access;
-	/* Incremental word ODD boundry test. */
+	/* Incremental word ODD boundary test. */
 	for(page = 0; (page < pages_of_ram) && (~err);
 		page++, start_pattern += 0x8000)
 	{
......
@@ -1114,7 +1114,7 @@ sba_fill_pdir(
 	/*
 	** Two address ranges are DMA contiguous *iff* "end of prev" and
-	** "start of next" are both on a page boundry.
+	** "start of next" are both on a page boundary.
 	**
 	** (shift left is a quick trick to mask off upper bits)
 	*/
......
@@ -776,7 +776,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
 }
 /* The request and response queues must each be aligned
- * on a page boundry.
+ * on a page boundary.
  */
 static int __init qpti_map_queues(struct qlogicpti *qpti)
 {
......
@@ -1112,7 +1112,7 @@ int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
 	 * current allocation in place if the number of additional blocks
 	 * can fit into a dmap, the last block of the current allocation
 	 * is not the last block of the file system, and the start of the
-	 * inplace extension is not on an allocation group boundry.
+	 * inplace extension is not on an allocation group boundary.
 	 */
 	if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
 	    (extblkno & (bmp->db_agsize - 1)) == 0) {
......
@@ -45,7 +45,7 @@ extern void dvma_free(void *vaddr);
 #define IOMMU_ENTRIES 120
 /* empirical kludge -- dvma regions only seem to work right on 0x10000
-   byte boundries */
+   byte boundaries */
 #define DVMA_REGION_SIZE 0x10000
 #define DVMA_ALIGN(addr) (((addr)+DVMA_REGION_SIZE-1) & \
 		~(DVMA_REGION_SIZE-1))
......
@@ -58,7 +58,7 @@ typedef unsigned long pgprot_t;
 #endif
-/* align addr on a size boundry - adjust address up if needed -- Cort */
+/* align addr on a size boundary - adjust address up if needed -- Cort */
 #define _ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
 /* to align the pointer to the (next) page boundary */
......
@@ -114,11 +114,11 @@ static inline int get_order(unsigned long size)
 #endif /* __ASSEMBLY__ */
-/* align addr on a size boundry - adjust address up/down if needed */
+/* align addr on a size boundary - adjust address up/down if needed */
 #define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
 #define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))
-/* align addr on a size boundry - adjust address up if needed */
+/* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
 /* to align the pointer to the (next) double word boundary */
......
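The round-up idiom in the hunk above, (addr + size - 1) & ~(size - 1), recurs throughout this patch (the PAGE_MASK rounding in the sun3 bootmem hunk, DVMA_ALIGN two hunks up): adding size - 1 carries any unaligned address past the next boundary, and the mask snaps it back down; it only works when size is a power of two. A standalone sketch using the macros from the hunk above (addresses hypothetical):

#include <stdio.h>

#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

int main(void)
{
	unsigned long a = 0x12345UL;	/* hypothetical unaligned address */

	/* size must be a power of two for the mask trick to work */
	printf("%#lx up   -> %#lx\n", a, _ALIGN_UP(a, 0x1000UL));	/* 0x13000 */
	printf("%#lx down -> %#lx\n", a, _ALIGN_DOWN(a, 0x1000UL));	/* 0x12000 */

	/* an already-aligned address is left unchanged by _ALIGN_UP */
	printf("%#lx up   -> %#lx\n", 0x12000UL, _ALIGN_UP(0x12000UL, 0x1000UL));
	return 0;
}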
@@ -1016,7 +1016,7 @@ static inline unsigned cs_get_dma_addr(struct cs_state *state)
 	}
 	/*
-	* ganularity is byte boundry, good part.
+	* ganularity is byte boundary, good part.
 	*/
 	if(dmabuf->enable & DAC_RUNNING)
 	{
......
@@ -1916,8 +1916,8 @@ allocate_dmabuf(struct pci_dev *pci_dev, struct dmabuf *db)
 	 * the amazingly complicated prog_dmabuf wants it.
 	 *
 	 * pci_alloc_sonsistent guarantees that it won't cross a natural
-	 * boundry; the m3 hardware can't have dma cross a 64k bus
-	 * address boundry.
+	 * boundary; the m3 hardware can't have dma cross a 64k bus
+	 * address boundary.
 	 */
 	for (order = 16-PAGE_SHIFT; order >= 1; order--) {
 		db->rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
......
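Whether a buffer violates the constraint described above (DMA crossing a 64k bus address boundary) comes down to comparing the 64K block of its first and last byte; the natural-boundary guarantee quoted in the comment sidesteps the problem, since a power-of-two allocation aligned to its own size can never straddle a larger power-of-two boundary, and the loop caps the order at 16-PAGE_SHIFT, i.e. at 64K. A hypothetical checker, not part of the driver, illustrating the constraint:

#include <stdio.h>

/* Does [addr, addr + len) cross a 64K bus-address boundary?
 * True when the first and last byte fall in different 64K blocks. */
static int crosses_64k(unsigned long addr, unsigned long len)
{
	return (addr >> 16) != ((addr + len - 1) >> 16);
}

int main(void)
{
	printf("%d\n", crosses_64k(0x10000, 0x10000));	/* 0: fills one block exactly */
	printf("%d\n", crosses_64k(0x1fff0, 0x20));	/* 1: straddles 0x20000 */
	return 0;
}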