Commit 2e7f5efb authored by Andrew Morton, committed by Jens Axboe

[PATCH] much miscellany

- Add locking comments to do_mmap_pgoff() and filemap.c

- Use unsigned long for CPU flags in aio.c (Andi)

- An x86-64 typo fix from Andi.

- Fix a typo

- Fix an unused-variable warning in the stack overflow check code

- mptlan compile fix (Rasmus Andersen)

- Update a misleading comment in ia32 highmem.c

- "attempting to mount an ext3 fs on a stopped md/raid1 array caused a
   divide by 0 error in ext3_fill_super.  Fix duplicates check already
   in ext2." - Angus Sawyer <angus.sawyer@dsl.pipex.com>

- Someone changed the return type of inl() again! Fix up compiler
  warnings in 3c59x.c again.
parent c4c95471
@@ -328,20 +328,21 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 	irq_desc_t *desc = irq_desc + irq;
 	struct irqaction * action;
 	unsigned int status;
-	long esp;
 
 	irq_enter();
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
-	__asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
-	if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
-		extern void show_stack(unsigned long *);
-
-		printk("do_IRQ: stack overflow: %ld\n",
-			esp - sizeof(struct task_struct));
-		__asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
-		show_stack((void *)esp);
+	{
+		long esp;
+
+		__asm__ __volatile__("andl %%esp,%0" :
+					"=r" (esp) : "0" (8191));
+		if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
+			printk("do_IRQ: stack overflow: %ld\n",
+				esp - sizeof(struct task_struct));
+			dump_stack();
+		}
 	}
 #endif
 	kstat.irqs[cpu][irq]++;
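The hunk above also scopes esp into the debug block, which is what fixes the unused-variable warning when CONFIG_DEBUG_STACKOVERFLOW is off. The test itself works by masking %esp with 8191 to get the offset into the 8KB stack region; if that offset dips below sizeof(struct task_struct) + 1024, the code treats it as less than 1KB of headroom above the task area. A minimal user-space sketch of that arithmetic, assuming the layout the check implies (the task-struct size here is an illustrative stand-in):

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* stack and task area share 8KB on 2.5 i386 */

/* same test as the "andl %%esp,%0" sequence, in plain C */
static int stack_low(unsigned long sp, unsigned long task_struct_size)
{
	unsigned long offset = sp & (THREAD_SIZE - 1);	/* esp & 8191 */

	return offset < task_struct_size + 1024;	/* < 1KB left? */
}

int main(void)
{
	unsigned long ts = 1600;	/* stand-in for sizeof(struct task_struct) */

	printf("%d\n", stack_low(0xc0123f00UL, ts));		/* 0: plenty */
	printf("%d\n", stack_low(0xc0122000UL + ts + 512, ts));	/* 1: overflow */
	return 0;
}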
@@ -19,10 +19,12 @@ void kunmap(struct page *page)
 }
 
 /*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
  */
 void *kmap_atomic(struct page *page, enum km_type type)
 {
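The rewritten comment is about the usage contract: an atomic kmap pins the per-CPU mapping slot (preemption is disabled while it is held) and must not be held across anything that can sleep. A sketch of the pattern under the 2.5 API, where kmap_atomic takes an explicit km_type slot (zero_highpage_atomic is a hypothetical helper, not a function from this patch):

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_highpage_atomic(struct page *page)
{
	/* map into the per-CPU KM_USER0 slot; no global kmap lock taken */
	char *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);	/* short, non-sleeping work only */

	/* per-CPU slot: no global TLB invalidation needed on the way out */
	kunmap_atomic(vaddr, KM_USER0);
}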
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
 #include <linux/spinlock.h>
+#include <linux/version.h>
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
 #include <linux/tqueue.h>
 #else
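The newly included <linux/version.h> is what defines LINUX_VERSION_CODE and KERNEL_VERSION; with KERNEL_VERSION undefined, the #if cannot even be preprocessed, which presumably is the failure the "mptlan compile fix" bullet refers to. KERNEL_VERSION packs (major, minor, patch) into one integer so version guards become plain comparisons. A small user-space check of the encoding:

#include <stdio.h>

/* same encoding <linux/version.h> uses: 8 bits each for minor and patch */
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	printf("2.5.41 -> 0x%06x\n", KERNEL_VERSION(2, 5, 41));	/* 0x020529 */
	printf("2.5.40 older? %d\n",
	       KERNEL_VERSION(2, 5, 40) < KERNEL_VERSION(2, 5, 41));	/* 1 */
	return 0;
}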
@@ -1803,11 +1803,11 @@ static void vortex_tx_timeout(struct net_device *dev)
 		   dev->name, inb(ioaddr + TxStatus),
 		   inw(ioaddr + EL3_STATUS));
 	EL3WINDOW(4);
-	printk(KERN_ERR "  diagnostics: net %04x media %04x dma %08lx fifo %04x\n",
-		   inw(ioaddr + Wn4_NetDiag),
-		   inw(ioaddr + Wn4_Media),
-		   inl(ioaddr + PktStatus),
-		   inw(ioaddr + Wn4_FIFODiag));
+	printk(KERN_ERR "  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
+		   (unsigned)inw(ioaddr + Wn4_NetDiag),
+		   (unsigned)inw(ioaddr + Wn4_Media),
+		   (unsigned)inl(ioaddr + PktStatus),
+		   (unsigned)inw(ioaddr + Wn4_FIFODiag));
 	/* Slight code bloat to be user friendly. */
 	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
 		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
@@ -2643,8 +2643,8 @@ dump_tx_ring(struct net_device *dev)
 			   vp->full_bus_master_tx,
 			   vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
 			   vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
-		printk(KERN_ERR "  Transmit list %8.8lx vs. %p.\n",
-			   inl(ioaddr + DownListPtr),
+		printk(KERN_ERR "  Transmit list %8.8x vs. %p.\n",
+			   (unsigned)inl(ioaddr + DownListPtr),
 			   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
 		issue_and_wait(dev, DownStall);
 		for (i = 0; i < TX_RING_SIZE; i++) {
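Both 3c59x.c hunks are the same fix: inl()'s return type has flipped between unsigned long and unsigned int across ports and releases, so any fixed printk format string eventually mismatches and triggers format warnings. Casting the argument pins the type the format string expects, regardless of the current prototype. A user-space sketch of the idiom, with a stand-in inl():

#include <stdio.h>

/* stand-in: on some trees this returns unsigned long, on others unsigned int */
static unsigned long inl(unsigned long port)
{
	return 0xdeadbeefUL + port;	/* fake I/O read for illustration */
}

int main(void)
{
	/* the cast, not the prototype, decides the argument type: %08x
	 * always matches (unsigned), whatever inl() happens to return */
	printf("dma %08x\n", (unsigned)inl(0x10));
	return 0;
}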
@@ -608,7 +608,7 @@ void kick_iocb(struct kiocb *iocb)
 	}
 
 	if (!kiocbTryKick(iocb)) {
-		long flags;
+		unsigned long flags;
 		spin_lock_irqsave(&ctx->ctx_lock, flags);
 		list_add_tail(&iocb->ki_run_list, &ctx->run_list);
 		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
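The aio.c change matters because spin_lock_irqsave() stores the saved interrupt state into its flags argument by macro magic, and the type those macros are written for is unsigned long; a plain long draws compiler warnings. The canonical shape, for reference (a sketch, not code from this patch; SPIN_LOCK_UNLOCKED is the 2.5-era static initializer):

#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

static void touch_shared_state(void)
{
	unsigned long flags;	/* must be unsigned long, never plain long */

	spin_lock_irqsave(&my_lock, flags);	/* saves flags, disables IRQs */
	/* ... short critical section, safe against interrupts ... */
	spin_unlock_irqrestore(&my_lock, flags);	/* restores saved state */
}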
@@ -986,6 +986,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 		goto out_fail;
 
 	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
+	if (!blocksize) {
+		printk(KERN_ERR "EXT3-fs: unable to set blocksize\n");
+		goto out_fail;
+	}
 
 	/*
 	 * The ext3 superblock will not be buffer aligned for other than 1kB
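The guard makes sense in light of what follows in ext3_fill_super: sb_min_blocksize() returns 0 when the underlying device (here a stopped md/raid1 array) rejects the blocksize, and the superblock-offset computation a few lines later divides by that result. A sketch of the failing sequence under that assumption (not a verbatim quote of the 2.5 source):

	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
	if (!blocksize) {		/* the new check: bail out cleanly... */
		printk(KERN_ERR "EXT3-fs: unable to set blocksize\n");
		goto out_fail;
	}
	/* ...instead of dividing by zero here when blocksize was 0: */
	logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;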
@@ -226,7 +226,7 @@ __writeback_single_inode(struct inode *inode, int sync,
  * The inodes to be written are parked on sb->s_io. They are moved back onto
  * sb->s_dirty as they are selected for writing. This way, none can be missed
  * on the writer throttling path, and we get decent balancing between many
- * throlttled threads: we don't want them all piling up on __wait_on_inode.
+ * throttled threads: we don't want them all piling up on __wait_on_inode.
  */
 static void
 sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
@@ -258,7 +258,7 @@ static inline void clear_in_cr4 (unsigned long mask)
 #define TASK_UNMAPPED_32 0x40000000
 #define TASK_UNMAPPED_64 (TASK_SIZE/3)
 #define TASK_UNMAPPED_BASE	\
-	(test_thread_flags(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+	(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
 
 /*
  * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
@@ -61,6 +61,10 @@
  *  ->mapping->page_lock
  *    ->inode_lock
  *      ->sb_lock		(fs/fs-writeback.c)
+ *  ->page_table_lock
+ *    ->swap_device_lock	(try_to_unmap_one)
+ *    ->private_lock		(try_to_unmap_one)
+ *    ->page_lock		(try_to_unmap_one)
  */
 
 /*
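This filemap.c block is documentation, not locking code: it records the global lock ordering, outer locks above inner ones, so every path that takes two of them nests the same way and cannot deadlock against another path nesting them oppositely. In the try_to_unmap_one shape the new lines describe, that means roughly the following (a sketch; the exact lock spellings varied in 2.5, so treat the names as placeholders):

	spin_lock(&mm->page_table_lock);	/* outer lock first */
	spin_lock(&swap_device_lock);		/* then the inner one */
	/* ... clear the pte, update the swap map ... */
	spin_unlock(&swap_device_lock);
	spin_unlock(&mm->page_table_lock);	/* release in reverse order */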
@@ -399,6 +399,10 @@ static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
 		return 0;
 	}
 
+/*
+ * The caller must hold down_write(current->mm->mmap_sem).
+ */
+
 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flags, unsigned long pgoff)
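The new comment spells out a caller contract rather than changing behaviour: do_mmap_pgoff() manipulates the mm's VMA lists, so the mmap semaphore must already be held for writing on entry. A sketch of the expected calling shape (the wrapper shown is illustrative, not taken from this patch):

	unsigned long ret;

	down_write(&current->mm->mmap_sem);	/* what the comment demands */
	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);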