Commit 0e1e2d82 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_bar
parents b2a5f08a 09589177
@@ -10,7 +10,7 @@
 static struct fs_struct init_fs = INIT_FS;
 static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 struct mm_struct init_mm = INIT_MM(init_mm);
 /*
...
@@ -166,6 +166,22 @@ static void __init pci_fixup_via_northbridge_bug(struct pci_dev *d)
 	}
 }
 
+/*
+ * For some reasons Intel decided that certain parts of their
+ * 815, 845 and some other chipsets must look like PCI-to-PCI bridges
+ * while they are obviously not.  The 82801 family (AA, AB, BAM/CAM,
+ * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according
+ * to Intel terminology.  These devices do forward all addresses from
+ * system to PCI bus no matter what are their window settings, so they are
+ * "transparent" (or subtractive decoding) from programmers point of view.
+ */
+static void __init pci_fixup_transparent_bridge(struct pci_dev *dev)
+{
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+	    (dev->device & 0xff00) == 0x2400)
+		dev->transparent = 1;
+}
+
 struct pci_fixup pcibios_fixups[] = {
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx },
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx },
@@ -183,5 +199,6 @@ struct pci_fixup pcibios_fixups[] = {
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug },
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug },
 	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810 },
+	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixup_transparent_bridge },
 	{ 0 }
 };
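Why this fixup matches on PCI_ANY_ID: the table entry runs for every Intel device, and pci_fixup_transparent_bridge() itself does the filtering, on the bridge class and on the 0x24xx device-ID range that Intel's 82801 (ICH) hub-to-PCI bridges occupy. A standalone sketch of that match, with illustrative (not exhaustive) device IDs:

    /* Sketch of the 0x24xx match used above.  The IDs below are
     * examples only (0x2418 = 82801AA PCI bridge, 0x7110 = PIIX4). */
    #include <stdio.h>

    static int is_ich_hub_to_pci_bridge(unsigned short device)
    {
            return (device & 0xff00) == 0x2400;     /* 0x2400..0x24ff */
    }

    int main(void)
    {
            printf("%d\n", is_ich_hub_to_pci_bridge(0x2418));   /* 1 */
            printf("%d\n", is_ich_hub_to_pci_bridge(0x7110));   /* 0 */
            return 0;
    }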
@@ -102,7 +102,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif /* !CONFIG_4xx */
 #endif /* CONFIG_XMON || CONFIG_KGDB */
 
-	if (in_interrupt() || mm == NULL) {
+	if (in_atomic() || mm == NULL) {
 		bad_page_fault(regs, address, SIGSEGV);
 		return;
 	}
...
@@ -233,7 +233,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
...
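Both fault handlers swap in_interrupt() for in_atomic(). The reason sits in the highmem.h hunk near the bottom of this commit: kmap_atomic() now bumps the preempt count unconditionally, so in_atomic() can see any atomic-kmap section, while in_interrupt() only catches hard/soft-irq context; a fault taken inside such a section must not sleep on mmap_sem. A rough model of the distinction (simplified, not the kernel's literal macros):

    /* Simplified model: irq_count is the interrupt nesting level;
     * preempt_count also covers kmap_atomic/spinlock sections. */
    static int in_interrupt_model(unsigned int irq_count)
    {
            return irq_count != 0;
    }

    static int in_atomic_model(unsigned int preempt_count)
    {
            /* nonzero whenever sleeping is forbidden, including
             * between kmap_atomic() and kunmap_atomic() */
            return preempt_count != 0;
    }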
@@ -210,8 +210,7 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
 			goto fail;
 		if (aops->prepare_write(file, page, offset, offset+size))
 			goto unlock;
-		kaddr = page_address(page);
-		flush_dcache_page(page);
+		kaddr = kmap(page);
 		transfer_result = lo_do_transfer(lo, WRITE, kaddr + offset, data, size, IV);
 		if (transfer_result) {
 			/*
@@ -221,6 +220,8 @@ do_lo_send(struct loop_device *lo, struct bio_vec *bvec, int bsize, loff_t pos)
 			printk(KERN_ERR "loop: transfer error block %ld\n", index);
 			memset(kaddr + offset, 0, size);
 		}
+		flush_dcache_page(page);
+		kunmap(page);
 		if (aops->commit_write(file, page, offset, offset+size))
 			goto unlock;
 		if (transfer_result)
...
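Two fixes in one place here: the page is now mapped with kmap() instead of assuming page_address() is valid (it is not for highmem pages), and flush_dcache_page() moves from before the transfer to after it, since its job is to reconcile virtually-indexed data caches after the kernel has modified the page. The pattern the code converges on, as a sketch (2.5-era kernel API, not runnable outside a kernel tree):

    /* Sketch: modify a (possibly highmem) page cache page safely. */
    char *kaddr = kmap(page);            /* may sleep; highmem-safe     */
    memcpy(kaddr + offset, data, size);  /* ... modify the page ...     */
    flush_dcache_page(page);             /* after modifying, not before */
    kunmap(page);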
@@ -109,9 +109,11 @@ int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */
 static int ramdisk_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	unlock_page(page);
@@ -121,9 +123,11 @@ static int ramdisk_readpage(struct file *file, struct page * page)
 static int ramdisk_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
 	if (!PageUptodate(page)) {
-		void *addr = page_address(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	SetPageDirty(page);
@@ -178,8 +182,11 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec,
 		err = 0;
 		if (!PageUptodate(page)) {
-			memset(kmap(page), 0, PAGE_CACHE_SIZE);
-			kunmap(page);
+			void *kaddr = kmap_atomic(page, KM_USER0);
+
+			memset(kaddr, 0, PAGE_CACHE_SIZE);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
 			SetPageUptodate(page);
 		}
...
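This kmap()/kunmap() to kmap_atomic()/kunmap_atomic() conversion is the pattern repeated throughout the filesystem hunks below (driverfs, ext2, ext3, minix, ramfs, reiserfs, namei, buffer.c). Atomic kmaps use a fixed per-CPU slot (KM_USER0 here), avoid the global kmap lock and never sleep, which is also why the page-fault hunks above had to start checking in_atomic(). Condensed into one helper, the shape is (a sketch of the recurring target, 2.5-era API):

    /* Sketch: zero a page cache page under an atomic kmap. */
    static void zero_page_atomic(struct page *page)
    {
            void *kaddr = kmap_atomic(page, KM_USER0); /* no sleeping from here */

            memset(kaddr, 0, PAGE_CACHE_SIZE);
            flush_dcache_page(page);
            kunmap_atomic(kaddr, KM_USER0);            /* slot released */
    }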
@@ -128,6 +128,13 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 	if (!dev)		/* It's a host bus, nothing to read */
 		return;
 
+	if (dev->transparent) {
+		printk("Transparent bridge - %s\n", dev->name);
+		for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++)
+			child->resource[i] = child->parent->resource[i];
+		return;
+	}
+
 	for(i=0; i<3; i++)
 		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
 
@@ -149,13 +156,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
 		res->start = base;
 		res->end = limit + 0xfff;
-	} else {
-		/*
-		 * Ugh. We don't know enough about this bridge. Just assume
-		 * that it's entirely transparent.
-		 */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 0);
-		child->resource[0] = child->parent->resource[0];
 	}
 
 	res = child->resource[1];
@@ -167,10 +167,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 		res->start = base;
 		res->end = limit + 0xfffff;
-	} else {
-		/* See comment above. Same thing */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 1);
-		child->resource[1] = child->parent->resource[1];
 	}
 
 	res = child->resource[2];
@@ -197,10 +193,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
 		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
 		res->start = base;
 		res->end = limit + 0xfffff;
-	} else {
-		/* See comments above */
-		printk(KERN_ERR "Unknown bridge resource %d: assuming transparent\n", 2);
-		child->resource[2] = child->parent->resource[2];
 	}
 }
 
@@ -389,6 +381,10 @@ int pci_setup_device(struct pci_dev * dev)
 	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
 		if (class != PCI_CLASS_BRIDGE_PCI)
 			goto bad;
+		/* The PCI-to-PCI bridge spec requires that subtractive
+		   decoding (i.e. transparent) bridge must have programming
+		   interface code of 0x01. */
+		dev->transparent = ((dev->class & 0xff) == 1);
 		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
 		break;
...
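The new pci_setup_device() test leans on the config-space class encoding: dev->class holds base class, subclass and programming interface as 0xBBSSPP, and the PCI-to-PCI bridge spec reserves ProgIf 0x01 for subtractive-decode bridges. Once the shifted 16-bit class has been verified as PCI_CLASS_BRIDGE_PCI (0x0604), the low byte of dev->class is all that distinguishes a transparent bridge:

    /* Sketch: decoding the 24-bit class word tested above. */
    unsigned int cls    = 0x060401;            /* example: transparent P2P bridge */
    unsigned int base   = (cls >> 16) & 0xff;  /* 0x06  bridge                    */
    unsigned int sub    = (cls >>  8) & 0xff;  /* 0x04  PCI-to-PCI                */
    unsigned int progif =  cls        & 0xff;  /* 0x01  subtractive decode        */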
@@ -471,6 +471,11 @@ static void __init quirk_dunord ( struct pci_dev * dev )
 	r -> end = 0xffffff;
 }
 
+static void __init quirk_transparent_bridge(struct pci_dev *dev)
+{
+	dev->transparent = 1;
+}
+
 /*
  *  The main table of quirks.
  */
@@ -525,6 +530,13 @@ static struct pci_fixup pci_fixups[] __initdata = {
 	{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic },
 	{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering },
+	/*
+	 * i82380FB mobile docking controller: its PCI-to-PCI bridge
+	 * is subtractive decoding (transparent), and does indicate this
+	 * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
+	 * instead of 0x01.
+	 */
+	{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge },
 	{ 0 }
 };
...
@@ -73,7 +73,7 @@ static int pci_assign_bus_resource(const struct pci_bus *bus,
 	int i;
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
-	for (i = 0 ; i < 4; i++) {
+	for (i = 0 ; i < PCI_BUS_NUM_RESOURCES; i++) {
 		struct resource *r = bus->resource[i];
 		if (!r)
 			continue;
...
@@ -1342,18 +1342,11 @@ int isp2x00_queuecommand(Scsi_Cmnd * Cmnd, void (*done) (Scsi_Cmnd *))
 	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
 	num_free = (num_free > 2) ? num_free - 2 : 0;
-	host->can_queue = hostdata->queued + num_free;
+	host->can_queue = host->host_busy + num_free;
 	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
 		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
 	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);
 
-	/* this is really gross */
-	if (host->can_queue <= host->host_busy){
-	        if (host->can_queue+2 < host->host_busy)
-		        DEBUG(printk("qlogicfc%d.c crosses its fingers.\n", hostdata->host_id));
-		host->can_queue = host->host_busy + 1;
-	}
-
 	LEAVE("isp2x00_queuecommand");
 	return 0;
@@ -1623,17 +1616,11 @@ void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 	num_free = QLOGICFC_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
 	num_free = (num_free > 2) ? num_free - 2 : 0;
-	host->can_queue = hostdata->queued + num_free;
+	host->can_queue = host->host_busy + num_free;
 	if (host->can_queue > QLOGICFC_REQ_QUEUE_LEN)
 		host->can_queue = QLOGICFC_REQ_QUEUE_LEN;
 	host->sg_tablesize = QLOGICFC_MAX_SG(num_free);
 
-	if (host->can_queue <= host->host_busy){
-	        if (host->can_queue+2 < host->host_busy)
-		        DEBUG(printk("qlogicfc%d : crosses its fingers.\n", hostdata->host_id));
-		host->can_queue = host->host_busy + 1;
-	}
-
 	outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
 	LEAVE_INTR("isp2x00_intr_handler");
 }
...
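Both hunks derive can_queue from host->host_busy, the midlayer's own count of commands outstanding on the host, instead of the driver's private hostdata->queued counter. Computed that way, can_queue = host_busy + num_free can never drop below the number of commands already in flight, so the deleted "really gross" clamp (and its "crosses its fingers" debug message) becomes unreachable. A sketch of the invariant:

    /* Sketch: derived this way, can_queue >= host_busy always holds,
     * so no after-the-fact clamp is needed. */
    static int compute_can_queue(int host_busy, int num_free, int ring_len)
    {
            int can_queue = host_busy + num_free;
            return can_queue > ring_len ? ring_len : can_queue;
    }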
@@ -65,7 +65,7 @@
 #define DATASEGS_PER_COMMAND 2
 #define DATASEGS_PER_CONT 5
 
-#define QLOGICFC_REQ_QUEUE_LEN	127	/* must be power of two - 1 */
+#define QLOGICFC_REQ_QUEUE_LEN	255	/* must be power of two - 1 */
 #define QLOGICFC_MAX_SG(ql)	(DATASEGS_PER_COMMAND + (((ql) > 0) ? DATASEGS_PER_CONT*((ql) - 1) : 0))
 #define QLOGICFC_CMD_PER_LUN    8
...
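The "must be power of two - 1" rule exists because the driver wraps its request-ring indices by AND-ing with QLOGICFC_REQ_QUEUE_LEN, i.e. the length doubles as a bit mask; 255 preserves that property while doubling the ring from the old 127. A hypothetical illustration of the masked arithmetic (the driver's actual REQ_QUEUE_DEPTH macro may differ in detail):

    /* Hypothetical sketch of ring-depth math with a 2^n - 1 mask. */
    #define QUEUE_LEN 255   /* must be a power of two minus 1 */

    static unsigned int queue_depth(unsigned int in_ptr, unsigned int out_ptr)
    {
            return (in_ptr - out_ptr) & QUEUE_LEN;  /* wraps mod 256 */
    }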
@@ -184,7 +184,7 @@ static struct Scsi_Device_Template st_template = {
 static int st_compression(Scsi_Tape *, int);
 
 static int find_partition(Scsi_Tape *);
-static int update_partition(Scsi_Tape *);
+static int switch_partition(Scsi_Tape *);
 
 static int st_int_ioctl(Scsi_Tape *, unsigned int, unsigned long);
@@ -1028,9 +1028,9 @@ static int st_flush(struct file *filp)
 	}
 
 	if (STp->can_partitions &&
-	    (result2 = update_partition(STp)) < 0) {
+	    (result2 = switch_partition(STp)) < 0) {
 		DEBC(printk(ST_DEB_MSG
-			    "st%d: update_partition at close failed.\n", dev));
+			    "st%d: switch_partition at close failed.\n", dev));
 		if (result == 0)
 			result = result2;
 		goto out;
@@ -1206,7 +1206,7 @@ static ssize_t rw_checks(Scsi_Tape *STp, struct file *filp, size_t count, loff_t
 	} ) /* end DEB */
 
 	if (STp->can_partitions &&
-	    (retval = update_partition(STp)) < 0)
+	    (retval = switch_partition(STp)) < 0)
 		goto out;
 
 	if (STp->block_size == 0 && STp->max_block > 0 &&
@@ -2904,7 +2904,7 @@ static int find_partition(Scsi_Tape *STp)
 
 /* Change the partition if necessary */
-static int update_partition(Scsi_Tape *STp)
+static int switch_partition(Scsi_Tape *STp)
 {
 	ST_partstat *STps;
@@ -3239,7 +3239,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
 		}
 
 		if (STp->can_partitions && STp->ready == ST_READY &&
-		    (i = update_partition(STp)) < 0) {
+		    (i = switch_partition(STp)) < 0) {
 			retval = i;
 			goto out;
 		}
@@ -3260,7 +3260,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
 			goto out;
 		}
 		if (STp->can_partitions &&
-		    (i = update_partition(STp)) < 0) {
+		    (i = switch_partition(STp)) < 0) {
 			retval = i;
 			goto out;
 		}
...
@@ -27,6 +27,7 @@
 #include <linux/fs.h>
 #include <linux/amigaffs.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -518,6 +519,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
 	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
 	if (from > to || to > PAGE_CACHE_SIZE)
 		BUG();
+	kmap(page);
 	data = page_address(page);
 	bsize = AFFS_SB(sb)->s_data_blksize;
 	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
@@ -537,6 +539,8 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
 		from += tmp;
 		boff = 0;
 	}
+	flush_dcache_page(page);
+	kunmap(page);
 	return 0;
 }
@@ -656,7 +660,11 @@ static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned
 			return err;
 	}
 	if (to < PAGE_CACHE_SIZE) {
-		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
+		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		if (size > offset + to) {
 			if (size < offset + PAGE_CACHE_SIZE)
 				tmp = size & ~PAGE_CACHE_MASK;
...
@@ -1784,6 +1784,7 @@ static int __block_write_full_page(struct inode *inode,
 	if (err == 0)
 		return ret;
 	return err;
+
 recover:
 	/*
 	 * ENOSPC, or some other error.  We may already have added some
@@ -1795,7 +1796,8 @@ static int __block_write_full_page(struct inode *inode,
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh)) {
+		get_bh(bh);
+		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
 		} else {
@@ -1805,21 +1807,21 @@ static int __block_write_full_page(struct inode *inode,
 			 */
 			clear_buffer_dirty(bh);
 		}
-		bh = bh->b_this_page;
-	} while (bh != head);
+	} while ((bh = bh->b_this_page) != head);
+	SetPageError(page);
+	BUG_ON(PageWriteback(page));
+	SetPageWriteback(page);
+	unlock_page(page);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
+		put_bh(bh);
 		bh = next;
 	} while (bh != head);
-	BUG_ON(PageWriteback(page));
-	SetPageWriteback(page);
-	unlock_page(page);
 	goto done;
 }
@@ -1831,7 +1833,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 	int err = 0;
 	unsigned blocksize, bbits;
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
-	char *kaddr = kmap(page);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(from > PAGE_CACHE_SIZE);
@@ -1872,13 +1873,19 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 					set_buffer_uptodate(bh);
 					continue;
 				}
-				if (block_end > to)
-					memset(kaddr+to, 0, block_end-to);
-				if (block_start < from)
-					memset(kaddr+block_start,
-						0, from-block_start);
-				if (block_end > to || block_start < from)
+				if (block_end > to || block_start < from) {
+					void *kaddr;
+
+					kaddr = kmap_atomic(page, KM_USER0);
+					if (block_end > to)
+						memset(kaddr+to, 0,
+							block_end-to);
+					if (block_start < from)
+						memset(kaddr+block_start,
+							0, from-block_start);
 					flush_dcache_page(page);
+					kunmap_atomic(kaddr, KM_USER0);
+				}
 				continue;
 			}
 		}
@@ -1917,10 +1924,14 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (block_start >= to)
 			break;
 		if (buffer_new(bh)) {
+			void *kaddr;
+
 			clear_buffer_new(bh);
 			if (buffer_uptodate(bh))
 				buffer_error();
+			kaddr = kmap_atomic(page, KM_USER0);
 			memset(kaddr+block_start, 0, bh->b_size);
+			kunmap_atomic(kaddr, KM_USER0);
 			set_buffer_uptodate(bh);
 			mark_buffer_dirty(bh);
 		}
@@ -2006,9 +2017,10 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 				SetPageError(page);
 			}
 			if (!buffer_mapped(bh)) {
-				memset(kmap(page) + i*blocksize, 0, blocksize);
+				void *kaddr = kmap_atomic(page, KM_USER0);
+				memset(kaddr + i * blocksize, 0, blocksize);
 				flush_dcache_page(page);
-				kunmap(page);
+				kunmap_atomic(kaddr, KM_USER0);
 				set_buffer_uptodate(bh);
 				continue;
 			}
@@ -2116,7 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	long status;
 	unsigned zerofrom;
 	unsigned blocksize = 1 << inode->i_blkbits;
-	char *kaddr;
+	void *kaddr;
 
 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
 		status = -ENOMEM;
@@ -2138,12 +2150,12 @@ int cont_prepare_write(struct page *page, unsigned offset,
 						PAGE_CACHE_SIZE, get_block);
 		if (status)
 			goto out_unmap;
-		kaddr = page_address(new_page);
+		kaddr = kmap_atomic(new_page, KM_USER0);
 		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
 		flush_dcache_page(new_page);
+		kunmap_atomic(kaddr, KM_USER0);
 		__block_commit_write(inode, new_page,
 				zerofrom, PAGE_CACHE_SIZE);
-		kunmap(new_page);
 		unlock_page(new_page);
 		page_cache_release(new_page);
 	}
@@ -2168,21 +2180,20 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	status = __block_prepare_write(inode, page, zerofrom, to, get_block);
 	if (status)
 		goto out1;
-	kaddr = page_address(page);
 	if (zerofrom < offset) {
+		kaddr = kmap_atomic(page, KM_USER0);
 		memset(kaddr+zerofrom, 0, offset-zerofrom);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		__block_commit_write(inode, page, zerofrom, offset);
 	}
 	return 0;
 out1:
 	ClearPageUptodate(page);
-	kunmap(page);
 	return status;
 
 out_unmap:
 	ClearPageUptodate(new_page);
-	kunmap(new_page);
 	unlock_page(new_page);
 	page_cache_release(new_page);
 out:
@@ -2194,10 +2205,8 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 {
 	struct inode *inode = page->mapping->host;
 	int err = __block_prepare_write(inode, page, from, to, get_block);
-	if (err) {
+	if (err)
 		ClearPageUptodate(page);
-		kunmap(page);
-	}
 	return err;
 }
@@ -2205,7 +2214,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
 	__block_commit_write(inode,page,from,to);
-	kunmap(page);
 	return 0;
 }
@@ -2215,7 +2223,6 @@ int generic_commit_write(struct file *file, struct page *page,
 	struct inode *inode = page->mapping->host;
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 	__block_commit_write(inode,page,from,to);
-	kunmap(page);
 	if (pos > inode->i_size) {
 		inode->i_size = pos;
 		mark_inode_dirty(inode);
@@ -2232,6 +2239,7 @@ int block_truncate_page(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct page *page;
 	struct buffer_head *bh;
+	void *kaddr;
 	int err;
 
 	blocksize = 1 << inode->i_blkbits;
@@ -2284,9 +2292,10 @@ int block_truncate_page(struct address_space *mapping,
 			goto unlock;
 	}
 
-	memset(kmap(page) + offset, 0, length);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
 	mark_buffer_dirty(bh);
 	err = 0;
@@ -2306,7 +2315,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
 	struct inode * const inode = page->mapping->host;
 	const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	char *kaddr;
+	void *kaddr;
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
@@ -2326,10 +2335,10 @@ int block_write_full_page(struct page *page, get_block_t *get_block)
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap(page);
+	kaddr = kmap_atomic(page, KM_USER0);
 	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
 	return __block_write_full_page(inode, page, get_block);
 }
...
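The buffer.c changes do two independent things. The kmap-related hunks remove the long-standing convention that block_prepare_write() left the page kmapped for commit_write() to unmap; every zeroing site now takes its own short atomic kmap, which is why the ext3, fat, jffs2, jfs and reiserfs hunks below can drop their compensating kunmap() calls. Separately, the __block_write_full_page() recovery path is reordered: each buffer is pinned across submission, and the page is marked for writeback and unlocked before submit_bh(), since I/O may complete (and end-of-I/O accounting run) as soon as a buffer is submitted. The pinning pattern, compressed into one loop as a sketch:

    /* Sketch: pin buffers so an early I/O completion on one buffer
     * can't tear the page down while we're still walking the ring
     * (in the real hunk get_bh() happens in the marking pass). */
    bh = head;
    do {
            struct buffer_head *next = bh->b_this_page;
            get_bh(bh);
            if (buffer_async_write(bh)) {
                    clear_buffer_dirty(bh);
                    submit_bh(WRITE, bh);   /* may complete immediately */
            }
            put_bh(bh);                     /* may be the last reference */
            bh = next;
    } while (bh != head);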
@@ -59,9 +59,11 @@ static int mount_count = 0;
 static int driverfs_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	unlock_page(page);
@@ -70,10 +72,12 @@ static int driverfs_readpage(struct file *file, struct page * page)
 
 static int driverfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-	void *addr = kmap(page);
 	if (!PageUptodate(page)) {
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		void *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	return 0;
@@ -85,7 +89,6 @@ static int driverfs_commit_write(struct file *file, struct page *page, unsigned
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
 	set_page_dirty(page);
-	kunmap(page);
 	if (pos > inode->i_size)
 		inode->i_size = pos;
 	return 0;
...
@@ -504,6 +504,8 @@ static inline int make_private_signals(void)
 {
 	struct signal_struct * newsig;
 
+	remove_thread_group(current, current->sig);
+
 	if (atomic_read(&current->sig->count) <= 1)
 		return 0;
 	newsig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
@@ -512,6 +514,8 @@ static inline int make_private_signals(void)
 	spin_lock_init(&newsig->siglock);
 	atomic_set(&newsig->count, 1);
 	memcpy(newsig->action, current->sig->action, sizeof(newsig->action));
+	init_sigpending(&newsig->shared_pending);
+
 	spin_lock_irq(&current->sigmask_lock);
 	current->sig = newsig;
 	spin_unlock_irq(&current->sigmask_lock);
@@ -575,42 +579,10 @@ static inline void flush_old_files(struct files_struct * files)
  */
 static void de_thread(struct task_struct *tsk)
 {
-	struct task_struct *sub;
-	struct list_head *head, *ptr;
-	struct siginfo info;
-	int pause;
-
-	write_lock_irq(&tasklist_lock);
-
-	if (tsk->tgid != tsk->pid) {
-		/* subsidiary thread - just escapes the group */
-		list_del_init(&tsk->thread_group);
-		tsk->tgid = tsk->pid;
-		pause = 0;
-	}
-	else {
-		/* master thread - kill all subsidiary threads */
-		info.si_signo = SIGKILL;
-		info.si_errno = 0;
-		info.si_code = SI_DETHREAD;
-		info.si_pid = current->pid;
-		info.si_uid = current->uid;
-
-		head = tsk->thread_group.next;
-		list_del_init(&tsk->thread_group);
-
-		list_for_each(ptr,head) {
-			sub = list_entry(ptr,struct task_struct,thread_group);
-			send_sig_info(SIGKILL,&info,sub);
-		}
-
-		pause = 1;
-	}
-	write_unlock_irq(&tasklist_lock);
-
-	/* give the subsidiary threads a chance to clean themselves up */
-	if (pause) yield();
+	if (!list_empty(&tsk->thread_group))
+		BUG();
+	/* An exec() starts a new thread group: */
+	tsk->tgid = tsk->pid;
 }
 
 int flush_old_exec(struct linux_binprm * bprm)
@@ -633,6 +605,8 @@ int flush_old_exec(struct linux_binprm * bprm)
 	if (retval) goto mmap_failed;
 
 	/* This is the point of no return */
+	de_thread(current);
+
 	release_old_signals(oldsig);
 
 	current->sas_ss_sp = current->sas_ss_size = 0;
@@ -651,9 +625,6 @@ int flush_old_exec(struct linux_binprm * bprm)
 
 	flush_thread();
 
-	if (!list_empty(&current->thread_group))
-		de_thread(current);
-
 	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
 	    permission(bprm->file->f_dentry->d_inode,MAY_READ))
 		current->mm->dumpable = 0;
...
@@ -571,8 +571,8 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 	struct page *page = grab_cache_page(mapping, 0);
 	unsigned chunk_size = ext2_chunk_size(inode);
 	struct ext2_dir_entry_2 * de;
-	char *base;
 	int err;
+	void *kaddr;
 
 	if (!page)
 		return -ENOMEM;
@@ -581,22 +581,21 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 		unlock_page(page);
 		goto fail;
 	}
-	base = page_address(page);
-
-	de = (struct ext2_dir_entry_2 *) base;
+	kaddr = kmap_atomic(page, KM_USER0);
+	de = (struct ext2_dir_entry_2 *)kaddr;
 	de->name_len = 1;
 	de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
 	memcpy (de->name, ".\0\0", 4);
 	de->inode = cpu_to_le32(inode->i_ino);
 	ext2_set_de_type (de, inode);
 
-	de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
+	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
 	de->name_len = 2;
 	de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
 	de->inode = cpu_to_le32(parent->i_ino);
 	memcpy (de->name, "..\0", 4);
 	ext2_set_de_type (de, inode);
+	kunmap_atomic(kaddr, KM_USER0);
 
 	err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
 	page_cache_release(page);
...
@@ -1082,16 +1082,6 @@ static int ext3_prepare_write(struct file *file, struct page *page,
 	if (ext3_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 			from, to, NULL, do_journal_get_write_access);
-		if (ret) {
-			/*
-			 * We're going to fail this prepare_write(),
-			 * so commit_write() will not be called.
-			 * We need to undo block_prepare_write()'s kmap().
-			 * AKPM: Do we need to clear PageUptodate? I don't
-			 * think so.
-			 */
-			kunmap(page);
-		}
 	}
 prepare_write_failed:
 	if (ret)
@@ -1151,7 +1141,6 @@ static int ext3_commit_write(struct file *file, struct page *page,
 			from, to, &partial, commit_write_fn);
 		if (!partial)
 			SetPageUptodate(page);
-		kunmap(page);
 		if (pos > inode->i_size)
 			inode->i_size = pos;
 		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
@@ -1162,17 +1151,8 @@ static int ext3_commit_write(struct file *file, struct page *page,
 		}
 		/* Be careful here if generic_commit_write becomes a
 		 * required invocation after block_prepare_write. */
-		if (ret == 0) {
+		if (ret == 0)
 			ret = generic_commit_write(file, page, from, to);
-		} else {
-			/*
-			 * block_prepare_write() was called, but we're not
-			 * going to call generic_commit_write().  So we
-			 * need to perform generic_commit_write()'s kunmap
-			 * by hand.
-			 */
-			kunmap(page);
-		}
 	}
 	if (inode->i_size > EXT3_I(inode)->i_disksize) {
 		EXT3_I(inode)->i_disksize = inode->i_size;
@@ -1535,6 +1515,7 @@ static int ext3_block_truncate_page(handle_t *handle,
 	struct page *page;
 	struct buffer_head *bh;
 	int err;
+	void *kaddr;
 
 	blocksize = inode->i_sb->s_blocksize;
 	length = offset & (blocksize - 1);
@@ -1590,10 +1571,11 @@ static int ext3_block_truncate_page(handle_t *handle,
 		if (err)
 			goto unlock;
 	}
 
-	memset(kmap(page) + offset, 0, length);
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + offset, 0, length);
 	flush_dcache_page(page);
-	kunmap(page);
+	kunmap_atomic(kaddr, KM_USER0);
 	BUFFER_TRACE(bh, "zeroed end of block");
...
@@ -982,11 +982,24 @@ static int fat_readpage(struct file *file, struct page *page)
 {
 	return block_read_full_page(page,fat_get_block);
 }
-static int fat_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
+
+static int
+fat_prepare_write(struct file *file, struct page *page,
+		  unsigned from, unsigned to)
 {
+	kmap(page);
 	return cont_prepare_write(page,from,to,fat_get_block,
 		&MSDOS_I(page->mapping->host)->mmu_private);
 }
+
+static int
+fat_commit_write(struct file *file, struct page *page,
+		 unsigned from, unsigned to)
+{
+	kunmap(page);
+	return generic_commit_write(file, page, from, to);
+}
+
 static int _fat_bmap(struct address_space *mapping, long block)
 {
 	return generic_block_bmap(mapping,block,fat_get_block);
@@ -996,7 +1009,7 @@ static struct address_space_operations fat_aops = {
 	writepage: fat_writepage,
 	sync_page: block_sync_page,
 	prepare_write: fat_prepare_write,
-	commit_write: generic_commit_write,
+	commit_write: fat_commit_write,
 	bmap: _fat_bmap
 };
...
@@ -47,6 +47,7 @@
 #include <linux/stat.h>
 #include <linux/blkdev.h>
 #include <linux/quotaops.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include <asm/semaphore.h>
 #include <asm/byteorder.h>
@@ -751,7 +752,6 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	get_page(page);
 	/* Don't SetPageLocked(page), should be locked already */
-	buf = page_address(page);
 	ClearPageUptodate(page);
 	ClearPageError(page);
@@ -760,8 +760,10 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	read_len = 0;
 	result = 0;
 	offset = page->index << PAGE_CACHE_SHIFT;
+	kmap(page);
+	buf = page_address(page);
 	if (offset < inode->i_size) {
 		read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
 		r = jffs_read_data(f, buf, offset, read_len);
@@ -779,6 +781,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	/* This handles the case of partial or no read in above */
 	if(read_len < PAGE_SIZE)
 		memset(buf + read_len, 0, PAGE_SIZE - read_len);
+	flush_dcache_page(page);
+	kunmap(page);
 	D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
 	up(&c->fmc->biglock);
@@ -788,9 +792,8 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
 	}else {
 		SetPageUptodate(page);
 	}
-	flush_dcache_page(page);
-	put_page(page);
+	page_cache_release(page);
 
 	D3(printk("jffs_readpage(): Leaving...\n"));
...
@@ -17,6 +17,7 @@
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/crc32.h>
 #include <linux/jffs2.h>
 #include "nodelist.h"
@@ -381,9 +382,10 @@ int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsi
 	ri->isize = (uint32_t)inode->i_size;
 	ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
 
-	/* We rely on the fact that generic_file_write() currently kmaps the page for us. */
+	kmap(pg);
 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
 				      (pg->index << PAGE_CACHE_SHIFT) + start, end - start, &writtenlen);
+	kunmap(pg);
 
 	if (ret) {
 		/* There was an error writing. */
...
@@ -403,7 +403,6 @@ static void __write_metapage(metapage_t * mp)
 		if (rc) {
 			jERROR(1, ("prepare_write return %d!\n", rc));
 			ClearPageUptodate(mp->page);
-			kunmap(mp->page);
 			unlock_page(mp->page);
 			clear_bit(META_dirty, &mp->flag);
 			return;
...
@@ -7,6 +7,7 @@
  */
 
 #include "minix.h"
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 
 typedef struct minix_dir_entry minix_dirent;
@@ -261,7 +262,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = (struct inode*)mapping->host;
-	char *kaddr = (char*)page_address(page);
+	char *kaddr = page_address(page);
 	unsigned from = (char*)de - kaddr;
 	unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
 	int err;
@@ -286,7 +287,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 	struct page *page = grab_cache_page(mapping, 0);
 	struct minix_sb_info * sbi = minix_sb(inode->i_sb);
 	struct minix_dir_entry * de;
-	char *base;
+	char *kaddr;
 	int err;
 
 	if (!page)
@@ -297,15 +298,16 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 		goto fail;
 	}
 
-	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
-	de = (struct minix_dir_entry *) base;
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	de = (struct minix_dir_entry *)kaddr;
 	de->inode = inode->i_ino;
 	strcpy(de->name,".");
 	de = minix_next_entry(de, sbi);
 	de->inode = dir->i_ino;
 	strcpy(de->name,"..");
+	kunmap_atomic(kaddr, KM_USER0);
 
 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
...
@@ -2200,8 +2200,9 @@ int page_symlink(struct inode *inode, const char *symname, int len)
 	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
 	if (err)
 		goto fail_map;
-	kaddr = page_address(page);
+	kaddr = kmap_atomic(page, KM_USER0);
 	memcpy(kaddr, symname, len-1);
+	kunmap_atomic(kaddr, KM_USER0);
 	mapping->a_ops->commit_write(NULL, page, 0, len-1);
 	/*
 	 * Notice that we are _not_ going to block here - end of page is
...
@@ -213,7 +213,6 @@ static void driverfs_remove_partitions(struct gendisk *hd)
 static void check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	devfs_handle_t de = NULL;
-	dev_t dev = bdev->bd_dev;
 	char buf[64];
 	struct parsed_partitions *state;
 	int i;
@@ -254,7 +253,7 @@ static void check_partition(struct gendisk *hd, struct block_device *bdev)
 #if CONFIG_BLK_DEV_MD
 			if (!state->parts[j-1].flags)
 				continue;
-			md_autodetect_dev(dev+j);
+			md_autodetect_dev(bdev->bd_dev+j);
 #endif
 		}
 		return;
...
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/smp_lock.h>
@@ -47,8 +48,10 @@ static struct inode_operations ramfs_dir_inode_operations;
 static int ramfs_readpage(struct file *file, struct page * page)
 {
 	if (!PageUptodate(page)) {
-		memset(kmap(page), 0, PAGE_CACHE_SIZE);
-		kunmap(page);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
+		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -58,10 +61,12 @@ static int ramfs_readpage(struct file *file, struct page * page)
 
 static int ramfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
-	void *addr = kmap(page);
 	if (!PageUptodate(page)) {
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		char *kaddr = kmap_atomic(page, KM_USER0);
+
+		memset(kaddr, 0, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
+		kunmap_atomic(kaddr, KM_USER0);
 		SetPageUptodate(page);
 	}
 	SetPageDirty(page);
@@ -73,7 +78,6 @@ static int ramfs_commit_write(struct file *file, struct page *page, unsigned off
 	struct inode *inode = page->mapping->host;
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
-	kunmap(page);
 	if (pos > inode->i_size)
 		inode->i_size = pos;
 	return 0;
...
@@ -7,6 +7,7 @@
 #include <linux/reiserfs_fs.h>
 #include <linux/smp_lock.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/buffer_head.h>
@@ -1692,8 +1693,6 @@ static int grab_tail_page(struct inode *p_s_inode,
     if (error)
 	goto unlock ;
 
-    kunmap(page) ; /* mapped by block_prepare_write */
-
     head = page_buffers(page) ;
     bh = head;
     do {
@@ -1788,10 +1787,13 @@ void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     length = offset & (blocksize - 1) ;
     /* if we are not on a block boundary */
     if (length) {
+	char *kaddr;
+
 	length = blocksize - length ;
-	memset((char *)kmap(page) + offset, 0, length) ;
+	kaddr = kmap_atomic(page, KM_USER0) ;
+	memset(kaddr + offset, 0, length) ;
 	flush_dcache_page(page) ;
-	kunmap(page) ;
+	kunmap_atomic(kaddr, KM_USER0) ;
 	if (buffer_mapped(bh) && bh->b_blocknr != 0) {
 	    mark_buffer_dirty(bh) ;
 	}
@@ -1941,23 +1943,25 @@ static int reiserfs_write_full_page(struct page *page) {
     struct buffer_head *arr[PAGE_CACHE_SIZE/512] ;
     int nr = 0 ;
 
-    if (!page_has_buffers(page)) {
+    if (!page_has_buffers(page))
 	block_prepare_write(page, 0, 0, NULL) ;
-	kunmap(page) ;
-    }
 
     /* last page in the file, zero out any contents past the
     ** last byte in the file
     */
     if (page->index >= end_index) {
+	char *kaddr;
+
 	last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1) ;
 	/* no file contents in this page */
 	if (page->index >= end_index + 1 || !last_offset) {
 	    error = -EIO ;
 	    goto fail ;
 	}
-	memset((char *)kmap(page)+last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE-last_offset) ;
 	flush_dcache_page(page) ;
-	kunmap(page) ;
+	kunmap_atomic(kaddr, KM_USER0) ;
     }
     head = page_buffers(page) ;
     bh = head ;
...
@@ -1284,15 +1284,15 @@ int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
 	    **
 	    ** p_s_un_bh is from the page cache (all unformatted nodes are
 	    ** from the page cache) and might be a highmem page.  So, we
-	    ** can't use p_s_un_bh->b_data.  But, the page has already been
-	    ** kmapped, so we can use page_address()
+	    ** can't use p_s_un_bh->b_data.
 	    ** -clm
 	    */
-	    data = page_address(p_s_un_bh->b_page) ;
+	    data = kmap_atomic(p_s_un_bh->b_page, KM_USER0);
 	    off = ((le_ih_k_offset (&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
 	    memcpy(data + off,
 	           B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);
+	    kunmap_atomic(data, KM_USER0);
 	}
 
 	/* Perform balancing after all resources have been collected at once. */
...
@@ -122,11 +122,12 @@ int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inod
     }
 
     /* if we've copied bytes from disk into the page, we need to zero
     ** out the unused part of the block (it was not up to date before)
-    ** the page is still kmapped (by whoever called reiserfs_get_block)
     */
     if (up_to_date_bh) {
         unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
-        memset(page_address(unbh->b_page) + pgoff, 0, n_blk_size - total_tail) ;
+        char *kaddr=kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+        memset(kaddr + pgoff, 0, n_blk_size - total_tail) ;
+        kunmap_atomic(kaddr, KM_USER0);
     }
 
     REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
...
@@ -14,6 +14,7 @@
  */
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/smp_lock.h>
 #include "sysv.h"
@@ -273,6 +274,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	if (!page)
 		return -ENOMEM;
+	kmap(page);
 	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * SYSV_DIRSIZE);
 	if (err) {
 		unlock_page(page);
@@ -291,6 +293,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir)
 	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
 fail:
+	kunmap(page);
 	page_cache_release(page);
 	return err;
 }
...
@@ -81,7 +81,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
-	preempt_disable();
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -104,7 +104,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 	if (vaddr < FIXADDR_START) { // FIXME
-		preempt_enable();
+		dec_preempt_count();
 		return;
 	}
@@ -119,7 +119,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	__flush_tlb_one(vaddr);
 #endif
-	preempt_enable();
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
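The switch from preempt_disable()/preempt_enable() to the raw counter operations is the point of this series: the atomic-kmap window must be visible to the new in_atomic() predicate on every build, and the exit path must not run a reschedule check while the fixmap slot is live. A small illustration (ours, not from the commit), assuming the preempt.h and hardirq.h macros shown later in this diff:

	/* preempt_disable() compiles to nothing on !CONFIG_PREEMPT kernels,
	 * but inc_preempt_count() always bumps the thread's counter, so
	 * in_atomic() sees the section regardless of configuration. */
	inc_preempt_count();		/* enter the atomic-kmap window */
	BUG_ON(!in_atomic());		/* holds on both preemptible and
					 * non-preemptible builds */
	/* ... use the per-CPU fixmap mapping ... */
	dec_preempt_count();		/* leave; deliberately no
					 * preempt_schedule() check here */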
@@ -158,6 +158,8 @@ typedef struct {
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_is_locked(x)	((x)->lock != RW_LOCK_BIAS)
 /*
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
...
@@ -45,11 +45,21 @@ extern unsigned long pgkern_mask;
 		__flush_tlb(); \
 } while (0)
-#ifndef CONFIG_X86_INVLPG
-#define __flush_tlb_one(addr) __flush_tlb()
+#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
+#define __flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+#ifdef CONFIG_X86_INVLPG
+# define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+# define __flush_tlb_one(addr) \
+do { \
+	if (cpu_has_invlpg) \
+		__flush_tlb_single(addr); \
+	else \
+		__flush_tlb(); \
+} while (0)
 #endif
 /*
...
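The reworked macro trades a compile-time assumption for a cheap runtime test: kernels built without CONFIG_X86_INVLPG can still use invlpg when the boot CPU supports it, instead of flushing the whole TLB on every single-entry invalidation. A usage sketch (ours; the surrounding PTE update is illustrative):

	set_pte(ptep, mk_pte(page, prot));	/* hypothetical mapping change */
	__flush_tlb_one(address);		/* one entry; degrades to a
						 * full __flush_tlb() only on
						 * 386-class CPUs */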
@@ -85,8 +85,10 @@ typedef struct {
 #define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
 #if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
+# define in_atomic()	(preempt_count() != 0)
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 #define irq_exit() \
...
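in_atomic() is the consumer of all the counter bookkeeping above. Under CONFIG_PREEMPT, holding the big kernel lock bumps preempt_count by the same amount kernel_locked() reports, and BKL sections may sleep, so the two cancel out and only "real" atomic contexts remain. A sketch of what that means in practice (ours; mylock is a hypothetical spinlock_t):

	/* On a preemptible kernel a spinlock bumps preempt_count, so the
	 * critical section reads as atomic; holding only the BKL does not. */
	spin_lock(&mylock);
	BUG_ON(!in_atomic());	/* true here under CONFIG_PREEMPT */
	spin_unlock(&mylock);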
@@ -88,6 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned int idx;
 	unsigned long vaddr;
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -109,8 +110,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
-	if (vaddr < KMAP_FIX_BEGIN) // FIXME
+	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -122,6 +125,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	pte_clear(kmap_pte+idx);
 	flush_tlb_page(0, vaddr);
 #endif
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
@@ -113,6 +113,12 @@ do { \
 #define irq_exit()	br_read_unlock(BR_GLOBALIRQ_LOCK)
 #endif
+#if CONFIG_PREEMPT
+# define in_atomic()	(preempt_count() != kernel_locked())
+#else
+# define in_atomic()	(preempt_count() != 0)
+#endif
 #ifndef CONFIG_SMP
 #define synchronize_irq()	barrier()
...
@@ -83,6 +83,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long idx;
 	unsigned long vaddr;
+	inc_preempt_count();
 	if (page < highmem_start_page)
 		return page_address(page);
@@ -116,8 +117,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr;
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
-	if (vaddr < FIX_KMAP_BEGIN) // FIXME
+	if (vaddr < FIX_KMAP_BEGIN) { // FIXME
+		dec_preempt_count();
 		return;
+	}
 	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
 		BUG();
@@ -142,6 +145,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	flush_tlb_all();
 #endif
 #endif
+	dec_preempt_count();
 }
 #endif /* __KERNEL__ */
...
@@ -24,8 +24,8 @@ static inline void *kmap(struct page *page) { return page_address(page); }
 #define kunmap(page) do { (void) (page); } while (0)
-#define kmap_atomic(page,idx)		kmap(page)
-#define kunmap_atomic(page,idx)		kunmap(page)
+#define kmap_atomic(page, idx)		page_address(page)
+#define kunmap_atomic(addr, idx)	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
...
@@ -29,10 +29,11 @@
 	.mmlist		= LIST_HEAD_INIT(name.mmlist), \
 }
-#define INIT_SIGNALS { \
+#define INIT_SIGNALS(sig) { \
 	.count		= ATOMIC_INIT(1), \
 	.action		= { {{0,}}, }, \
-	.siglock	= SPIN_LOCK_UNLOCKED \
+	.siglock	= SPIN_LOCK_UNLOCKED, \
+	.shared_pending	= { NULL, &sig.shared_pending.head, {{0}}}, \
 }
 /*
...
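INIT_SIGNALS now takes the variable's name because a sigpending queue is self-referential: its tail pointer must point at its own head field, which an anonymous initializer cannot express. The same idiom in isolation (a sketch of ours with a made-up struct, not the kernel's sigpending):

struct node;

struct queue {
	struct node	*head;
	struct node	**tail;		/* points at the last next-pointer */
};

/* the macro needs the variable's name so it can take &(name).head */
#define QUEUE_INIT(name)	{ NULL, &(name).head }

static struct queue q = QUEUE_INIT(q);	/* q.tail == &q.head: empty queue */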
@@ -386,6 +386,9 @@ struct pci_dev {
 	int		ro;		/* ISAPnP: read only */
 	unsigned short	regs;		/* ISAPnP: supported registers */
+	/* These fields are used by common fixups */
+	unsigned short	transparent:1;	/* Transparent PCI bridge */
 	int (*prepare)(struct pci_dev *dev);	/* ISAPnP hooks */
 	int (*activate)(struct pci_dev *dev);
 	int (*deactivate)(struct pci_dev *dev);
@@ -406,6 +409,10 @@
 #define PCI_ROM_RESOURCE	6
 #define PCI_BRIDGE_RESOURCES	7
 #define PCI_NUM_RESOURCES	11
+#ifndef PCI_BUS_NUM_RESOURCES
+#define PCI_BUS_NUM_RESOURCES	4
+#endif
 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
@@ -415,7 +422,8 @@ struct pci_bus {
 	struct list_head children;	/* list of child buses */
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
-	struct resource	*resource[4];	/* address space routed to this bus */
+	struct resource	*resource[PCI_BUS_NUM_RESOURCES];
+					/* address space routed to this bus */
 	struct pci_ops	*ops;		/* configuration access functions */
 	void		*sysdata;	/* hook for sys-specific extension */
...
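The #ifndef guard turns the bus resource count into an overridable default: an architecture that routes more than four address windows to a bus can define PCI_BUS_NUM_RESOURCES before this header is parsed. A sketch (ours) of how an arch header would use it; the value 8 is purely illustrative:

	/* in a hypothetical <asm/pci.h>, seen before <linux/pci.h> */
	#define PCI_BUS_NUM_RESOURCES	8	/* this arch routes 8 windows per bus */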
 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
 #include <linux/config.h>
 #define preempt_count()	(current_thread_info()->preempt_count)
 #define inc_preempt_count() \
 do { \
@@ -31,36 +36,25 @@ do { \
 	barrier(); \
 } while (0)
-#define preempt_enable() \
+#define preempt_check_resched() \
 do { \
-	preempt_enable_no_resched(); \
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
 		preempt_schedule(); \
 } while (0)
-#define preempt_check_resched() \
+#define preempt_enable() \
 do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
+	preempt_enable_no_resched(); \
+	preempt_check_resched(); \
 } while (0)
-#define inc_preempt_count_non_preempt()	do { } while (0)
-#define dec_preempt_count_non_preempt()	do { } while (0)
 #else
 #define preempt_disable()		do { } while (0)
-#define preempt_enable_no_resched()	do {} while(0)
+#define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
 #define preempt_check_resched()		do { } while (0)
-/*
- * Sometimes we want to increment the preempt count, but we know that it's
- * already incremented if the kernel is compiled for preemptibility.
- */
-#define inc_preempt_count_non_preempt()	inc_preempt_count()
-#define dec_preempt_count_non_preempt()	dec_preempt_count()
 #endif
 #endif /* __LINUX_PREEMPT_H */
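The reshuffle defines preempt_check_resched() first so that preempt_enable() can be expressed as enable-without-resched followed by the check, and it drops the *_non_preempt() counter pair in favour of using inc/dec_preempt_count() directly everywhere, which is exactly what the kmap_atomic() hunks above do. Callers are unchanged; a typical section still reads (sketch of ours):

	preempt_disable();	/* pin this CPU: per-CPU state stays ours */
	/* ... touch per-CPU data ... */
	preempt_enable();	/* drop the count, then maybe preempt_schedule() */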
@@ -211,6 +211,11 @@ struct signal_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
+	/* current thread group signal load-balancing target: */
+	task_t			*curr_target;
+	struct sigpending	shared_pending;
 };
 /*
@@ -356,7 +361,7 @@ struct task_struct {
 	spinlock_t sigmask_lock;	/* Protects signal and blocked */
 	struct signal_struct *sig;
-	sigset_t blocked;
+	sigset_t blocked, real_blocked, shared_unblocked;
 	struct sigpending pending;
 	unsigned long sas_ss_sp;
@@ -431,6 +436,7 @@ extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
 extern void set_user_nice(task_t *p, long nice);
 extern int task_prio(task_t *p);
 extern int task_nice(task_t *p);
+extern int task_curr(task_t *p);
 extern int idle_cpu(int cpu);
 void yield(void);
@@ -535,7 +541,7 @@ extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *);
 extern void sig_exit(int, int, struct siginfo *);
-extern int dequeue_signal(sigset_t *, siginfo_t *);
+extern int dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info);
 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 			      sigset_t *mask);
 extern void unblock_all_signals(void);
@@ -654,6 +660,7 @@ extern void exit_thread(void);
 extern void exit_mm(struct task_struct *);
 extern void exit_files(struct task_struct *);
 extern void exit_sighand(struct task_struct *);
+extern void remove_thread_group(struct task_struct *tsk, struct signal_struct *sig);
 extern void reparent_to_init(void);
 extern void daemonize(void);
@@ -786,8 +793,29 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 #define for_each_thread(task) \
 	for (task = next_thread(current) ; task != current ; task = next_thread(task))
-#define next_thread(p) \
-	list_entry((p)->thread_group.next, struct task_struct, thread_group)
+static inline task_t *next_thread(task_t *p)
+{
+	if (!p->sig)
+		BUG();
+#if CONFIG_SMP
+	if (!spin_is_locked(&p->sig->siglock) &&
+	    !rwlock_is_locked(&tasklist_lock))
+		BUG();
+#endif
+	return list_entry((p)->thread_group.next, task_t, thread_group);
+}
+static inline task_t *prev_thread(task_t *p)
+{
+	if (!p->sig)
+		BUG();
+#if CONFIG_SMP
+	if (!spin_is_locked(&p->sig->siglock) &&
+	    !rwlock_is_locked(&tasklist_lock))
+		BUG();
+#endif
+	return list_entry((p)->thread_group.prev, task_t, thread_group);
+}
 #define thread_group_leader(p)	(p->pid == p->tgid)
@@ -903,21 +931,8 @@ static inline void cond_resched(void)
    This is required every time the blocked sigset_t changes.
    All callers should hold t->sigmask_lock. */
-static inline void recalc_sigpending_tsk(struct task_struct *t)
-{
-	if (has_pending_signals(&t->pending.signal, &t->blocked))
-		set_tsk_thread_flag(t, TIF_SIGPENDING);
-	else
-		clear_tsk_thread_flag(t, TIF_SIGPENDING);
-}
-static inline void recalc_sigpending(void)
-{
-	if (has_pending_signals(&current->pending.signal, &current->blocked))
-		set_thread_flag(TIF_SIGPENDING);
-	else
-		clear_thread_flag(TIF_SIGPENDING);
-}
+extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
+extern void recalc_sigpending(void);
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
...
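next_thread() growing BUG() assertions means every thread-group walk must now provably hold either the group's siglock or tasklist_lock; on SMP, the new rwlock_is_locked() from the spinlock.h hunk is what keeps the assertion quiet. A sketch (ours) of a compliant walk over the current group using the for_each_thread() macro above:

	task_t *t;

	read_lock(&tasklist_lock);	/* satisfies next_thread()'s lock check */
	for_each_thread(t) {
		/* visits every thread in current's group except current */
	}
	read_unlock(&tasklist_lock);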
@@ -28,6 +28,7 @@ extern asmlinkage long sys_mount(char *dev_name, char *dir_name, char *type,
 				 unsigned long flags, void *data);
 extern asmlinkage long sys_mkdir(const char *name, int mode);
 extern asmlinkage long sys_chdir(const char *name);
+extern asmlinkage long sys_fchdir(int fd);
 extern asmlinkage long sys_chroot(const char *name);
 extern asmlinkage long sys_unlink(const char *name);
 extern asmlinkage long sys_symlink(const char *old, const char *new);
@@ -730,17 +731,13 @@ static void __init mount_root(void)
 }
 #ifdef CONFIG_BLK_DEV_INITRD
+static int old_fd, root_fd;
 static int do_linuxrc(void * shell)
 {
 	static char *argv[] = { "linuxrc", NULL, };
 	extern char * envp_init[];
-	sys_chdir("/root");
-	sys_mount(".", "/", NULL, MS_MOVE, NULL);
-	sys_chroot(".");
-	mount_devfs_fs ();
+	close(old_fd);close(root_fd);
 	close(0);close(1);close(2);
 	setsid();
 	(void) open("/dev/console",O_RDWR,0);
@@ -758,9 +755,16 @@ static void __init handle_initrd(void)
 	int i, pid;
 	create_dev("/dev/root.old", Root_RAM0, NULL);
+	/* mount initrd on rootfs' /root */
 	mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
 	sys_mkdir("/old", 0700);
-	sys_chdir("/old");
+	root_fd = open("/", 0, 0);
+	old_fd = open("/old", 0, 0);
+	/* move initrd over / and chdir/chroot in initrd root */
+	sys_chdir("/root");
+	sys_mount(".", "/", NULL, MS_MOVE, NULL);
+	sys_chroot(".");
+	mount_devfs_fs ();
 	pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
 	if (pid > 0) {
@@ -768,7 +772,14 @@ static void __init handle_initrd(void)
 		yield();
 	}
-	sys_mount("..", ".", NULL, MS_MOVE, NULL);
+	/* move initrd to rootfs' /old */
+	sys_fchdir(old_fd);
+	sys_mount("/", ".", NULL, MS_MOVE, NULL);
+	/* switch root and cwd back to / of rootfs */
+	sys_fchdir(root_fd);
+	sys_chroot(".");
+	close(old_fd);
+	close(root_fd);
 	sys_umount("/old/dev", 0);
 	if (real_root_dev == Root_RAM0) {
...
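The new sequence rests on a classic trick: an open file descriptor survives chroot and MS_MOVE mounts, so holding descriptors for both roots lets handle_initrd() step back out after linuxrc finishes. The same mechanism in plain userspace terms (a sketch of ours; the paths are illustrative and the caller needs CAP_SYS_CHROOT):

#include <fcntl.h>
#include <unistd.h>

static void pivot_and_return(void)
{
	int root_fd = open("/", O_RDONLY);	/* handle on the outer root */

	chdir("/newroot");
	chroot(".");		/* now confined to /newroot... */
	/* ... do work inside the new root ... */
	fchdir(root_fd);	/* ...but the saved fd still reaches outside */
	chroot(".");		/* outer root restored */
	close(root_fd);
}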
@@ -36,7 +36,6 @@ static inline void __unhash_process(struct task_struct *p)
 	nr_threads--;
 	unhash_pid(p);
 	REMOVE_LINKS(p);
-	list_del(&p->thread_group);
 	p->pid = 0;
 	proc_dentry = p->proc_dentry;
 	if (unlikely(proc_dentry != NULL)) {
@@ -73,6 +72,7 @@ static void release_task(struct task_struct * p)
 	}
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
 	unhash_process(p);
+	exit_sighand(p);
 	release_thread(p);
 	if (p != current) {
@@ -244,7 +244,8 @@ void daemonize(void)
 static void reparent_thread(task_t *p, task_t *reaper, task_t *child_reaper)
 {
 	/* We dont want people slaying init */
-	p->exit_signal = SIGCHLD;
+	if (p->exit_signal != -1)
+		p->exit_signal = SIGCHLD;
 	p->self_exec_id++;
 	/* Make sure we're not reparenting to ourselves */
@@ -412,18 +413,15 @@ void exit_mm(struct task_struct *tsk)
  */
 static inline void forget_original_parent(struct task_struct * father)
 {
-	struct task_struct *p, *reaper;
+	struct task_struct *p, *reaper = father;
 	struct list_head *_p;
-	read_lock(&tasklist_lock);
-	/* Next in our thread group, if they're not already exiting */
-	reaper = father;
-	do {
-		reaper = next_thread(reaper);
-		if (!(reaper->flags & PF_EXITING))
-			break;
-	} while (reaper != father);
+	write_lock_irq(&tasklist_lock);
+	if (father->exit_signal != -1)
+		reaper = prev_thread(reaper);
+	else
+		reaper = child_reaper;
 	if (reaper == father)
 		reaper = child_reaper;
@@ -444,7 +442,7 @@ static inline void forget_original_parent(struct task_struct * father)
 		p = list_entry(_p,struct task_struct,ptrace_list);
 		reparent_thread(p, reaper, child_reaper);
 	}
-	read_unlock(&tasklist_lock);
+	write_unlock_irq(&tasklist_lock);
 }
 static inline void zap_thread(task_t *p, task_t *father, int traced)
@@ -604,7 +602,6 @@ NORET_TYPE void do_exit(long code)
 	__exit_files(tsk);
 	__exit_fs(tsk);
 	exit_namespace(tsk);
-	exit_sighand(tsk);
 	exit_thread();
 	if (current->leader)
@@ -763,6 +760,8 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
 			if (options & __WNOTHREAD)
 				break;
 			tsk = next_thread(tsk);
+			if (tsk->sig != current->sig)
+				BUG();
 		} while (tsk != current);
 		read_unlock(&tasklist_lock);
 		if (flag) {
...
@@ -630,6 +630,9 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
 	spin_lock_init(&sig->siglock);
 	atomic_set(&sig->count, 1);
 	memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
+	sig->curr_target = NULL;
+	init_sigpending(&sig->shared_pending);
 	return 0;
 }
@@ -664,6 +667,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
+	/*
+	 * Thread groups must share signals as well:
+	 */
+	if (clone_flags & CLONE_THREAD)
+		clone_flags |= CLONE_SIGHAND;
 	retval = security_ops->task_create(clone_flags);
 	if (retval)
 		goto fork_out;
@@ -843,8 +852,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->parent = p->real_parent;
 	if (clone_flags & CLONE_THREAD) {
+		spin_lock(&current->sig->siglock);
 		p->tgid = current->tgid;
 		list_add(&p->thread_group, &current->thread_group);
+		spin_unlock(&current->sig->siglock);
 	}
 	SET_LINKS(p);
...
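Forcing CLONE_SIGHAND whenever CLONE_THREAD is set encodes the new invariant that a thread group always shares one signal_struct, and the siglock taken around the list_add() is what next_thread()'s assertion expects. From userspace the same flag pair shows up in any raw clone() call, e.g. (a sketch of ours, error handling omitted):

#include <sched.h>

static int thread_fn(void *arg)
{
	return 0;	/* runs inside the creator's thread group */
}

int spawn(char *stack_top)
{
	/* CLONE_VM is required by CLONE_SIGHAND, which CLONE_THREAD implies */
	return clone(thread_fn, stack_top,
		     CLONE_THREAD | CLONE_SIGHAND | CLONE_VM, NULL);
}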
@@ -1335,6 +1335,15 @@ int task_nice(task_t *p)
 	return TASK_NICE(p);
 }
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ */
+int task_curr(task_t *p)
+{
+	return cpu_curr(task_cpu(p)) == p;
+}
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
...
@@ -1036,7 +1036,52 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 	UPDATE_ATIME(inode);
 }
+/*
+ * Fault a userspace page into pagetables.  Return non-zero on a fault.
+ *
+ * FIXME: this assumes that two userspace pages are always sufficient.  That's
+ * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ */
+static inline int fault_in_pages_writeable(char *uaddr, int size)
+{
+	int ret;
+	/*
+	 * Writing zeroes into userspace here is OK, because we know that if
+	 * the zero gets there, we'll be overwriting it.
+	 */
+	ret = __put_user(0, uaddr);
+	if (ret == 0) {
+		char *end = uaddr + size - 1;
+		/*
+		 * If the page was already mapped, this will get a cache miss
+		 * for sure, so try to avoid doing it.
+		 */
+		if (((unsigned long)uaddr & PAGE_MASK) !=
+				((unsigned long)end & PAGE_MASK))
+			ret = __put_user(0, end);
+	}
+	return ret;
+}
+static inline void fault_in_pages_readable(const char *uaddr, int size)
+{
+	volatile char c;
+	int ret;
+	ret = __get_user(c, (char *)uaddr);
+	if (ret == 0) {
+		const char *end = uaddr + size - 1;
+		if (((unsigned long)uaddr & PAGE_MASK) !=
+				((unsigned long)end & PAGE_MASK))
+			__get_user(c, (char *)end);
+	}
+}
-int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
+int file_read_actor(read_descriptor_t *desc, struct page *page,
+			unsigned long offset, unsigned long size)
 {
 	char *kaddr;
 	unsigned long left, count = desc->count;
@@ -1044,14 +1089,28 @@ int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long o
 	if (size > count)
 		size = count;
+	/*
+	 * Faults on the destination of a read are common, so do it before
+	 * taking the kmap.
+	 */
+	if (!fault_in_pages_writeable(desc->buf, size)) {
+		kaddr = kmap_atomic(page, KM_USER0);
+		left = __copy_to_user(desc->buf, kaddr + offset, size);
+		kunmap_atomic(kaddr, KM_USER0);
+		if (left == 0)
+			goto success;
+	}
+	/* Do it the slow way */
 	kaddr = kmap(page);
 	left = __copy_to_user(desc->buf, kaddr + offset, size);
 	kunmap(page);
 	if (left) {
 		size -= left;
 		desc->error = -EFAULT;
 	}
+success:
 	desc->count = count - size;
 	desc->written += size;
 	desc->buf += size;
@@ -1838,6 +1897,26 @@ inline void remove_suid(struct dentry *dentry)
 	}
 }
+static inline int
+filemap_copy_from_user(struct page *page, unsigned long offset,
+			const char *buf, unsigned bytes)
+{
+	char *kaddr;
+	int left;
+	kaddr = kmap_atomic(page, KM_USER0);
+	left = __copy_from_user(kaddr + offset, buf, bytes);
+	kunmap_atomic(kaddr, KM_USER0);
+	if (left != 0) {
+		/* Do it the slow way */
+		kaddr = kmap(page);
+		left = __copy_from_user(kaddr + offset, buf, bytes);
+		kunmap(page);
+	}
+	return left;
+}
 /*
  * Write to a file through the page cache.
  *
@@ -1990,7 +2069,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
 		unsigned long index;
 		unsigned long offset;
 		long page_fault;
-		char *kaddr;
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
@@ -2004,10 +2082,7 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
 		 * same page as we're writing to, without it being marked
 		 * up-to-date.
 		 */
-		{ volatile unsigned char dummy;
-			__get_user(dummy, buf);
-			__get_user(dummy, buf+bytes-1);
-		}
+		fault_in_pages_readable(buf, bytes);
 		page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
 		if (!page) {
@@ -2015,22 +2090,19 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
 			break;
 		}
-		kaddr = kmap(page);
 		status = a_ops->prepare_write(file, page, offset, offset+bytes);
 		if (unlikely(status)) {
 			/*
 			 * prepare_write() may have instantiated a few blocks
 			 * outside i_size.  Trim these off again.
 			 */
-			kunmap(page);
 			unlock_page(page);
 			page_cache_release(page);
 			if (pos + bytes > inode->i_size)
 				vmtruncate(inode, inode->i_size);
 			break;
 		}
-		page_fault = __copy_from_user(kaddr + offset, buf, bytes);
-		flush_dcache_page(page);
+		page_fault = filemap_copy_from_user(page, offset, buf, bytes);
 		status = a_ops->commit_write(file, page, offset, offset+bytes);
 		if (unlikely(page_fault)) {
 			status = -EFAULT;
@@ -2045,7 +2117,6 @@ ssize_t generic_file_write_nolock(struct file *file, const char *buf,
 				buf += status;
 			}
 		}
-		kunmap(page);
 		if (!PageReferenced(page))
 			SetPageReferenced(page);
 		unlock_page(page);
...
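Both new helpers implement the same two-tier copy: pre-fault the user buffer, attempt the copy under a non-sleeping kmap_atomic(), and only fall back to the sleeping kmap() when a fault slips through anyway; in_atomic() in the fault handlers is what makes the fast path safe. A read-side twin would look like this sketch; filemap_copy_to_user is our hypothetical name, not something this commit adds:

static inline int
filemap_copy_to_user(char *buf, struct page *page,
			unsigned long offset, unsigned bytes)
{
	char *kaddr;
	int left;

	fault_in_pages_writeable(buf, bytes);	/* make a fault unlikely */
	kaddr = kmap_atomic(page, KM_USER0);	/* fast, non-sleeping path */
	left = __copy_to_user(buf, kaddr + offset, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (left != 0) {
		/* Do it the slow way: kmap() may sleep, but always works */
		kaddr = kmap(page);
		left = __copy_to_user(buf, kaddr + offset, bytes);
		kunmap(page);
	}
	return left;
}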
@@ -782,10 +782,9 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 	totalpages = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		unsigned long size = zones_size[i];
-		totalpages += size;
-	}
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		totalpages += zones_size[i];
 	realtotalpages = totalpages;
 	if (zholes_size)
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -823,7 +822,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		if (zholes_size)
 			realsize -= zholes_size[j];
-		printk("zone(%lu): %lu pages.\n", j, size);
+		printk("  %s zone: %lu pages\n", zone_names[j], realsize);
 		zone->size = size;
 		zone->name = zone_names[j];
 		spin_lock_init(&zone->lock);
...
@@ -483,7 +483,7 @@ shrink_zone(struct zone *zone, int priority,
 	ratio = (unsigned long)nr_pages * zone->nr_active /
 				((zone->nr_inactive | 1) * 2);
 	atomic_add(ratio+1, &zone->refill_counter);
-	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
+	while (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
 		atomic_sub(SWAP_CLUSTER_MAX, &zone->refill_counter);
 		refill_inactive_zone(zone, SWAP_CLUSTER_MAX);
 	}
@@ -517,7 +517,7 @@ shrink_caches(struct zone *classzone, int priority,
 	first_classzone = classzone->zone_pgdat->node_zones;
 	zone = classzone;
-	while (zone >= first_classzone) {
+	while (zone >= first_classzone && nr_pages > 0) {
 		if (zone->free_pages <= zone->pages_high) {
 			nr_pages = shrink_zone(zone, priority,
 						gfp_mask, nr_pages);
...