Commit af55c465 authored by Linus Torvalds

Import 2.3.99pre7-4

parent 05b27da8
@@ -700,11 +700,7 @@ static int __init acpi_init_piix4(struct pci_dev *dev)
 	if (!(pmregmisc & ACPI_PIIX4_PMIOSE))
 		return -ENODEV;
-	pci_read_config_dword(dev, 0x40, &base);
-	if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
-		return -ENODEV;
-	base &= PCI_BASE_ADDRESS_IO_MASK;
+	base = dev->resource[PCI_BRIDGE_RESOURCES].start & PCI_BASE_ADDRESS_IO_MASK;
 	if (!base)
 		return -ENODEV;
@@ -757,16 +753,13 @@ static int __init acpi_init_via(struct pci_dev *dev)
 	if (!(tmp & 0x80))
 		return -ENODEV;
-	pci_read_config_byte(dev, PCI_CLASS_REVISION, &tmp);
-	tmp = (tmp & 0x10 ? 0x48 : 0x20);
-	pci_read_config_dword(dev, tmp, &base);
-	if (!(base & PCI_BASE_ADDRESS_SPACE_IO))
-		return -ENODEV;
-	base &= PCI_BASE_ADDRESS_IO_MASK;
-	if (!base)
-		return -ENODEV;
+	base = pci_resource_start(dev, PCI_BRIDGE_RESOURCES);
+	if (!base) {
+		base = pci_resource_start(dev, PCI_BASE_ADDRESS_4);
+		if (!base)
+			return -ENODEV;
+	}
+	base &= PCI_BASE_ADDRESS_IO_MASK;
 	pci_read_config_byte(dev, 0x42, &irq);
...
@@ -433,7 +433,6 @@ void __init pcibios_fixup_irqs(void)
 			dev->irq = irq;
 		}
 	}
-	pirq_table = NULL; /* Avoid automatic IRQ assignment */
 }
 #endif
 /*
...
@@ -15,7 +15,6 @@
 static const char *version = "pcnet32.c:v1.25kf 26.9.1999 tsbogend@alpha.franken.de\n";
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
...
@@ -163,6 +163,23 @@ static void __init quirk_piix4acpi(struct pci_dev *dev)
 	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1);
 }
 
+/*
+ * VIA ACPI: One IO region pointed to by longword at
+ * 0x48 or 0x20 (256 bytes of ACPI registers)
+ */
+static void __init quirk_via_acpi(struct pci_dev *dev)
+{
+	u8 rev;
+	u32 region;
+
+	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+	if (rev & 0x10) {
+		pci_read_config_dword(dev, 0x48, &region);
+		region &= PCI_BASE_ADDRESS_IO_MASK;
+		quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES);
+	}
+}
+
 /*
  * The main table of quirks.
  */
@@ -192,6 +209,8 @@ static struct pci_fixup pci_fixups[] __initdata = {
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_2,	quirk_natoma },
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5597,		quirk_nopcipci },
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_496,		quirk_nopcipci },
+	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_via_acpi },
+	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_via_acpi },
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	quirk_piix4acpi },
 	{ PCI_FIXUP_FINAL,	PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M7101,		quirk_ali7101 },
 	{ 0 }
...
@@ -4,9 +4,11 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  super.c contains code to handle: - mount structures
- *                                    - super-block tables.
+ *                                    - super-block tables
+ *                                    - filesystem drivers list
  *                                    - mount system call
  *                                    - umount system call
+ *                                    - ustat system call
  *
  *  Added options to /proc/mounts
  *  Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
@@ -288,7 +290,7 @@ static struct vfsmount *add_vfsmnt(struct super_block *sb,
 	struct vfsmount *mnt;
 	char *name;
-	mnt = (struct vfsmount *)kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
+	mnt = kmalloc(sizeof(struct vfsmount), GFP_KERNEL);
 	if (!mnt)
 		goto out;
 	memset(mnt, 0, sizeof(struct vfsmount));
@@ -302,19 +304,17 @@ static struct vfsmount *add_vfsmnt(struct super_block *sb,
 	/* N.B. Is it really OK to have a vfsmount without names? */
 	if (dev_name) {
-		name = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+		name = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
 		if (name) {
 			strcpy(name, dev_name);
 			mnt->mnt_devname = name;
 		}
 	}
-	if (dir_name) {
-		name = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
-		if (name) {
-			strcpy(name, dir_name);
-			mnt->mnt_dirname = name;
-		}
-	}
+	name = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+	if (name) {
+		strcpy(name, dir_name);
+		mnt->mnt_dirname = name;
+	}
 	list_add(&mnt->mnt_instances, &sb->s_mounts);
 	list_add(&mnt->mnt_clash, &mountpoint->d_vfsmnt);
@@ -336,12 +336,12 @@ static void move_vfsmnt(struct vfsmount *mnt,
 	char *new_devname = NULL, *new_dirname = NULL;
 	if (dev_name) {
-		new_devname = (char *) kmalloc(strlen(dev_name)+1, GFP_KERNEL);
+		new_devname = kmalloc(strlen(dev_name)+1, GFP_KERNEL);
 		if (new_devname)
 			strcpy(new_devname, dev_name);
 	}
 	if (dir_name) {
-		new_dirname = (char *) kmalloc(strlen(dir_name)+1, GFP_KERNEL);
+		new_dirname = kmalloc(strlen(dir_name)+1, GFP_KERNEL);
 		if (new_dirname)
 			strcpy(new_dirname, dir_name);
 	}
...
@@ -12,7 +12,7 @@ extern int printk(const char * fmt, ...)
  * initialize their spinlocks properly, tsk tsk.
  * Remember to turn this off in 2.4. -ben
  */
-#define SPINLOCK_DEBUG 1
+#define SPINLOCK_DEBUG 0
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
...
@@ -80,6 +80,7 @@ extern void lock_page(struct page *page);
 extern void __add_page_to_hash_queue(struct page * page, struct page **p);
 extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
+extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
 extern inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long index)
 {
...
@@ -1455,6 +1455,7 @@ static int shm_swap_core(struct shmid_kernel *shp, unsigned long idx, swp_entry_
 	   reading a not yet uptodate block from disk.
 	   NOTE: we just accounted the swap space reference for this
 	   swap cache page at __get_swap_page() time. */
+	lock_page(page_map);
 	add_to_swap_cache(*outpage = page_map, swap_entry);
 	return OKAY;
 }
...
@@ -429,12 +429,12 @@ NORET_TYPE void do_exit(long code)
 #ifdef CONFIG_BSD_PROCESS_ACCT
 	acct_process(code);
 #endif
-	task_lock(tsk);
 	sem_exit();
 	__exit_mm(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
 	__exit_sighand(tsk);
+	task_lock(tsk);
 	exit_thread();
 	tsk->state = TASK_ZOMBIE;
 	tsk->exit_code = code;
...
@@ -46,7 +46,7 @@ unsigned int page_hash_bits;
 struct page **page_hash_table;
 struct list_head lru_cache;
-spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
 /*
  * NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
  * the pagemap_lru_lock held.
@@ -485,6 +485,26 @@ int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsig
 	return retval;
 }
 
+/*
+ * Add a page to the inode page cache.
+ *
+ * The caller must have locked the page and
+ * set all the page flags correctly..
+ */
+void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
+{
+	if (!PageLocked(page))
+		BUG();
+
+	get_page(page);
+	spin_lock(&pagecache_lock);
+	page->index = index;
+	add_page_to_inode_queue(mapping, page);
+	__add_page_to_hash_queue(page, page_hash(mapping, index));
+	lru_cache_add(page);
+	spin_unlock(&pagecache_lock);
+}
+
 /*
  * This adds a page to the page cache, starting out as locked,
  * owned by us, referenced, but not uptodate and with no errors.
@@ -1514,12 +1534,8 @@ static int filemap_write_page(struct file *file,
 				struct page * page,
 				int wait)
 {
-	int result;
-	struct dentry * dentry;
-	struct inode * inode;
-
-	dentry = file->f_dentry;
-	inode = dentry->d_inode;
+	struct dentry * dentry = file->f_dentry;
+	struct inode * inode = dentry->d_inode;
 
 	/*
@@ -1527,10 +1543,7 @@ static int filemap_write_page(struct file *file,
 	 * vma/file is guaranteed to exist in the unmap/sync cases because
 	 * mmap_sem is held.
 	 */
-	lock_page(page);
-	result = inode->i_mapping->a_ops->writepage(file, dentry, page);
-	UnlockPage(page);
-	return result;
+	return inode->i_mapping->a_ops->writepage(file, dentry, page);
 }
@@ -1588,7 +1601,9 @@ static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
 		printk("weirdness: pgoff=%lu index=%lu address=%lu vm_start=%lu vm_pgoff=%lu\n",
 			pgoff, page->index, address, vma->vm_start, vma->vm_pgoff);
 	}
+	lock_page(page);
 	error = filemap_write_page(vma->vm_file, pgoff, page, 1);
+	UnlockPage(page);
 	page_cache_free(page);
 	return error;
 }
...
@@ -24,8 +24,12 @@
 unsigned long highmem_mapnr;
+/*
+ * Take one locked page, return another low-memory locked page.
+ */
 struct page * prepare_highmem_swapout(struct page * page)
 {
+	struct page *new_page;
 	unsigned long regular_page;
 	unsigned long vaddr;
 	/*
@@ -36,6 +40,14 @@ struct page * prepare_highmem_swapout(struct page * page)
 	if (!PageHighMem(page))
 		return page;
+
+	/*
+	 * Here we break the page lock, and we split the
+	 * dirty page into two. We can unlock the old page,
+	 * and we'll now have two of them. Too bad, it would
+	 * have been nice to continue to potentially share
+	 * across a fork().
+	 */
+	UnlockPage(page);
 	regular_page = __get_free_page(GFP_ATOMIC);
 	if (!regular_page)
 		return NULL;
@@ -49,8 +61,9 @@ struct page * prepare_highmem_swapout(struct page * page)
 	 * we stored its data into the new regular_page.
 	 */
 	__free_page(page);
-	return mem_map + MAP_NR(regular_page);
+	new_page = mem_map + MAP_NR(regular_page);
+	LockPage(new_page);
+	return new_page;
 }
 struct page * replace_with_highmem(struct page * page)
...
@@ -47,14 +47,20 @@ void show_swap_cache_info(void)
 void add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
+	unsigned long flags;
+
 #ifdef SWAP_CACHE_INFO
 	swap_cache_add_total++;
 #endif
+	if (!PageLocked(page))
+		BUG();
 	if (PageTestandSetSwapCache(page))
 		BUG();
 	if (page->mapping)
 		BUG();
-	add_to_page_cache(page, &swapper_space, entry.val);
+	flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty));
+	page->flags = flags | (1 << PG_referenced) | (1 << PG_uptodate);
+	add_to_page_cache_locked(page, &swapper_space, entry.val);
 }
 static inline void remove_from_swap_cache(struct page *page)
@@ -225,6 +231,7 @@ struct page * read_swap_cache_async(swp_entry_t entry, int wait)
 	/*
 	 * Add it to the swap cache and read its contents.
 	 */
+	lock_page(new_page);
 	add_to_swap_cache(new_page, entry);
 	rw_swap_page(READ, new_page, wait);
 	return new_page;
...
@@ -60,8 +60,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 		goto out_failed;
 	}
-#error Do not let this one slip through..
-	if (PageLocked(page))
+	if (TryLockPage(page))
 		goto out_failed;
 	/*
@@ -77,6 +76,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 		swap_duplicate(entry);
 		set_pte(page_table, swp_entry_to_pte(entry));
 drop_pte:
+		UnlockPage(page);
 		vma->vm_mm->rss--;
 		flush_tlb_page(vma, address);
 		__free_page(page);
@@ -108,7 +108,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 	 * locks etc.
 	 */
 	if (!(gfp_mask & __GFP_IO))
-		goto out_failed;
+		goto out_unlock;
 	/*
 	 * Ok, it's really dirty. That means that
@@ -139,6 +139,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 		flush_tlb_page(vma, address);
 		vmlist_access_unlock(vma->vm_mm);
 		error = swapout(page, file);
+		UnlockPage(page);
 		if (file) fput(file);
 		if (!error)
 			goto out_free_success;
@@ -154,15 +155,16 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 	 */
 	entry = get_swap_page();
 	if (!entry.val)
-		goto out_failed; /* No swap space left */
+		goto out_unlock; /* No swap space left */
 	if (!(page = prepare_highmem_swapout(page)))
 		goto out_swap_free;
 	swap_duplicate(entry);	/* One for the process, one for the swap cache */
-	/* This will also lock the page */
+	/* Add it to the swap cache */
 	add_to_swap_cache(page, entry);
 	/* Put the swap entry into the pte after the page is in swapcache */
 	vma->vm_mm->rss--;
 	set_pte(page_table, swp_entry_to_pte(entry));
@@ -179,7 +181,9 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 	swap_free(entry);
 out_failed:
 	return 0;
+out_unlock:
+	UnlockPage(page);
+	return 0;
 }
 /*
@@ -530,11 +534,11 @@ int kswapd(void *unused)
 		for (i = 0; i < MAX_NR_ZONES; i++) {
 			int count = SWAP_CLUSTER_MAX;
 			zone = pgdat->node_zones + i;
-			if ((!zone->size) || (!zone->zone_wake_kswapd))
-				continue;
 			do {
 				if (tsk->need_resched)
 					schedule();
+				if ((!zone->size) || (!zone->zone_wake_kswapd))
+					continue;
 				do_try_to_free_pages(GFP_KSWAPD, zone);
 			} while (zone->free_pages < zone->pages_low &&
 					--count);
...