Commit 6a65e6e3 authored by Linus Torvalds

Import 2.3.24pre3

parent 3dd09e50
......@@ -16,7 +16,6 @@
#include <asm/hydra.h>
#include <asm/prom.h>
#include <asm/gg2.h>
#include <asm/ide.h>
#include <asm/machdep.h>
#include "pci.h"
......
......@@ -28,7 +28,6 @@
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <linux/ide.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
......@@ -37,6 +36,7 @@
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <asm/mmu.h>
#include <asm/processor.h>
......@@ -127,7 +127,7 @@ chrp_get_cpuinfo(char *buffer)
len = sprintf(buffer,"machine\t\t: CHRP %s\n", model);
/* longtrail (goldengate) stuff */
if ( !strncmp( model, "IBM,LongTrail", 9 ) )
if ( !strncmp( model, "IBM,LongTrail", 13 ) )
{
/* VLSI VAS96011/12 `Golden Gate 2' */
/* Memory banks */
......@@ -199,10 +199,10 @@ static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
{
u8 level0, type0, active;
struct device_node *root;
root = find_path_device("/");
if (root &&
!strcmp(get_property(root, "model", NULL), "IBM,LongTrail" ) )
!strncmp(get_property(root, "model", NULL), "IBM,LongTrail", 13 ) )
{
/* select logical device */
sio_write(device, 0x07);
......@@ -220,6 +220,7 @@ static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
sio_write(type, 0x71);
}
}
}
static void __init sio_init(void)
......
......@@ -332,9 +332,12 @@ void __init gemini_init_IRQ(void)
/* gemini has no 8259 */
open_pic.irq_offset = 0;
for( i=0; i < 16; i++ )
for( i=0; i < OPENPIC_VEC_SPURIOUS; i++ )
irq_desc[i].ctl = &open_pic;
openpic_init(1);
#ifdef __SMP__
request_irq(OPENPIC_VEC_IPI, openpic_ipi_action, 0, "IPI0", 0);
#endif /* __SMP__ */
}
#define gemini_rtc_read(x) (readb(GEMINI_RTC+(x)))
......
......@@ -40,10 +40,10 @@ unsigned long zero_paged_on = 0;
unsigned long powersave_nap = 0;
unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
unsigned long zero_sz; /* # currently pre-zero'd pages */
unsigned long zeropage_hits; /* # zero'd pages request that we've done */
unsigned long zeropage_calls; /* # zero'd pages request that've been made */
unsigned long zerototal; /* # pages zero'd over time */
atomic_t zerototal; /* # pages zero'd over time */
atomic_t zeropage_hits; /* # zero'd pages request that we've done */
atomic_t zero_sz; /* # currently pre-zero'd pages */
atomic_t zeropage_calls; /* # zero'd pages request that've been made */
int idled(void)
{
......@@ -57,7 +57,7 @@ int idled(void)
check_pgt_cache();
if ( !current->need_resched && zero_paged_on ) zero_paged();
/*if ( !current->need_resched && zero_paged_on ) zero_paged();*/
if ( !current->need_resched && htab_reclaim_on ) htab_reclaim();
if ( !current->need_resched ) power_save();
......@@ -141,6 +141,7 @@ void inline htab_reclaim(void)
#endif /* CONFIG_8xx */
}
#if 0
/*
* Returns a pre-zero'd page from the list otherwise returns
* NULL.
......@@ -149,7 +150,7 @@ unsigned long get_zero_page_fast(void)
{
unsigned long page = 0;
atomic_inc((atomic_t *)&zero_cache_calls);
atomic_inc(&zero_cache_calls);
if ( zero_quicklist )
{
/* atomically remove this page from the list */
......@@ -194,9 +195,9 @@ void zero_paged(void)
unsigned long bytecount = 0;
pte_t *pte;
if ( zero_cache_sz >= zero_cache_water[0] )
if ( atomic_read(&zero_cache_sz) >= zero_cache_water[0] )
return;
while ( (zero_cache_sz < zero_cache_water[1]) && (!current->need_resched) )
while ( (atomic_read(&zero_cache_sz) < zero_cache_water[1]) && (!current->need_resched) )
{
/*
* Mark a page as reserved so we can mess with it
......@@ -272,6 +273,7 @@ void zero_paged(void)
atomic_inc((atomic_t *)&zero_cache_total);
}
}
#endif
void power_save(void)
{
......
......@@ -85,6 +85,7 @@ static void no_action(int ir1, void *dev, struct pt_regs *regs)
#ifdef __SMP__
void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs)
{
printk("openpic_ipi_action\n");
smp_message_recv();
}
#endif /* __SMP__ */
......
......@@ -311,18 +311,16 @@ int get_cpuinfo(char *buffer)
/*
* Ooh's and aah's info about zero'd pages in idle task
*/
{
len += sprintf(buffer+len,"zero pages\t: total %lu (%luKb) "
"current: %lu (%luKb) hits: %lu/%lu (%lu%%)\n",
zero_cache_total,
(zero_cache_total*PAGE_SIZE)>>10,
zero_cache_sz,
(zero_cache_sz*PAGE_SIZE)>>10,
zero_cache_hits,zero_cache_calls,
/* : 1 below is so we don't div by zero */
(zero_cache_hits*100) /
((zero_cache_calls)?zero_cache_calls:1));
}
len += sprintf(buffer+len,"zero pages\t: total: %u (%luKb) "
"current: %u (%luKb) hits: %u/%u (%u%%)\n",
atomic_read(&zero_cache_total),
(atomic_read(&zero_cache_total)*PAGE_SIZE)>>10,
atomic_read(&zero_cache_sz),
(atomic_read(&zero_cache_sz)*PAGE_SIZE)>>10,
atomic_read(&zero_cache_hits),atomic_read(&zero_cache_calls),
/* : 1 below is so we don't div by zero */
(atomic_read(&zero_cache_hits)*100) /
((atomic_read(&zero_cache_calls))?atomic_read(&zero_cache_calls):1));
if (ppc_md.get_cpuinfo != NULL)
{
......
......@@ -159,6 +159,12 @@ void smp_message_recv(void)
void smp_send_reschedule(int cpu)
{
/*
* This isn't the case anymore since the other CPU could be
* sleeping and won't reschedule until the next interrupt (such
* as the timer).
* -- Cort
*/
/* This is only used if `cpu' is running an idle task,
so it will reschedule itself anyway... */
/*smp_message_pass(cpu, MSG_RESCHEDULE, 0, 0);*/
......@@ -173,7 +179,7 @@ spinlock_t mesg_pass_lock = SPIN_LOCK_UNLOCKED;
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
int i;
if ( !(_machine & (_MACH_Pmac|_MACH_chrp)) )
if ( !(_machine & (_MACH_Pmac|_MACH_chrp|_MACH_gemini)) )
return;
spin_lock(&mesg_pass_lock);
......@@ -216,24 +222,29 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
/*
* There has to be some way of doing this better -
* perhaps a sent-to-all or send-to-all-but-self
* perhaps a send-to-all or send-to-all-but-self
* in the openpic. This gets us going for now, though.
* -- Cort
*/
switch ( target )
{
case MSG_ALL:
for ( i = 0 ; i < smp_num_cpus ; i++ )
openpic_cause_IPI(i, 0, 0xffffffff );
openpic_cause_IPI(smp_processor_id(), 0, 0x0 );
openpic_cause_IPI(smp_processor_id(), 0, 0xffffffff );
break;
case MSG_ALL_BUT_SELF:
for ( i = 0 ; i < smp_num_cpus ; i++ )
if ( i != smp_processor_id () )
openpic_cause_IPI(i, 0,
0xffffffff & ~(1 << smp_processor_id()));
{
openpic_cause_IPI(smp_processor_id(), 0,
0x0 );
openpic_cause_IPI(smp_processor_id(), 0,
0xffffffff & ~(1 << smp_processor_id()));
}
break;
default:
openpic_cause_IPI(target, 0, 1U << target);
openpic_cause_IPI(smp_processor_id(), 0, 0x0 );
openpic_cause_IPI(target, 0, 1U << target );
break;
}
}
......@@ -251,8 +262,7 @@ void __init smp_boot_cpus(void)
struct task_struct *p;
unsigned long a;
printk("Entering SMP Mode...\n");
/* let other processors know to not do certain initialization */
printk("Entering SMP Mode...\n");
smp_num_cpus = 1;
smp_store_cpu_info(0);
......@@ -290,15 +300,13 @@ void __init smp_boot_cpus(void)
cpu_nr = 2;
break;
case _MACH_chrp:
/* openpic doesn't report # of cpus, just # possible -- Cort */
#if 0
cpu_nr = ((openpic_read(&OpenPIC->Global.Feature_Reporting0)
& OPENPIC_FEATURE_LAST_PROCESSOR_MASK) >>
OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT)+1;
#endif
for ( i = 0; i < 4 ; i++ )
openpic_enable_IPI(i);
cpu_nr = smp_chrp_cpu_nr;
break;
case _MACH_gemini:
for ( i = 0; i < 4 ; i++ )
openpic_enable_IPI(i);
cpu_nr = (readb(GEMINI_CPUSTAT) & GEMINI_CPU_COUNT_MASK)>>2;
cpu_nr = (cpu_nr == 0) ? 4 : cpu_nr;
break;
......@@ -350,19 +358,6 @@ void __init smp_boot_cpus(void)
case _MACH_chrp:
*(unsigned long *)KERNELBASE = i;
asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
#if 0
device = find_type_devices("cpu");
/* assume cpu device list is in order, find the ith cpu */
for ( a = i; device && a; device = device->next, a-- )
;
if ( !device )
break;
printk( "Starting %s (%lu): ", device->full_name,
*(ulong *)get_property(device, "reg", NULL) );
call_rtas( "start-cpu", 3, 1, NULL,
*(ulong *)get_property(device, "reg", NULL),
__pa(__secondary_start_chrp), i);
#endif
break;
case _MACH_gemini:
openpic_init_processor( 1<<i );
......@@ -428,6 +423,7 @@ void __init smp_callin(void)
smp_store_cpu_info(current->processor);
set_dec(decrementer_count);
init_idle();
#if 0
current->mm->mmap->vm_page_prot = PAGE_SHARED;
current->mm->mmap->vm_start = PAGE_OFFSET;
......
......@@ -204,9 +204,8 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
if (pmd_none(*pmd)) {
if (!mem_init_done)
pte = (pte_t *) MMU_get_page();
else if ((pte = (pte_t *) get_zero_page_fast()) == NULL)
if ((pte = (pte_t *) __get_free_page(GFP_KERNEL)))
clear_page(pte);
else if ((pte = (pte_t *) __get_free_page(GFP_KERNEL)))
clear_page(pte);
if (pte) {
pmd_val(*pmd) = (unsigned long)pte;
return pte + offset;
......@@ -1156,8 +1155,7 @@ void __init do_init_bootmem(void)
__pa(end_of_DRAM) >> PAGE_SHIFT);
/* remove the bootmem bitmap from the available memory */
remove_mem_piece(&phys_avail, start, start + boot_mapsize, 1);
remove_mem_piece(&phys_avail, start, boot_mapsize, 1);
/* add everything in phys_avail into the bootmem map */
for (i = 0; i < phys_avail.n_regions; ++i)
free_bootmem(phys_avail.regions[i].address,
......
......@@ -1292,15 +1292,16 @@ static void create_empty_buffers(struct page *page, struct inode *inode, unsigne
static void unmap_underlying_metadata(struct buffer_head * bh)
{
#if 0
bh = get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size);
if (bh)
{
if (bh) {
unmap_buffer(bh);
/* Here we could run brelse or bforget. We use
bforget because it will try to put the buffer
in the freelist. */
__bforget(bh);
}
#endif
}
/*
......@@ -2192,11 +2193,13 @@ int try_to_free_buffers(struct page * page)
void show_buffers(void)
{
#ifdef __SMP__
struct buffer_head * bh;
int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
int protected = 0;
int nlist;
static char *buf_types[NR_LIST] = { "CLEAN", "LOCKED", "DIRTY" };
#endif
printk("Buffer memory: %6dkB\n",
atomic_read(&buffermem_pages) << (PAGE_SHIFT-10));
......
......@@ -556,7 +556,6 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
if ((stable || (stable = EX_ISSYNC(exp))) && !EX_WGATHER(exp))
file.f_flags |= O_SYNC;
fh_lock(fhp); /* lock inode */
file.f_pos = offset; /* set write offset */
/* Write the data. */
......@@ -588,8 +587,6 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
current->cap_effective = saved_cap;
}
fh_unlock(fhp); /* unlock inode */
if (err >= 0 && stable) {
static unsigned long last_ino = 0;
static kdev_t last_dev = NODEV;
......
......@@ -404,9 +404,6 @@ romfs_readpage(struct file * file, struct page * page)
get_page(page);
buf = page_address(page);
/* hack? */
page->owner = current;
offset = page->offset;
if (offset < inode->i_size) {
avail = inode->i_size-offset;
......
......@@ -44,15 +44,15 @@ printk_name(const char *name, int len)
* smb-cache code assumes we return a locked page.
*/
static unsigned long
get_cached_page(struct address_space *owner, unsigned long offset, int new)
get_cached_page(struct address_space *mapping, unsigned long offset, int new)
{
struct page * page;
struct page ** hash;
unsigned long new_page;
again:
hash = page_hash(owner, offset);
page = __find_lock_page(owner, offset, hash);
hash = page_hash(mapping, offset);
page = __find_lock_page(mapping, offset, hash);
if(!page && new) {
/* not in cache, alloc a new page */
new_page = page_cache_alloc();
......@@ -60,7 +60,7 @@ get_cached_page(struct address_space *owner, unsigned long offset, int new)
return 0;
clear_page(new_page); /* smb code assumes pages are zeroed */
page = page_cache_entry(new_page);
if (add_to_page_cache_unique(page, owner, offset, hash)) {
if (add_to_page_cache_unique(page, mapping, offset, hash)) {
/* Hmm, a page has materialized in the
cache. Fine. Go back and get that page
instead ... throwing away this one first. */
......@@ -78,7 +78,7 @@ get_cached_page(struct address_space *owner, unsigned long offset, int new)
static inline struct address_space *
get_cache_inode(struct cache_head *cachep)
{
return (mem_map + MAP_NR((unsigned long) cachep))->owner;
return (mem_map + MAP_NR((unsigned long) cachep))->mapping;
}
/*
......@@ -89,14 +89,14 @@ get_cache_inode(struct cache_head *cachep)
struct cache_head *
smb_get_dircache(struct dentry * dentry)
{
struct address_space * owner = &dentry->d_inode->i_data;
struct address_space * mapping = &dentry->d_inode->i_data;
struct cache_head * cachep;
#ifdef SMBFS_DEBUG_VERBOSE
printk("smb_get_dircache: finding cache for %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
#endif
cachep = (struct cache_head *) get_cached_page(owner, 0, 1);
cachep = (struct cache_head *) get_cached_page(mapping, 0, 1);
if (!cachep)
goto out;
if (cachep->valid)
......@@ -118,7 +118,7 @@ printk("smb_get_dircache: cache %s/%s has existing block!\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
#endif
offset = PAGE_SIZE + (i << PAGE_SHIFT);
block = (struct cache_block *) get_cached_page(owner,
block = (struct cache_block *) get_cached_page(mapping,
offset, 0);
if (!block)
goto out;
......@@ -187,7 +187,7 @@ void
smb_add_to_cache(struct cache_head * cachep, struct cache_dirent *entry,
off_t fpos)
{
struct address_space * owner = get_cache_inode(cachep);
struct address_space * mapping = get_cache_inode(cachep);
struct cache_index * index;
struct cache_block * block;
unsigned long page_off;
......@@ -196,7 +196,7 @@ smb_add_to_cache(struct cache_head * cachep, struct cache_dirent *entry,
#ifdef SMBFS_DEBUG_VERBOSE
printk("smb_add_to_cache: cache %p, status %d, adding ",
owner, cachep->status);
mapping, cachep->status);
printk_name(entry->name, entry->len);
printk(" at %ld\n", fpos);
#endif
......@@ -251,14 +251,14 @@ printk("smb_add_to_cache: new index already has block!\n");
get_block:
cachep->pages++;
page_off = PAGE_SIZE + (cachep->idx << PAGE_SHIFT);
block = (struct cache_block *) get_cached_page(owner, page_off, 1);
block = (struct cache_block *) get_cached_page(mapping, page_off, 1);
if (block)
{
index->block = block;
index->space = PAGE_SIZE;
#ifdef SMBFS_DEBUG_VERBOSE
printk("smb_add_to_cache: owner=%p, pages=%d, block at %ld\n",
owner, cachep->pages, page_off);
printk("smb_add_to_cache: mapping=%p, pages=%d, block at %ld\n",
mapping, cachep->pages, page_off);
#endif
goto add_entry;
}
......
......@@ -84,9 +84,7 @@ extern int console_loglevel;
* see^H^H^Hhear bugs in early bootup as well!
*/
#define BUG() do { \
__asm__ __volatile__ ("movb $0x3,%al; outb %al,$0x61"); \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
console_loglevel = 0; \
__asm__ __volatile__(".byte 0x0f,0x0b"); \
} while (0)
......
......@@ -99,18 +99,21 @@ extern unsigned long ISA_DMA_THRESHOLD;
/* used in nasty hack for sound - see prep_setup_arch() -- Cort */
extern long ppc_cs4232_dma, ppc_cs4232_dma2;
#ifdef CONFIG_CS4232
#if defined(CONFIG_CS4232)
#if defined(CONFIG_PREP) || defined(CONFIG_ALL_PPC)
#define SND_DMA1 ppc_cs4232_dma
#define SND_DMA2 ppc_cs4232_dma2
#else
#ifdef CONFIG_MSS
#else /* !CONFIG_PREP && !CONFIG_ALL_PPC */
#define SND_DMA1 -1
#define SND_DMA2 -1
#endif /* !CONFIG_PREP */
#elif defined(CONFIG_MSS)
#define SND_DMA1 CONFIG_MSS_DMA
#define SND_DMA2 CONFIG_MSS_DMA2
#else
#define SND_DMA1 -1
#define SND_DMA2 -1
#endif
#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
......@@ -203,6 +206,7 @@ static __inline__ void enable_dma(unsigned int dmanr)
*/
unsigned char ucDmaCmd=0x00;
#if defined(CONFIG_PREP) || defined(CONFIG_ALL_PPC)
if(_prep_type==_PREP_Radstone)
{
switch(ucSystemType)
......@@ -227,6 +231,7 @@ static __inline__ void enable_dma(unsigned int dmanr)
}
}
}
#endif /* CONFIG_PREP || CONFIG_ALL_PPC */
if (dmanr != 4)
{
......
......@@ -423,10 +423,10 @@ extern struct pgtable_cache_struct {
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
extern unsigned long zero_sz; /* # currently pre-zero'd pages */
extern unsigned long zeropage_hits; /* # zero'd pages request that we've done */
extern unsigned long zeropage_calls; /* # zero'd pages request that've been made */
extern unsigned long zerototal; /* # pages zero'd over time */
extern atomic_t zero_sz; /* # currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
extern atomic_t zerototal; /* # pages zero'd over time */
#define zero_quicklist (zero_cache)
#define zero_cache_sz (zero_sz)
......@@ -440,12 +440,9 @@ extern unsigned long get_zero_page_fast(void);
extern __inline__ pgd_t *get_pgd_slow(void)
{
pgd_t *ret, *init;
if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )
{
if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
/*if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )*/
if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
if (ret) {
init = pgd_offset(&init_mm, 0);
memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
......@@ -489,7 +486,7 @@ extern __inline__ pte_t *get_pte_fast(void)
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
}
return (pte_t *)ret;
}
......
......@@ -117,6 +117,7 @@
#define AURORA_MAJOR 79
#define RTF_MAJOR 150
#define RAW_MAJOR 162
#define UNIX98_PTY_MASTER_MAJOR 128
......
......@@ -49,6 +49,7 @@
#include <linux/kmod.h>
#endif
extern int console_loglevel;
extern void set_device_ro(kdev_t dev,int flag);
extern struct file_operations * get_blkfops(unsigned int);
extern int blkdev_release(struct inode * inode);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment