Commit 338322e6 authored by Linus Torvalds

Import 2.3.29pre3

parent 9a0a7855
......@@ -9,10 +9,8 @@
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
#include <linux/mm.h>
#include <asm/segment.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
/*
* gzip declarations
*/
......
......@@ -37,7 +37,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>
......
......@@ -1818,6 +1818,7 @@ int __init mtrr_init(void)
# ifdef CONFIG_PROC_FS
proc_root_mtrr = create_proc_entry("mtrr", S_IWUSR|S_IRUGO, &proc_root);
proc_root_mtrr->ops = &proc_mtrr_inode_operations;
#endif
init_table ();
return 0;
} /* End Function mtrr_init */
......@@ -637,7 +637,6 @@ void __init setup_arch(char **cmdline_p)
highstart_pfn = highend_pfn = max_pfn;
if (max_pfn > MAXMEM_PFN) {
highstart_pfn = MAXMEM_PFN;
highend_pfn = max_pfn;
printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
pages_to_mb(highend_pfn - highstart_pfn));
}
......
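As an illustrative aside (standalone C, not part of the commit): the highstart_pfn/highend_pfn split above is plain page-frame arithmetic. The sketch below reproduces it with made-up values for PAGE_SHIFT, MAXMEM_PFN and the total RAM size, including the pages_to_mb() conversion used by the printk.

#include <stdio.h>

#define PAGE_SHIFT   12                                   /* 4 KB pages, as on i386 */
#define MAXMEM_PFN   (896UL << (20 - PAGE_SHIFT))         /* example: 896 MB of lowmem */
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

int main(void)
{
    unsigned long max_pfn = 2048UL << (20 - PAGE_SHIFT);  /* pretend 2 GB of RAM */
    unsigned long highstart_pfn = max_pfn, highend_pfn = max_pfn;

    if (max_pfn > MAXMEM_PFN) {
        highstart_pfn = MAXMEM_PFN;    /* lowmem ends here ...                   */
        highend_pfn = max_pfn;         /* ... and highmem runs to the end of RAM */
        printf("%luMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
    }
    return 0;
}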
......@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
/*
* Some notes on processor bugs:
......
......@@ -44,6 +44,7 @@
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
/* Set if we find a B stepping CPU */
static int smp_b_stepping = 0;
......@@ -649,10 +650,11 @@ void __init smp_alloc_memory(void)
void __init smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c=&cpu_data[id];
struct cpuinfo_x86 *c = cpu_data + id;
*c = boot_cpu_data;
c->pte_quick = 0;
c->pmd_quick = 0;
c->pgd_quick = 0;
c->pgtable_cache_sz = 0;
identify_cpu(c);
......@@ -719,7 +721,7 @@ void __init setup_local_APIC(void)
* Enable APIC
*/
value |= (1<<8);
#if 0
#if 1
/* Enable focus processor (bit==0) */
value &= ~(1<<9);
#else
......@@ -821,8 +823,7 @@ void __init init_smp_mappings(void)
* could use the real zero-page, but it's safer
* this way if some buggy code writes to this page ...
*/
apic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)apic_phys, 0, PAGE_SIZE);
apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
apic_phys = __pa(apic_phys);
}
set_fixmap(FIX_APIC_BASE, apic_phys);
......@@ -837,8 +838,7 @@ void __init init_smp_mappings(void)
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mpc_apicaddr;
} else {
ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)ioapic_phys, 0, PAGE_SIZE);
ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap(idx,ioapic_phys);
......
......@@ -35,6 +35,7 @@
#include <asm/desc.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_X86_VISWS_APIC
#include <asm/fixmap.h>
......
......@@ -14,7 +14,7 @@
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
/*
......
......@@ -19,7 +19,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
extern void die(const char *,struct pt_regs *,long);
......
......@@ -30,6 +30,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
......@@ -285,7 +286,6 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
#if CONFIG_X86_PAE
if (pgd_none(*pgd)) {
pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
memset((void*)pmd, 0, PAGE_SIZE);
pgd_val(*pgd) = __pa(pmd) + 0x1;
if (pmd != pmd_offset(pgd, start))
BUG();
......@@ -297,7 +297,6 @@ static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t
for (; (j < PTRS_PER_PMD) && start; pmd++, j++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
memset((void*)pte, 0, PAGE_SIZE);
pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
if (pte != pte_offset(pmd, 0))
BUG();
......@@ -327,7 +326,6 @@ static void __init pagetable_init(void)
vaddr = i*PGDIR_SIZE;
#if CONFIG_X86_PAE
pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
memset((void*)pmd, 0, PAGE_SIZE);
pgd_val(*pgd) = __pa(pmd) + 0x1;
#else
pmd = (pmd_t *)pgd;
......@@ -352,7 +350,6 @@ static void __init pagetable_init(void)
}
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
memset((void*)pte, 0, PAGE_SIZE);
pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
if (pte != pte_offset(pmd, 0))
......@@ -412,7 +409,11 @@ void __init zap_low_mappings (void)
* that case).
*/
for (i = 0; i < USER_PTRS_PER_PGD; i++)
#if CONFIG_X86_PAE
pgd_clear(swapper_pg_dir+i);
#else
pgd_val(swapper_pg_dir[i]) = 0;
#endif
flush_tlb_all();
}
......@@ -448,13 +449,23 @@ void __init paging_init(void)
kmap_init();
#endif
{
unsigned int zones_size[3];
zones_size[0] = virt_to_phys((char *)MAX_DMA_ADDRESS)
>> PAGE_SHIFT;
zones_size[1] = max_low_pfn - zones_size[0];
zones_size[2] = highend_pfn - zones_size[0] - zones_size[1];
unsigned int zones_size[MAX_NR_ZONES] = {0, 0, 0};
unsigned int max_dma, high, low;
unsigned int align = (1 << (MAX_ORDER-1))-1;
max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
low = (max_low_pfn + align) & ~align;
high = (highend_pfn + align) & ~align;
if (low < max_dma)
zones_size[ZONE_DMA] = low;
else {
zones_size[ZONE_DMA] = max_dma;
zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = high - low;
#endif
}
free_area_init(zones_size);
}
return;
......@@ -533,15 +544,13 @@ static inline int page_is_ram (unsigned long pagenr)
void __init mem_init(void)
{
int codepages = 0;
int reservedpages = 0;
int datapages = 0;
int initpages = 0;
#ifdef CONFIG_HIGHMEM
int codesize, reservedpages, datasize, initsize;
int tmp;
if (!mem_map)
BUG();
#ifdef CONFIG_HIGHMEM
highmem_start_page = mem_map + highstart_pfn;
/* cache the highmem_mapnr */
highmem_mapnr = highstart_pfn;
......@@ -557,6 +566,13 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
/*
* Only count reserved RAM pages
*/
if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
reservedpages++;
#ifdef CONFIG_HIGHMEM
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = mem_map + tmp;
......@@ -573,19 +589,23 @@ void __init mem_init(void)
}
totalram_pages += totalhigh_pages;
#endif
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
);
#if CONFIG_X86_PAE
if (!cpu_has_pae)
panic("cannot execute a PAE-enabled kernel on a PAE-incapable CPU!");
panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
if (boot_cpu_data.wp_works_ok < 0)
test_wp_bit();
......
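As an illustrative aside (standalone C, not kernel code): the zone sizing added to paging_init() above rounds the lowmem and highmem boundaries up to a MAX_ORDER block and then splits them into DMA/NORMAL/HIGHMEM page counts. The sketch reproduces just that arithmetic with example pfn values; the real code feeds the result to free_area_init().

#include <stdio.h>

#define PAGE_SHIFT 12
#define MAX_ORDER  10
#define ZONE_DMA     0
#define ZONE_NORMAL  1
#define ZONE_HIGHMEM 2
#define MAX_NR_ZONES 3

int main(void)
{
    unsigned int zones_size[MAX_NR_ZONES] = {0, 0, 0};
    unsigned int align = (1 << (MAX_ORDER - 1)) - 1;

    unsigned int max_dma     = 16u   << (20 - PAGE_SHIFT); /* 16 MB ISA DMA limit */
    unsigned int max_low_pfn = 896u  << (20 - PAGE_SHIFT); /* example lowmem end  */
    unsigned int highend_pfn = 2048u << (20 - PAGE_SHIFT); /* example RAM end     */

    /* round both boundaries up to a MAX_ORDER-sized block */
    unsigned int low  = (max_low_pfn + align) & ~align;
    unsigned int high = (highend_pfn + align) & ~align;

    if (low < max_dma)
        zones_size[ZONE_DMA] = low;            /* everything fits in the DMA zone */
    else {
        zones_size[ZONE_DMA]     = max_dma;
        zones_size[ZONE_NORMAL]  = low - max_dma;
        zones_size[ZONE_HIGHMEM] = high - low;
    }

    printf("DMA=%u NORMAL=%u HIGHMEM=%u pages\n",
           zones_size[ZONE_DMA], zones_size[ZONE_NORMAL], zones_size[ZONE_HIGHMEM]);
    return 0;
}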
......@@ -10,6 +10,7 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
......
......@@ -130,7 +130,7 @@
* 3.15 July 2, 1996 -- Added support for Sanyo 3 CD changers
* from Ben Galliart <bgallia@luc.edu> with
* special help from Jeff Lightfoot
* <jeffml@netcom.com>
* <jeffml@pobox.com>
* 3.15a July 9, 1996 -- Improved Sanyo 3 CD changer identification
* 3.16 Jul 28, 1996 -- Fix from Gadi to reduce kernel stack usage for ioctl.
* 3.17 Sep 17, 1996 -- Tweak audio reads for some drives.
......
......@@ -21,7 +21,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_SOUND
void soundcore_init(void);
......
......@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/netlink.h>
extern int plip_init(void);
extern int mkiss_init_ctrl_dev(void);
extern int ppp_init(void);
extern int slip_init_ctrl_dev(void);
......
......@@ -35,7 +35,7 @@ if [ "$CONFIG_PARPORT" != "n" ]; then
else
define_tristate CONFIG_PARPORT_ATARI n
fi
if [ "$CONFIG_SBUS" = "y" ]; then
if [ "$CONFIG_SBUS" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
dep_tristate ' Sparc hardware (EXPERIMENTAL)' CONFIG_PARPORT_SUNBPP $CONFIG_PARPORT
else
define_tristate CONFIG_PARPORT_SUNBPP n
......
......@@ -25,7 +25,6 @@
$Header: /vger/u4/cvs/linux/drivers/scsi/hosts.h,v 1.6 1997/01/19 23:07:13 davem Exp $
*/
#include <linux/config.h>
#include <linux/proc_fs.h>
/* It is senseless to set SG_ALL any higher than this - the performance
......
#
# USB device configuration
#
# NOTE NOTE NOTE! This is still considered extremely experimental.
# Right now hubs, mice and keyboards work - at least with UHCI.
# But that may be more a lucky coincidence than anything else..
#
mainmenu_option next_comment
comment 'USB drivers - not for the faint of heart'
comment 'Support for USB'
tristate 'Support for USB (EXPERIMENTAL!)' CONFIG_USB
tristate 'Support for USB' CONFIG_USB
if [ ! "$CONFIG_USB" = "n" ]; then
comment 'USB Controllers'
dep_tristate ' UHCI (Intel PIIX4 and others) support' CONFIG_USB_UHCI \
......
......@@ -674,39 +674,6 @@ fbmem_init(void)
fb_drivers[i].init();
}
int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
const struct fb_info *fb_info)
{
#if 0
/*
* long long divisions .... $#%%#$
*/
unsigned long long hpicos, vpicos;
const unsigned long long _1e12 = 1000000000000ULL;
const struct fb_monspecs *monspecs = &fb_info->monspecs;
hpicos = (unsigned long long)htotal*(unsigned long long)pixclock;
vpicos = (unsigned long long)vtotal*(unsigned long long)hpicos;
if (!vpicos)
return 0;
if (monspecs->hfmin == 0)
return 1;
if (hpicos*monspecs->hfmin > _1e12 || hpicos*monspecs->hfmax < _1e12 ||
vpicos*monspecs->vfmin > _1e12 || vpicos*monspecs->vfmax < _1e12)
return 0;
#endif
return 1;
}
int fbmon_dpms(const struct fb_info *fb_info)
{
return fb_info->monspecs.dpms;
}
/*
* Command line options
*/
......
/*
* linux/drivers/video/fbmon.c
*
* Copyright (C) 1999 James Simmons
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
* Notes:
* This code handles the different types of monitors that are out there.
* Most video cards, for example, can support a mode like 800x600, but fixed
* frequency monitors can't. So the code here checks that the monitor can
* support the mode as well as the card can. Fbmonospecs takes on a
* different meaning with different types of monitors. For multifrequency
* monitors fbmonospecs represents the range of frequencies the monitor
* can support. Only one fbmonospec needs to be allocated; the fbmonospecs
* pointer in fb_info points to this one. If you specify a mode whose
* timing falls outside the allowed range then setting the video mode will
* fail. With multifrequency monitors you can set any mode you like as long
* as you have a programmable clock on the video card.
* With fixed frequency monitors you have only a SET of very narrow
* allowed frequency ranges, so for a fixed frequency monitor you have an
* array of fbmonospecs. The fbmonospecs in fb_info represents the
* monitor frequency for the CURRENT mode. If you change the mode and ask
* for fbmonospecs you will NOT get the same values as before. Note this
* is not true for multifrequency monitors, where you do get the same
* fbmonospecs each time. The values in each fbmonospecs represent the
* very narrow frequency band for that range. You can't get exactly the
* same frequencies from a fixed frequency monitor, so some tolerance is
* accepted.
* By DEFAULT all monitors are assumed fixed frequency, since they are so
* easy to fry or screw up a mode with; just try setting an 800x600 mode on
* one. After you boot you can run a simple program that tells you what kind
* of monitor you have. If you have a multifrequency monitor then you can set
* any mode size you like as long as your video card has a programmable clock.
* Besides assuming a fixed frequency monitor, the default also
* assumes the monitor only supports lower modes. This way, for example, you
* can't set a 1280x1024 mode on a fixed frequency monitor that can only
* support up to 1024x768.
*
*/
#include <linux/fb.h>
int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
const struct fb_info *fb_info)
{
#if 0
/*
* long long divisions .... $#%%#$
*/
unsigned long long hpicos, vpicos;
const unsigned long long _1e12 = 1000000000000ULL;
const struct fb_monspecs *monspecs = &fb_info->monspecs;
hpicos = (unsigned long long)htotal*(unsigned long long)pixclock;
vpicos = (unsigned long long)vtotal*(unsigned long long)hpicos;
if (!vpicos)
return 0;
if (monspecs->hfmin == 0)
return 1;
if (hpicos*monspecs->hfmin > _1e12 || hpicos*monspecs->hfmax < _1e12 ||
vpicos*monspecs->vfmin > _1e12 || vpicos*monspecs->vfmax < _1e12)
return 0;
#endif
return 1;
}
int fbmon_dpms(const struct fb_info *fb_info)
{
return fb_info->monspecs->dpms;
}
......@@ -12,14 +12,14 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'ADFS filesystem support (read only) (EXPERIMENTAL)' CONFIG_ADFS_FS
fi
tristate 'Amiga FFS filesystem support' CONFIG_AFFS_FS
tristate 'Apple Macintosh filesystem support (EXPERIMENTAL)' CONFIG_HFS_FS
# msdos filesystems
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'Apple Macintosh filesystem support (EXPERIMENTAL)' CONFIG_HFS_FS
tristate 'BFS filesystem (read only) support (EXPERIMENTAL)' CONFIG_BFS_FS
if [ "$CONFIG_BFS_FS" != "n" ]; then
bool ' BFS filesystem write support (DANGEROUS)' CONFIG_BFS_FS_WRITE
fi
fi
# msdos filesystems
tristate 'DOS FAT fs support' CONFIG_FAT_FS
dep_tristate ' MSDOS fs support' CONFIG_MSDOS_FS $CONFIG_FAT_FS
dep_tristate ' UMSDOS: Unix-like filesystem on top of standard MSDOS filesystem' CONFIG_UMSDOS_FS $CONFIG_MSDOS_FS
......
......@@ -27,7 +27,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(int fd);
......
......@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/config.h>
......
......@@ -35,7 +35,7 @@
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_KMOD
......
......@@ -522,7 +522,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (!page)
goto read_really;
ctl.cache = cache = (union ncp_dir_cache *) page_address(page);
ctl.cache = cache = (union ncp_dir_cache *) kmap(page);
ctl.head = cache->head;
if (!Page_Uptodate(page) || !ctl.head.eof)
......@@ -550,10 +550,10 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
ctl.page = ncp_get_cache_page(inode, ctl.ofs, 1);
if (!ctl.page)
goto invalid_cache;
ctl.cache = (union ncp_dir_cache *)
kmap(ctl.page);
if (!Page_Uptodate(ctl.page))
goto invalid_cache;
ctl.cache = (union ncp_dir_cache *)
page_address(ctl.page);
}
while (ctl.idx < NCP_DIRCACHE_SIZE) {
struct dentry *dent;
......@@ -575,6 +575,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
goto finished;
}
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
UnlockPage(ctl.page);
page_cache_release(ctl.page);
......@@ -585,6 +586,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
invalid_cache:
if (ctl.page) {
kunmap(ctl.page);
UnlockPage(ctl.page);
page_cache_release(ctl.page);
ctl.page = NULL;
......@@ -614,12 +616,14 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
ctl.head.eof = ctl.valid;
finished:
if (page) {
kunmap(page);
cache->head = ctl.head;
SetPageUptodate(page);
UnlockPage(page);
page_cache_release(page);
}
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
UnlockPage(ctl.page);
page_cache_release(ctl.page);
......@@ -680,6 +684,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
if (ctl.idx >= NCP_DIRCACHE_SIZE) {
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
UnlockPage(ctl.page);
page_cache_release(ctl.page);
......@@ -690,7 +695,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
ctl.page = ncp_get_cache_page(inode, ctl.ofs, 0);
if (ctl.page)
ctl.cache = (union ncp_dir_cache *)
page_address(ctl.page);
kmap(ctl.page);
}
if (ctl.cache) {
ctl.cache->dentry[ctl.idx] = newdent;
......
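The pattern running through this hunk (and the NFS hunks below) is mechanical: every page_address() on a page that may now come from highmem becomes a kmap() before the access and a kunmap() after it. A hedged userspace sketch of that bracketing discipline, with kmap/kunmap reduced to trivial local stand-ins (the real kernel functions create and tear down a temporary virtual mapping):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct page { void *data; };                 /* stand-in for the kernel's struct page */

static void *kmap(struct page *p)   { return p->data; }  /* real kmap may map highmem   */
static void  kunmap(struct page *p) { (void)p; }          /* real kunmap drops that map  */

static void fill_cache_entry(struct page *pg, const char *name)
{
    /* map, use the mapping only between kmap and kunmap, then unmap */
    char *addr = kmap(pg);
    strncpy(addr, name, 63);
    kunmap(pg);
}

int main(void)
{
    struct page pg = { .data = calloc(1, 64) };
    fill_cache_entry(&pg, "dentry-name");
    printf("%s\n", (char *)kmap(&pg));
    kunmap(&pg);
    free(pg.data);
    return 0;
}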
......@@ -43,10 +43,11 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
int bufsize;
int pos;
page = alloc_page(GFP_KERNEL);
page = alloc_page(GFP_HIGHMEM); /* ncpfs has nothing against GFP_HIGHMEM
as long as recvmsg and memset work on it */
if (!page)
return page;
pg_addr = page_address(page);
pg_addr = kmap(page);
address &= PAGE_MASK;
pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
......@@ -87,6 +88,7 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
if (already_read < PAGE_SIZE)
memset((char*)(pg_addr + already_read), 0,
PAGE_SIZE - already_read);
kunmap(page);
return page;
}
......
......@@ -346,8 +346,9 @@ static struct page *try_to_get_dirent_page(struct file *file, __u32 cookie, int
goto repeat;
}
kmap(page);
rd_args.fh = NFS_FH(dentry);
rd_res.buffer = (char *)page_address(page_cache);
rd_res.buffer = (char *)page_address(page);
rd_res.bufsiz = PAGE_CACHE_SIZE;
rd_res.cookie = *cookiep;
do {
......@@ -365,6 +366,8 @@ static struct page *try_to_get_dirent_page(struct file *file, __u32 cookie, int
goto error;
SetPageUptodate(page);
unmap_out:
kunmap(page);
unlock_out:
UnlockPage(page);
out:
......@@ -372,7 +375,7 @@ static struct page *try_to_get_dirent_page(struct file *file, __u32 cookie, int
error:
SetPageError(page);
goto unlock_out;
goto unmap_out;
}
/* Seek up to the dirent associated with the passed-in cookie,
......@@ -438,8 +441,10 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (!Page_Uptodate(page))
goto dirent_read_error;
success:
kmap(page);
filp->f_pos = nfs_do_filldir((__u32 *) page_address(page),
filp->f_pos, dirent, filldir);
kunmap(page);
page_cache_release(page);
return 0;
......
......@@ -171,7 +171,8 @@ static int nfs_write_one_page(struct file *file, struct page *page, unsigned lon
{
long status;
bytes -= copy_from_user((u8*)page_address(page) + offset, buf, bytes);
bytes -= copy_from_user((u8*)kmap(page) + offset, buf, bytes);
kunmap(page);
status = -EFAULT;
if (bytes) {
lock_kernel();
......
......@@ -71,7 +71,7 @@ nfs_readpage_sync(struct dentry *dentry, struct inode *inode, struct page *page)
{
struct nfs_rreq rqst;
unsigned long offset = page->index << PAGE_CACHE_SHIFT;
char *buffer = (char *) page_address(page);
char *buffer;
int rsize = NFS_SERVER(inode)->rsize;
int result, refresh = 0;
int count = PAGE_SIZE;
......@@ -79,6 +79,12 @@ nfs_readpage_sync(struct dentry *dentry, struct inode *inode, struct page *page)
dprintk("NFS: nfs_readpage_sync(%p)\n", page);
/*
* This works now because the socket layer never tries to DMA
* into this buffer directly.
*/
buffer = (char *) kmap(page);
do {
if (count < rsize)
rsize = count;
......@@ -116,6 +122,7 @@ nfs_readpage_sync(struct dentry *dentry, struct inode *inode, struct page *page)
result = 0;
io_error:
kunmap(page);
UnlockPage(page);
/* Note: we don't refresh if the call returned error */
if (refresh && result >= 0)
......@@ -152,6 +159,7 @@ nfs_readpage_result(struct rpc_task *task)
fail++;
dprintk("NFS: %d successful reads, %d failures\n", succ, fail);
}
kunmap(page);
UnlockPage(page);
free_page(address);
......@@ -163,7 +171,7 @@ static inline int
nfs_readpage_async(struct dentry *dentry, struct inode *inode,
struct page *page)
{
unsigned long address = page_address(page);
unsigned long address;
struct nfs_rreq *req;
int result = -1, flags;
......@@ -177,6 +185,7 @@ nfs_readpage_async(struct dentry *dentry, struct inode *inode,
if (!req)
goto out_defer;
address = kmap(page);
/* Initialize request */
/* N.B. Will the dentry remain valid for life of request? */
nfs_readreq_setup(req, NFS_FH(dentry), page->index << PAGE_CACHE_SHIFT,
......@@ -200,6 +209,7 @@ nfs_readpage_async(struct dentry *dentry, struct inode *inode,
goto out;
out_free:
dprintk("NFS: failed to enqueue async READ request.\n");
kunmap(page);
kfree(req);
goto out;
}
......@@ -254,7 +264,7 @@ nfs_readpage(struct file *file, struct page *page)
out_error:
UnlockPage(page);
out_free:
free_page(page_address(page));
__free_page(page);
out:
unlock_kernel();
return error;
......
......@@ -80,6 +80,8 @@ static struct page *try_to_get_symlink_page(struct dentry *dentry, struct inode
goto repeat;
}
kmap(page);
/* We place the length at the beginning of the page,
* in host byte order, followed by the string. The
* XDR response verification will NULL terminate it.
......@@ -91,6 +93,7 @@ static struct page *try_to_get_symlink_page(struct dentry *dentry, struct inode
goto error;
SetPageUptodate(page);
unlock_out:
kunmap(page);
UnlockPage(page);
out:
return page;
......@@ -113,11 +116,12 @@ static int nfs_readlink(struct dentry *dentry, char *buffer, int buflen)
if (!Page_Uptodate(page))
goto readlink_read_error;
success:
p = (u32 *) page_address(page);
p = (u32 *) kmap(page);
len = *p++;
if (len > buflen)
len = buflen;
copy_to_user(buffer, p, len);
kunmap(page);
page_cache_release(page);
return len;
......@@ -148,8 +152,9 @@ nfs_follow_link(struct dentry *dentry, struct dentry *base, unsigned int follow)
if (!Page_Uptodate(page))
goto followlink_read_error;
success:
p = (u32 *) page_address(page);
p = (u32 *) kmap(page);
result = lookup_dentry((char *) (p + 1), base, follow);
kunmap(page);
page_cache_release(page);
return result;
......
......@@ -99,7 +99,7 @@ nfs_writepage_sync(struct dentry *dentry, struct inode *inode,
dentry->d_parent->d_name.name, dentry->d_name.name,
count, page->index, offset);
buffer = (u8 *) page_address(page) + offset;
buffer = (u8 *) kmap(page) + offset;
offset += page->index << PAGE_CACHE_SHIFT;
do {
......@@ -132,6 +132,7 @@ nfs_writepage_sync(struct dentry *dentry, struct inode *inode,
} while (count);
io_error:
kunmap(page);
/* Note: we don't refresh if the call failed (fattr invalid) */
if (refresh && result >= 0) {
/* See comments in nfs_wback_result */
......@@ -314,6 +315,7 @@ create_write_request(struct file * file, struct page *page, unsigned int offset,
wreq->wb_bytes = bytes;
wreq->wb_count = 2; /* One for the IO, one for us */
kmap(page);
append_write_request(&NFS_WRITEBACK(inode), wreq);
if (nr_write_requests++ > NFS_WRITEBACK_MAX*3/4)
......@@ -687,6 +689,7 @@ nfs_wback_result(struct rpc_task *task)
if (WB_INVALIDATE(req))
ClearPageUptodate(page);
kunmap(page);
__free_page(page);
remove_write_request(&NFS_WRITEBACK(inode), req);
nr_write_requests--;
......
......@@ -277,10 +277,6 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
return 1; /* halt enum */
}
}
else
{
udf_debug("size=%d, nf_pos=%d, liu=%d, lfi=%d\n", size, nf_pos, liu, lfi);
}
}
} /* end while */
......
......@@ -84,7 +84,7 @@ Uint8 * udf_filead_read(struct inode *dir, Uint8 *tmpad, Uint8 ad_size,
}
struct FileIdentDesc *
udf_fileident_read(struct inode *dir, int *nf_pos,
udf_fileident_read(struct inode *dir, loff_t *nf_pos,
struct udf_fileident_bh *fibh,
struct FileIdentDesc *cfi,
lb_addr *bloc, Uint32 *extoffset,
......
......@@ -201,8 +201,8 @@ struct buffer_head * udf_expand_adinicb(struct inode *inode, int *block, int isd
if (isdir)
{
struct udf_fileident_bh sfibh, dfibh;
int f_pos = UDF_I_EXT0OFFS(inode) >> 2;
int size = (UDF_I_EXT0OFFS(inode) + inode->i_size) >> 2;
loff_t f_pos = UDF_I_EXT0OFFS(inode) >> 2;
loff_t size = (UDF_I_EXT0OFFS(inode) + inode->i_size) >> 2;
struct FileIdentDesc cfi, *sfi, *dfi;
sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
......
......@@ -123,7 +123,7 @@ udf_get_last_block(kdev_t dev, int *flags)
BLKGETSIZE,
(unsigned long) &lblock);
if (!ret && block != 0x7FFFFFFF) /* Hard Disk */
if (!ret && lblock != 0x7FFFFFFF) /* Hard Disk */
{
if (mult)
lblock *= mult;
......
......@@ -25,6 +25,8 @@
*
*/
#include "udfdecl.h"
#if defined(__linux__) && defined(__KERNEL__)
#include <linux/version.h>
#include "udf_i.h"
......@@ -36,8 +38,6 @@
#include <linux/udf_fs.h>
#endif
#include "udfdecl.h"
static inline int udf_match(int len, const char * const name, struct qstr *qs)
{
if (len != qs->len)
......@@ -333,10 +333,10 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
struct ustr unifilename;
char name[UDF_NAME_LEN], fname[UDF_NAME_LEN];
int namelen;
int f_pos;
loff_t f_pos;
int flen;
char *nameptr;
int size = (UDF_I_EXT0OFFS(dir) + dir->i_size) >> 2;
loff_t size = (UDF_I_EXT0OFFS(dir) + dir->i_size) >> 2;
int nfidlen;
Uint8 lfi;
Uint16 liu;
......@@ -825,8 +825,8 @@ static int empty_dir(struct inode *dir)
{
struct FileIdentDesc *fi, cfi;
struct udf_fileident_bh fibh;
int f_pos;
int size = (UDF_I_EXT0OFFS(dir) + dir->i_size) >> 2;
loff_t f_pos;
loff_t size = (UDF_I_EXT0OFFS(dir) + dir->i_size) >> 2;
int block;
lb_addr bloc, eloc;
Uint32 extoffset, elen, offset;
......
......@@ -184,7 +184,7 @@ extern int udf_sync_file(struct file *, struct dentry *);
/* directory.c */
extern Uint8 * udf_filead_read(struct inode *, Uint8 *, Uint8, lb_addr, int *, int *, struct buffer_head **, int *);
extern struct FileIdentDesc * udf_fileident_read(struct inode *, int *, struct udf_fileident_bh *, struct FileIdentDesc *, lb_addr *, Uint32 *, Uint32 *, struct buffer_head **);
extern struct FileIdentDesc * udf_fileident_read(struct inode *, loff_t *, struct udf_fileident_bh *, struct FileIdentDesc *, lb_addr *, Uint32 *, Uint32 *, struct buffer_head **);
#endif /* __KERNEL__ */
......
......@@ -37,7 +37,20 @@ extern pte_t *pkmap_page_table;
extern void kmap_init(void) __init;
/*
* Right now we initialize only a single pte table. It can be extended
* easily; subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
#define PKMAP_BASE (0xff000000UL)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern unsigned long FASTCALL(kmap_high(struct page *page));
extern void FASTCALL(kunmap_high(struct page *page));
......
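As a quick sanity check (standalone C, not kernel code): PKMAP_NR() and PKMAP_ADDR() above are inverse conversions between a slot index and a virtual address in the fixed window that starts at PKMAP_BASE. The sketch uses the non-PAE constants from the header and asserts the round trip.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PKMAP_BASE      0xff000000UL
#define LAST_PKMAP      1024                   /* non-PAE value from the header */
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
    for (unsigned long nr = 0; nr < LAST_PKMAP; nr++) {
        unsigned long addr = PKMAP_ADDR(nr);
        assert(PKMAP_NR(addr) == nr);          /* the two macros invert each other */
        assert((PKMAP_NR(addr) & LAST_PKMAP_MASK) == nr);
    }
    printf("pkmap window: %#lx - %#lx (%d pages)\n",
           PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP), LAST_PKMAP);
    return 0;
}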
......@@ -103,7 +103,6 @@ __OUTS(l)
#ifdef __KERNEL__
#include <asm/page.h>
#include <linux/vmalloc.h>
/*
......
#ifndef _I386_PGALLOC_2LEVEL_H
#define _I386_PGALLOC_2LEVEL_H
/*
* traditional i386 two-level paging, page table allocation routines:
*/
extern __inline__ pmd_t *get_pmd_fast(void)
{
return (pmd_t *)0;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
if (!pgd)
BUG();
return (pmd_t *) pgd;
}
#endif /* _I386_PGALLOC_2LEVEL_H */
#ifndef _I386_PGALLOC_3LEVEL_H
#define _I386_PGALLOC_3LEVEL_H
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs. Page-table allocation routines.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
extern __inline__ pmd_t *get_pmd_slow(void)
{
pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL);
if (ret)
memset(ret, 0, PAGE_SIZE);
return ret;
}
extern __inline__ pmd_t *get_pmd_fast(void)
{
unsigned long *ret;
if ((ret = pmd_quicklist) != NULL) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pmd_slow();
return (pmd_t *)ret;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
pgtable_cache_size++;
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
if (!pgd)
BUG();
address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
if (pgd_none(*pgd)) {
pmd_t *page = get_pmd_fast();
if (!page)
page = get_pmd_slow();
if (page) {
if (pgd_none(*pgd)) {
pgd_val(*pgd) = 1 + __pa(page);
__flush_tlb();
return page + address;
} else
free_pmd_fast(page);
} else
return NULL;
}
return (pmd_t *)pgd_page(*pgd) + address;
}
#endif /* _I386_PGALLOC_3LEVEL_H */
#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
#if CONFIG_X86_PAE
# include <asm/pgalloc-3level.h>
#else
# include <asm/pgalloc-2level.h>
#endif
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
extern __inline__ pgd_t *get_pgd_slow(void)
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
#if CONFIG_X86_PAE
int i;
for (i = 0; i < USER_PTRS_PER_PGD; i++)
__pgd_clear(ret + i);
#else
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
#endif
memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ pte_t *get_pte_fast(void)
{
unsigned long *ret;
if((ret = (unsigned long *)pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
extern __inline__ void free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
if (!pmd)
BUG();
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
pte_t * page = (pte_t *) get_pte_fast();
if (!page)
return get_pte_kernel_slow(pmd, address);
pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
return page + address;
}
if (pmd_bad(*pmd)) {
__handle_bad_pmd_kernel(pmd);
return NULL;
}
return (pte_t *) pmd_page(*pmd) + address;
}
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd))
goto getnew;
if (pmd_bad(*pmd))
goto fix;
return (pte_t *)pmd_page(*pmd) + address;
getnew:
{
unsigned long page = (unsigned long) get_pte_fast();
if (!page)
return get_pte_slow(pmd, address);
pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
return (pte_t *)page + address;
}
fix:
__handle_bad_pmd(pmd);
return NULL;
}
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
* (In the PAE case we free the page.)
*/
#define pmd_free(pmd) free_pmd_slow(pmd)
#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc
extern int do_check_pgt_cache(int, int);
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
read_lock(&tasklist_lock);
for_each_task(p) {
if (!p->mm)
continue;
*pgd_offset(p->mm,address) = entry;
}
read_unlock(&tasklist_lock);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#else
/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
modify pgd caches of other CPUs as well. -jj */
for (i = 0; i < NR_CPUS; i++)
for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
}
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
#ifndef __SMP__
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm == current->active_mm)
__flush_tlb();
}
#else
/*
* We aren't very clever about this yet - SMP could certainly
* avoid some global flushes..
*/
#include <asm/smp.h>
#define local_flush_tlb() \
__flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
#define flush_tlb() flush_tlb_current_task()
static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
flush_tlb_mm(mm);
}
#endif
#endif /* _I386_PGALLOC_H */
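A hedged userspace model of the quicklist trick used by the get_*_fast()/free_*_fast() helpers above: freed pages are cached on a singly linked list whose next pointer is stored in the first word of each free page. Here malloc/calloc stand in for __get_free_page(), and the list head is a plain global instead of per-CPU data.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

static unsigned long *quicklist;     /* head of the freed-page cache */
static int cache_size;

static void *get_page_slow(void)     /* models __get_free_page + memset */
{
    return calloc(1, PAGE_SIZE);
}

static void *get_page_fast(void)
{
    unsigned long *ret = quicklist;
    if (ret) {
        quicklist = (unsigned long *)*ret;   /* pop: next pointer lives in word 0 */
        ret[0] = 0;                          /* clear the link before reuse       */
        cache_size--;
        return ret;
    }
    return get_page_slow();
}

static void free_page_fast(void *page)
{
    *(unsigned long *)page = (unsigned long)quicklist;  /* push onto the list */
    quicklist = page;
    cache_size++;
}

int main(void)
{
    void *a = get_page_fast();       /* slow path: cache is empty */
    free_page_fast(a);               /* goes onto the quicklist   */
    void *b = get_page_fast();       /* fast path: same page back */
    printf("reused=%d cache_size=%d\n", a == b, cache_size);
    free(b);
    return 0;
}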
......@@ -42,19 +42,4 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
return (pmd_t *) dir;
}
extern __inline__ pmd_t *get_pmd_fast(void)
{
return (pmd_t *)0;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
if (!pgd)
BUG();
return (pmd_t *) pgd;
}
#endif /* _I386_PGTABLE_2LEVEL_H */
......@@ -27,11 +27,11 @@
#define PTRS_PER_PTE 512
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
printk("%s:%d: bad pte %p(%016Lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016Lx.\n", __FILE__, __LINE__, pmd_val(e))
printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016Lx.\n", __FILE__, __LINE__, pgd_val(e))
printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
/*
* Subtle, in PAE mode we cannot have zeroes in the top level
......@@ -64,61 +64,4 @@ extern inline void pgd_clear (pgd_t * pgd)
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
__pmd_offset(address))
extern __inline__ pmd_t *get_pmd_slow(void)
{
pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL);
if (ret)
memset(ret, 0, PAGE_SIZE);
return ret;
}
extern __inline__ pmd_t *get_pmd_fast(void)
{
unsigned long *ret;
if ((ret = pmd_quicklist) != NULL) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pmd_slow();
return (pmd_t *)ret;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
pgtable_cache_size++;
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
if (!pgd)
BUG();
address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
if (pgd_none(*pgd)) {
pmd_t *page = get_pmd_fast();
if (!page)
page = get_pmd_slow();
if (page) {
if (pgd_none(*pgd)) {
pgd_val(*pgd) = 1 + __pa(page);
__flush_tlb();
return page + address;
} else
free_pmd_fast(page);
} else
return NULL;
}
return (pmd_t *)pgd_page(*pgd) + address;
}
#endif /* _I386_PGTABLE_3LEVEL_H */
......@@ -27,19 +27,6 @@ extern pgd_t swapper_pg_dir[1024];
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
......@@ -49,65 +36,9 @@ do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3"
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
#ifndef __SMP__
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm == current->active_mm)
__flush_tlb();
}
#else
/*
* We aren't very clever about this yet - SMP could certainly
* avoid some global flushes..
*/
#include <asm/smp.h>
#define local_flush_tlb() \
__flush_tlb()
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
#define flush_tlb() flush_tlb_current_task()
static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
flush_tlb_mm(mm);
}
#endif
#endif /* !__ASSEMBLY__ */
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* implements both the traditional 2-level x86 page tables and the
......@@ -247,7 +178,8 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
#define page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
#define page_address(page) ({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
......@@ -277,14 +209,14 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pt
* and a page entry and page directory to the page they refer to.
*/
extern inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = (page-mem_map)*(unsigned long long)PAGE_SIZE +
pgprot_val(pgprot);
return __pte;
}
#define mk_pte(page,pgprot) \
({ \
pte_t __pte; \
\
pte_val(__pte) = ((page)-mem_map)*(unsigned long long)PAGE_SIZE + \
pgprot_val(pgprot); \
__pte; \
})
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
......@@ -316,183 +248,11 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
__pte_offset(address))
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
extern __inline__ pgd_t *get_pgd_slow(void)
{
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
#if 0
/*
* On PAE allocating a whole page is overkill - we will
* either embed this in mm_struct, or do a SLAB cache.
*/
memcpy(ret, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
#endif
#if CONFIG_X86_PAE
int i;
for (i = 0; i < USER_PTRS_PER_PGD; i++)
__pgd_clear(ret + i);
#else
memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
#endif
memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern __inline__ pte_t *get_pte_fast(void)
{
unsigned long *ret;
if((ret = (unsigned long *)pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
extern __inline__ void free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
}
#define pte_free_kernel(pte) free_pte_slow(pte)
#define pte_free(pte) free_pte_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc() get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
if (!pmd)
BUG();
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
pte_t * page = (pte_t *) get_pte_fast();
if (!page)
return get_pte_kernel_slow(pmd, address);
pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
return page + address;
}
if (pmd_bad(*pmd)) {
__handle_bad_pmd_kernel(pmd);
return NULL;
}
return (pte_t *) pmd_page(*pmd) + address;
}
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd))
goto getnew;
if (pmd_bad(*pmd))
goto fix;
return (pte_t *)pmd_page(*pmd) + address;
getnew:
{
unsigned long page = (unsigned long) get_pte_fast();
if (!page)
return get_pte_slow(pmd, address);
pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
return (pte_t *)page + address;
}
fix:
__handle_bad_pmd(pmd);
return NULL;
}
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
* (In the PAE case we free the page.)
*/
#define pmd_free(pmd) free_pmd_slow(pmd)
#define pmd_free_kernel pmd_free
#define pmd_alloc_kernel pmd_alloc
extern int do_check_pgt_cache(int, int);
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
struct task_struct * p;
pgd_t *pgd;
#ifdef __SMP__
int i;
#endif
read_lock(&tasklist_lock);
for_each_task(p) {
if (!p->mm)
continue;
*pgd_offset(p->mm,address) = entry;
}
read_unlock(&tasklist_lock);
#ifndef __SMP__
for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#else
/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
modify pgd caches of other CPUs as well. -jj */
for (i = 0; i < NR_CPUS; i++)
for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
pgd[address >> PGDIR_SHIFT] = entry;
#endif
}
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
extern inline void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
}
#define update_mmu_cache(vma,address,pte) do { } while (0)
/* Encode and de-code a swap entry */
#define SWP_TYPE(x) (((x).val >> 1) & 0x3f)
......
......@@ -3,6 +3,7 @@
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <linux/init.h>
/*
* simple boot-time physical memory area allocator.
......
......@@ -133,13 +133,13 @@ int do_reset_coda_cache_inv_stats( ctl_table * table, int write,
/* these functions are called to form the content of /proc/fs/coda/... files */
int coda_vfs_stats_get_info( char * buffer, char ** start, off_t offset,
int length, int dummy );
int length);
int coda_upcall_stats_get_info( char * buffer, char ** start, off_t offset,
int length, int dummy );
int length);
int coda_permission_stats_get_info( char * buffer, char ** start, off_t offset,
int length, int dummy );
int length);
int coda_cache_inv_stats_get_info( char * buffer, char ** start, off_t offset,
int length, int dummy );
int length);
#endif /* _CODA_PROC_H */
......@@ -408,18 +408,19 @@ extern int fbgen_switch(int con, struct fb_info *info);
extern void fbgen_blank(int blank, struct fb_info *info);
/* drivers/char/fbmem.c */
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(const struct fb_info *fb_info);
extern int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
const struct fb_info *fb_info);
extern int fbmon_dpms(const struct fb_info *fb_info);
extern int num_registered_fb;
extern struct fb_info *registered_fb[FB_MAX];
extern char con2fb_map[MAX_NR_CONSOLES];
/* drivers/video/fbmon.c */
extern int fbmon_valid_timings(u_int pixclock, u_int htotal, u_int vtotal,
const struct fb_info *fb_info);
extern int fbmon_dpms(const struct fb_info *fb_info);
/* drivers/video/fbcon.c */
extern struct display fb_display[MAX_NR_CONSOLES];
......
......@@ -2,8 +2,7 @@
#define _LINUX_HIGHMEM_H
#include <linux/config.h>
#include <linux/pagemap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_HIGHMEM
......@@ -24,7 +23,7 @@ extern struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig);
extern inline unsigned int nr_free_highpages(void) { return 0; }
#define prepare_highmem_swapout(page) page
#define replace_with_highmem(page) page
#define kmap(page) page_address(page)
#define kmap(page) __page_address(page)
#define kunmap(page) do { } while (0)
#endif /* CONFIG_HIGHMEM */
......
......@@ -16,6 +16,7 @@ extern void * high_memory;
extern int page_cluster;
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
/*
......@@ -118,6 +119,8 @@ typedef struct {
unsigned long val;
} swp_entry_t;
struct zone_struct;
/*
* Try to keep the most commonly accessed fields in single cache lines
* here (16 bytes or greater). This ordering should be particularly
......@@ -127,7 +130,6 @@ typedef struct {
* is used for linear searches (eg. clock algorithm scans).
*/
typedef struct page {
/* these must be first (free area handling) */
struct list_head list;
struct address_space *mapping;
unsigned long index;
......@@ -139,6 +141,7 @@ typedef struct page {
struct page **pprev_hash;
struct buffer_head * buffers;
unsigned long virtual; /* nonzero if kmapped */
struct zone_struct *zone;
} mem_map_t;
#define get_page(p) atomic_inc(&(p)->count)
......@@ -283,19 +286,113 @@ typedef struct page {
extern mem_map_t * mem_map;
/*
* This is timing-critical - most of the time in getting a new page
* goes to clearing the page. If you want a page without the clearing
* overhead, just use __get_free_page() directly..
* Free memory management - zoned buddy allocator.
*/
#if CONFIG_AP1000
/* the AP+ needs to allocate 8MB contiguous, aligned chunks of ram
for the ring buffers */
#define MAX_ORDER 12
#else
#define MAX_ORDER 10
#endif
typedef struct free_area_struct {
struct list_head free_list;
unsigned int * map;
} free_area_t;
typedef struct zone_struct {
/*
* Commonly accessed fields:
*/
spinlock_t lock;
unsigned long offset;
unsigned long free_pages;
int low_on_memory;
unsigned long pages_low, pages_high;
/*
* free areas of different sizes
*/
free_area_t free_area[MAX_ORDER];
/*
* rarely used fields:
*/
char * name;
unsigned long size;
} zone_t;
#define ZONE_DMA 0
#define ZONE_NORMAL 1
#define ZONE_HIGHMEM 2
/*
* NUMA architectures will have more:
*/
#define MAX_NR_ZONES 3
/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
* priority. On NUMA we want to fall back on other CPU's zones
* as well.
*
* We have two allocation namespaces - the *get*page*() variants
* return virtual kernel addresses to the allocated page(s), the
* alloc_page*() variants return 'struct page *'.
* Right now a zonelist takes up less than a cacheline. We never
* modify it apart from boot-up, and only a few indices are used,
* so despite the zonelist table being relatively big, the cache
* footprint of this construct is very small.
*/
typedef struct zonelist_struct {
zone_t * zones [MAX_NR_ZONES+1]; // NULL delimited
int gfp_mask;
} zonelist_t;
#define NR_GFPINDEX 0x100
extern zonelist_t zonelists [NR_GFPINDEX];
/*
* There is only one page-allocator function, and two main namespaces to
* it. The alloc_page*() variants return 'struct page *' and as such
* can allocate highmem pages, the *get*page*() variants return
* virtual kernel addresses to the allocated page(s).
*/
#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))
extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long order));
extern struct page * FASTCALL(alloc_pages(int gfp_mask, unsigned long order));
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
extern struct page * FASTCALL(__alloc_pages(zonelist_t *zonelist, unsigned long order));
extern inline struct page * alloc_pages(int gfp_mask, unsigned long order)
{
/* temporary check. */
if (zonelists[gfp_mask].gfp_mask != (gfp_mask))
BUG();
/*
* Gets optimized away by the compiler.
*/
if (order >= MAX_ORDER)
return NULL;
return __alloc_pages(zonelists+(gfp_mask), order);
}
#define alloc_page(gfp_mask) \
alloc_pages(gfp_mask, 0)
extern inline unsigned long __get_free_pages (int gfp_mask, unsigned long order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
if (!page)
return 0;
return __page_address(page);
}
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA,(order))
extern inline unsigned long get_zeroed_page(int gfp_mask)
{
......@@ -312,11 +409,29 @@ extern inline unsigned long get_zeroed_page(int gfp_mask)
*/
#define get_free_page get_zeroed_page
/* memory.c & swap.c*/
/*
* There is only one 'core' page-freeing function.
*/
extern void FASTCALL(__free_pages_ok(struct page * page, unsigned long order));
extern inline void __free_pages(struct page *page, unsigned long order)
{
if (!put_page_testzero(page))
return;
__free_pages_ok(page, order);
}
#define __free_page(page) __free_pages(page, 0)
extern inline void free_pages(unsigned long addr, unsigned long order)
{
unsigned long map_nr = MAP_NR(addr);
if (map_nr < max_mapnr)
__free_pages(mem_map + map_nr, order);
}
#define free_page(addr) free_pages((addr),0)
extern int FASTCALL(free_pages(unsigned long addr, unsigned long order));
extern int FASTCALL(__free_page(struct page *));
extern void show_free_areas(void);
extern struct page * put_dirty_page(struct task_struct * tsk, struct page *page,
......@@ -398,7 +513,7 @@ extern void put_cached_page(unsigned long);
#define GFP_DMA __GFP_DMA
/* Flag - indicates that the buffer can be taken from high memory which is not
directly addressable by the kernel */
permanently mapped by the kernel */
#define GFP_HIGHMEM __GFP_HIGHMEM
......@@ -447,7 +562,6 @@ extern struct vm_area_struct *find_extend_vma(struct task_struct *tsk, unsigned
#define vmlist_modify_lock(mm) vmlist_access_lock(mm)
#define vmlist_modify_unlock(mm) vmlist_access_unlock(mm)
#endif /* __KERNEL__ */
#endif
......@@ -13,6 +13,7 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/highmem.h>
/*
* The page cache can done in larger chunks than
......
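As an illustrative aside (standalone C): the new zonelist_t is a NULL-terminated array of fallback zones for a given gfp_mask, and the allocator walks it front to back. The sketch below models only that fallback walk with toy zones; it is not the real __alloc_pages(), which also handles watermarks and reclaim.

#include <stdio.h>

#define MAX_NR_ZONES 3

typedef struct zone_struct {
    const char   *name;
    unsigned long free_pages;
} zone_t;

typedef struct zonelist_struct {
    zone_t *zones[MAX_NR_ZONES + 1];   /* NULL delimited, best zone first */
    int     gfp_mask;
} zonelist_t;

/* walk the fallback list and take the first zone with enough free pages */
static zone_t *pick_zone(zonelist_t *zl, unsigned long want)
{
    for (zone_t **zp = zl->zones; *zp; zp++)
        if ((*zp)->free_pages >= want)
            return *zp;
    return NULL;
}

int main(void)
{
    zone_t highmem = { "HighMem", 0 };     /* exhausted */
    zone_t normal  = { "Normal", 128 };
    zone_t dma     = { "DMA", 16 };

    /* a GFP_HIGHMEM-style list falls back HighMem -> Normal -> DMA */
    zonelist_t zl = { { &highmem, &normal, &dma, NULL }, 0 };

    zone_t *z = pick_zone(&zl, 4);
    printf("allocated from %s\n", z ? z->name : "nowhere");
    return 0;
}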
......@@ -479,7 +479,6 @@ asmlinkage void __init start_kernel(void)
size = prof_len * sizeof(unsigned int) + PAGE_SIZE-1;
prof_buffer = (unsigned int *) alloc_bootmem(size);
memset(prof_buffer, 0, size);
}
kmem_cache_init();
......
......@@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
......
......@@ -92,10 +92,9 @@ EXPORT_SYMBOL(exit_fs);
EXPORT_SYMBOL(exit_sighand);
/* internal kernel memory management */
EXPORT_SYMBOL(__get_free_pages);
EXPORT_SYMBOL(free_pages);
EXPORT_SYMBOL(alloc_pages);
EXPORT_SYMBOL(__free_page);
EXPORT_SYMBOL(__alloc_pages);
EXPORT_SYMBOL(__free_pages_ok);
EXPORT_SYMBOL(zonelists);
EXPORT_SYMBOL(kmem_find_general_cachep);
EXPORT_SYMBOL(kmem_cache_create);
EXPORT_SYMBOL(kmem_cache_destroy);
......
......@@ -4,7 +4,7 @@
#include <asm/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/init.h>
/*
......
......@@ -178,11 +178,11 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
last_offset = offset+size;
ret = phys_to_virt(last_pos*PAGE_SIZE + offset);
} else {
size -= remaining_size;
areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
remaining_size = size - remaining_size;
areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
ret = phys_to_virt(last_pos*PAGE_SIZE + offset);
last_pos = start+areasize-1;
last_offset = size;
last_offset = remaining_size;
}
last_offset &= ~PAGE_MASK;
} else {
......@@ -196,7 +196,7 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
for (i = start; i < start+areasize; i++)
if (test_and_set_bit(i, bootmem_map))
BUG();
memset(ret, 0, size);
return ret;
}
......
......@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
......
......@@ -70,9 +70,9 @@ struct page * replace_with_highmem(struct page * page)
return page;
}
vaddr = kmap(page);
vaddr = kmap(highpage);
copy_page((void *)vaddr, (void *)page_address(page));
kunmap(page);
kunmap(highpage);
/* Preserve the caching of the swap_entry. */
highpage->index = page->index;
......@@ -87,20 +87,6 @@ struct page * replace_with_highmem(struct page * page)
return highpage;
}
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 2048
#else
#define LAST_PKMAP 4096
#endif
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
/*
* Virtual_count is not a pure "count".
* 0 means that it is not mapped, and has not been mapped
......@@ -135,7 +121,7 @@ static void flush_all_zero_pkmaps(void)
pkmap_count[i] = 0;
pte = pkmap_page_table[i];
if (pte_none(pte))
continue;
BUG();
pte_clear(pkmap_page_table+i);
page = pte_page(pte);
page->virtual = 0;
......@@ -167,30 +153,27 @@ static unsigned long map_new_virtual(struct page *page)
current->state = TASK_UNINTERRUPTIBLE;
add_wait_queue(&pkmap_map_wait, &wait);
spin_unlock(&kmap_lock);
// it's not quite possible to saturate the
// pkmap pool right now.
BUG();
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
spin_lock(&kmap_lock);
}
/* Somebody else might have mapped it while we slept */
if (page->virtual)
return page->virtual;
/* Somebody else might have mapped it while we slept */
if (page->virtual)
return page->virtual;
/* Re-start */
count = LAST_PKMAP;
/* Re-start */
count = LAST_PKMAP;
}
}
vaddr = PKMAP_ADDR(last_pkmap_nr);
pkmap_page_table[last_pkmap_nr] = mk_pte(page, kmap_prot);
/*
* Subtle! For some reason if we don't do this TLB flush then
* we get data corruption and weird behavior in dbench runs.
* But with invlpg this should not be necessary ... Any ideas?
*/
__flush_tlb_one(vaddr);
pkmap_count[last_pkmap_nr] = 1;
page->virtual = vaddr;
......@@ -201,8 +184,6 @@ unsigned long kmap_high(struct page *page)
{
unsigned long vaddr;
if (!PageHighMem(page))
BUG();
/*
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
......
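A hedged toy model of the pkmap_count[] bookkeeping referenced above: as I read the code, 0 means a slot is unmapped, 1 means it is mapped but has no users (so flush_all_zero_pkmaps() may reclaim it), and larger values count active users on top of that. The slot search, sleeping and TLB flushing of the real map_new_virtual() are omitted here.

#include <stdio.h>

#define LAST_PKMAP 8
static int pkmap_count[LAST_PKMAP];

static int map_new_virtual(void)      /* grab a free slot: 1 for "mapped" + 1 for the caller */
{
    for (int i = 0; i < LAST_PKMAP; i++)
        if (pkmap_count[i] == 0) {
            pkmap_count[i] = 2;
            return i;
        }
    return -1;                        /* pool exhausted: the real code sleeps here */
}

static void kunmap_high(int nr)
{
    pkmap_count[nr]--;                /* drops to 1: still mapped, but reclaimable */
}

static int flush_all_zero_pkmaps(void)/* reclaim every slot with no users left */
{
    int freed = 0;
    for (int i = 0; i < LAST_PKMAP; i++)
        if (pkmap_count[i] == 1) {
            pkmap_count[i] = 0;
            freed++;
        }
    return freed;
}

int main(void)
{
    int a = map_new_virtual();
    int b = map_new_virtual();
    kunmap_high(a);
    printf("slots %d,%d in use; reclaimed %d after flush\n",
           a, b, flush_all_zero_pkmaps());
    return 0;
}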
......@@ -39,14 +39,14 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/swapctl.h>
#include <linux/iobuf.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
unsigned long max_mapnr = 0;
unsigned long num_physpages = 0;
......
......@@ -14,7 +14,7 @@
#include <linux/file.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
......
......@@ -9,7 +9,7 @@
#include <linux/mman.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
unsigned long size, pgprot_t newprot)
......
......@@ -11,7 +11,7 @@
#include <linux/swap.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
extern int vm_enough_memory(long pages);
......
......@@ -1043,20 +1043,12 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
int
kmem_cache_shrink(kmem_cache_t *cachep)
{
if (!cachep) {
printk(KERN_ERR "kmem_shrink: NULL ptr\n");
return 2;
}
if (in_interrupt()) {
printk(KERN_ERR "kmem_shrink: Called during int - %s\n", cachep->c_name);
return 2;
}
if (!is_chained_kmem_cache(cachep)) {
printk(KERN_ERR "kmem_shrink: Invalid cache addr %p\n",
cachep);
return 2;
}
if (!cachep)
BUG();
if (in_interrupt())
BUG();
if (!is_chained_kmem_cache(cachep))
BUG();
return __kmem_cache_shrink(cachep);
}
......
......@@ -9,6 +9,7 @@
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
struct vm_struct * vmlist = NULL;
......
......@@ -20,7 +20,7 @@
#include <linux/highmem.h>
#include <linux/file.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/*
* The swap-out functions return 1 if they successfully
......@@ -505,7 +505,6 @@ int kswapd(void *unused)
allocations (not GFP_HIGHMEM ones). */
if (nr_free_buffer_pages() >= freepages.high)
break;
if (!do_try_to_free_pages(GFP_KSWAPD))
break;
run_task_queue(&tq_disk);
......
......@@ -41,7 +41,7 @@ extern int discovery_proc_read(char *buf, char **start, off_t offset, int len);
struct irda_entry {
char *name;
int (*fn)(char*, char**, off_t, int, int);
int (*fn)(char*, char**, off_t, int);
};
struct proc_dir_entry *proc_irda;
......
......@@ -197,21 +197,21 @@ int __init wanrouter_proc_init (void)
if (!proc_router)
goto fail;
p = proc_create_proc_entry("config",0,proc_router);
p = create_proc_entry("config",0,proc_router);
if (!p)
goto fail_config;
p->ops = &router_inode;
p->info = config_get_info;
p = proc_create_proc_entry("status",0,proc_router);
p->get_info = config_get_info;
p = create_proc_entry("status",0,proc_router);
if (!p)
goto fail_stat;
p->ops = &router_inode;
p->info = status_get_info;
p->get_info = status_get_info;
return 0;
fail_stat:
remove_proc_entry("config", proc_router);
fail_config:
remove_proc_entry(proc_net, ROUTER_NAME);
remove_proc_entry(ROUTER_NAME, proc_net);
fail:
return -ENOMEM;
}
......