Commit 79425084 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] uninline some things in mm/*.c

Tuned for gcc-2.95.3:

	filemap.c:	10815 -> 10046
	highmem.c:	3392 -> 3104
	mmap.c:		5998 -> 5854
	mremap.c:	3058 -> 2802
	msync.c:	1521 -> 1489
	page_alloc.c:	8487 -> 8167
parent 631709da
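
The figures above are presumably the text sizes of the compiled objects in bytes; if so, the listed files shrink by about 1.8 KB in total (filemap.c alone drops by 769 bytes). The change repeated throughout the diff below is a single keyword: helpers with more than one caller lose "inline", so gcc-2.95.3 emits one shared out-of-line copy instead of expanding the body at every call site. A small, self-contained illustration of that tradeoff (a hypothetical helper, not code from this commit):

/*
 * With "static inline", gcc-2.95.3 would expand the helper's body into
 * both call sites below, roughly doubling its contribution to .text.
 * Dropping "inline" leaves a single shared copy, which is the kind of
 * saving the size table in the changelog records.
 */
#include <stdio.h>

static int clamp(int v, int lo, int hi)	/* was: static inline int clamp(...) */
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

int main(void)
{
	printf("%d\n", clamp(42, 0, 10));	/* call site 1 */
	printf("%d\n", clamp(-3, 0, 10));	/* call site 2 */
	return 0;
}

The more call sites such a helper has, the larger the saving from keeping a single out-of-line copy.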
@@ -288,7 +288,7 @@ static int page_cache_read(struct file * file, unsigned long offset)
* at a cost of "thundering herd" phenomena during rare hash
* collisions.
*/
-static inline wait_queue_head_t *page_waitqueue(struct page *page)
+static wait_queue_head_t *page_waitqueue(struct page *page)
{
const struct zone *zone = page_zone(page);
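
The comment kept as context in this hunk explains the design page_waitqueue implements: rather than one wait queue per page, pages are hashed into a shared per-zone table of wait queues, accepting an occasional "thundering herd" wakeup when two pages collide in the hash. A rough userspace sketch of that idea (the names, table size, and hash below are made up for illustration, not the kernel's):

/*
 * Sketch of a hashed wait-queue table: every "page" maps to one of a
 * small, fixed number of shared queues.  Distinct pages that hash to
 * the same slot share a queue, so a wakeup on that slot can also rouse
 * waiters for an unrelated page -- the rare thundering herd the comment
 * above accepts as the cost of not embedding a queue in every page.
 */
#include <stdint.h>
#include <stdio.h>

#define WAIT_TABLE_BITS	8
#define WAIT_TABLE_SIZE	(1u << WAIT_TABLE_BITS)

struct fake_waitqueue { int nr_waiters; };	/* stand-in for wait_queue_head_t */

static struct fake_waitqueue wait_table[WAIT_TABLE_SIZE];

/* Hash the page's address into the table (simple multiplicative hash). */
static struct fake_waitqueue *waitqueue_for(const void *page)
{
	uintptr_t h = (uintptr_t)page * 2654435761u;

	return &wait_table[(h >> 16) & (WAIT_TABLE_SIZE - 1)];
}

int main(void)
{
	int a, b;	/* two unrelated "pages" */

	printf("a -> slot %ld\n", (long)(waitqueue_for(&a) - wait_table));
	printf("b -> slot %ld\n", (long)(waitqueue_for(&b) - wait_table));
	return 0;
}

Waiters that share a slot simply recheck their own page's state after a wakeup and go back to sleep if it was not meant for them.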
@@ -758,7 +758,7 @@ static inline int fault_in_pages_writeable(char *uaddr, int size)
return ret;
}
-static inline void fault_in_pages_readable(const char *uaddr, int size)
+static void fault_in_pages_readable(const char *uaddr, int size)
{
volatile char c;
int ret;
@@ -1296,7 +1296,7 @@ __grab_cache_page(struct address_space *mapping, unsigned long index,
return page;
}
-inline void remove_suid(struct dentry *dentry)
+void remove_suid(struct dentry *dentry)
{
struct iattr newattrs;
struct inode *inode = dentry->d_inode;
@@ -1332,7 +1332,7 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
return left;
}
-static inline int
+static int
__filemap_copy_from_user_iovec(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
......
@@ -229,7 +229,7 @@ __initcall(init_emergency_pool);
/*
* highmem version, map in to vec
*/
-static inline void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
+static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
unsigned long flags;
unsigned char *vto;
@@ -272,7 +272,7 @@ int init_emergency_isa_pool(void)
* queue gfp mask set, *to may or may not be a highmem page. kmap it
* always, it will do the Right Thing
*/
-static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
+static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
struct bio_vec *tovec, *fromvec;
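
The comment kept above this hunk records the rule copy_to_high_bio_irq relies on: the destination page may or may not be a highmem page, but it can be kmapped unconditionally, because for a lowmem page kmap() simply returns the page's existing direct mapping. A minimal kernel-style sketch of that rule (a hypothetical helper, not the bounce code itself):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy into a page that may or may not be highmem.
 * kmap() on a lowmem page just hands back its permanent mapping, so
 * calling it unconditionally "does the Right Thing" either way.
 */
static void copy_into_page(struct page *page, unsigned int offset,
		const void *src, size_t len)
{
	char *vto = kmap(page);

	memcpy(vto + offset, src, len);
	kunmap(page);
}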
@@ -338,7 +338,7 @@ static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-static inline void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
......
@@ -599,7 +599,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
return i;
}
-static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
+static void zeromap_pte_range(pte_t * pte, unsigned long address,
unsigned long size, pgprot_t prot)
{
unsigned long end;
......
@@ -133,7 +133,7 @@ int vm_enough_memory(long pages)
}
/* Remove one vm structure from the inode's i_mapping address space. */
-static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
+static void remove_shared_vm_struct(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
@@ -302,7 +302,7 @@ static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct
}
}
-static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
+static void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
struct rb_node ** rb_link, struct rb_node * rb_parent)
{
rb_link_node(&vma->vm_rb, rb_parent, rb_link);
@@ -336,8 +336,9 @@ static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, stru
__vma_link_file(vma);
}
-static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
-struct rb_node ** rb_link, struct rb_node * rb_parent)
+static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
+struct vm_area_struct *prev, struct rb_node **rb_link,
+struct rb_node *rb_parent)
{
struct address_space *mapping = NULL;
......
@@ -21,7 +21,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static inline pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
+static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
{
pgd_t * pgd;
pmd_t * pmd;
@@ -81,7 +81,7 @@ static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
return pte;
}
-static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
+static int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
int error = 0;
pte_t pte;
@@ -170,7 +170,7 @@ static int move_page_tables(struct vm_area_struct *vma,
return -1;
}
-static inline unsigned long move_vma(struct vm_area_struct * vma,
+static unsigned long move_vma(struct vm_area_struct * vma,
unsigned long addr, unsigned long old_len, unsigned long new_len,
unsigned long new_addr)
{
......
@@ -39,7 +39,7 @@ static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
return 0;
}
-static inline int filemap_sync_pte_range(pmd_t * pmd,
+static int filemap_sync_pte_range(pmd_t * pmd,
unsigned long address, unsigned long end,
struct vm_area_struct *vma, unsigned int flags)
{
......
@@ -51,7 +51,7 @@ static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
/*
* Temporary debugging check for pages not lying within a given zone.
*/
-static inline int bad_range(struct zone *zone, struct page *page)
+static int bad_range(struct zone *zone, struct page *page)
{
if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
return 1;
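
The visible part of this hunk shows only the upper-bound test; a check for "pages not lying within a given zone" of this kind would also have to reject page frame numbers below the zone's start. A simplified, hypothetical standalone version of such a test (not the remainder of bad_range itself):

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Hypothetical, simplified "page outside zone" sanity check. */
static int page_outside_zone(struct zone *zone, struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn < zone->zone_start_pfn)
		return 1;	/* below the start of the zone */
	if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 1;	/* past the end of the zone */
	return 0;
}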
......
@@ -389,7 +389,7 @@ static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
}
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
+static void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
unsigned long address, unsigned long size, unsigned long offset,
swp_entry_t entry, struct page* page)
{
@@ -418,7 +418,7 @@ static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
}
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
-static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
+static void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
unsigned long address, unsigned long size,
swp_entry_t entry, struct page* page)
{
......
@@ -23,7 +23,7 @@
rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;
-static inline void unmap_area_pte(pmd_t *pmd, unsigned long address,
+static void unmap_area_pte(pmd_t *pmd, unsigned long address,
unsigned long size)
{
unsigned long end;
@@ -56,7 +56,7 @@ static inline void unmap_area_pte(pmd_t *pmd, unsigned long address,
} while (address < end);
}
-static inline void unmap_area_pmd(pgd_t *dir, unsigned long address,
+static void unmap_area_pmd(pgd_t *dir, unsigned long address,
unsigned long size)
{
unsigned long end;
@@ -83,7 +83,7 @@ static inline void unmap_area_pmd(pgd_t *dir, unsigned long address,
} while (address < end);
}
-static inline int map_area_pte(pte_t *pte, unsigned long address,
+static int map_area_pte(pte_t *pte, unsigned long address,
unsigned long size, pgprot_t prot,
struct page ***pages)
{
@@ -110,7 +110,7 @@ static inline int map_area_pte(pte_t *pte, unsigned long address,
return 0;
}
-static inline int map_area_pmd(pmd_t *pmd, unsigned long address,
+static int map_area_pmd(pmd_t *pmd, unsigned long address,
unsigned long size, pgprot_t prot,
struct page ***pages)
{
......
@@ -193,7 +193,7 @@ static inline int is_page_cache_freeable(struct page *page)
/*
* shrink_list returns the number of reclaimed pages
*/
-static /* inline */ int
+static int
shrink_list(struct list_head *page_list, unsigned int gfp_mask,
int *max_scan, int *nr_mapped)
{
@@ -417,7 +417,7 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
* For pagecache intensive workloads, the first loop here is the hottest spot
* in the kernel (apart from the copy_*_user functions).
*/
-static /* inline */ int
+static int
shrink_cache(const int nr_pages, struct zone *zone,
unsigned int gfp_mask, int max_scan, int *nr_mapped)
{
@@ -521,7 +521,7 @@ shrink_cache(const int nr_pages, struct zone *zone,
* The downside is that we have to touch page->count against each page.
* But we had to alter page->flags anyway.
*/
-static /* inline */ void
+static void
refill_inactive_zone(struct zone *zone, const int nr_pages_in,
struct page_state *ps, int priority)
{
@@ -667,7 +667,7 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
* pages. This is a basic per-zone page freer. Used by both kswapd and
* direct reclaim.
*/
-static /* inline */ int
+static int
shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
const int nr_pages, int *nr_mapped, struct page_state *ps, int priority)
{
......