Commit 27496a8c authored by Al Viro's avatar Al Viro Committed by Linus Torvalds

[PATCH] gfp_t: fs/*

 - ->releasepage() annotated (s/int/gfp_t), instances updated
 - missing gfp_t in fs/* added
 - fixed misannotation from the original sweep caught by bitwise checks:
   XFS used __nocast both for gfp_t and for flags used by XFS allocator.
   The latter left with unsigned int __nocast; we might want to add a
   different type for those but for now let's leave them alone.  That,
   BTW, is a case when __nocast use had been actively confusing - it had
   been used in the same code for two different and similar types, with
   no way to catch misuses.  Switch of gfp_t to bitwise had caught that
   immediately...

One tricky bit is deliberately left to be dealt with later - mapping->flags is
a mix of gfp_t and error indications, so it stays untouched for now.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7d877f3b
...@@ -29,7 +29,7 @@ static int afs_file_release(struct inode *inode, struct file *file); ...@@ -29,7 +29,7 @@ static int afs_file_release(struct inode *inode, struct file *file);
static int afs_file_readpage(struct file *file, struct page *page); static int afs_file_readpage(struct file *file, struct page *page);
static int afs_file_invalidatepage(struct page *page, unsigned long offset); static int afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, int gfp_flags); static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
static ssize_t afs_file_write(struct file *file, const char __user *buf, static ssize_t afs_file_write(struct file *file, const char __user *buf,
size_t size, loff_t *off); size_t size, loff_t *off);
...@@ -279,7 +279,7 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset) ...@@ -279,7 +279,7 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset)
/* /*
* release a page and cleanup its private data * release a page and cleanup its private data
*/ */
static int afs_file_releasepage(struct page *page, int gfp_flags) static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{ {
struct cachefs_page *pageio; struct cachefs_page *pageio;
......
...@@ -778,7 +778,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err) ...@@ -778,7 +778,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
static struct bio *__bio_map_kern(request_queue_t *q, void *data, static struct bio *__bio_map_kern(request_queue_t *q, void *data,
unsigned int len, unsigned int gfp_mask) unsigned int len, gfp_t gfp_mask)
{ {
unsigned long kaddr = (unsigned long)data; unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
...@@ -825,7 +825,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data, ...@@ -825,7 +825,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
* device. Returns an error pointer in case of error. * device. Returns an error pointer in case of error.
*/ */
struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
unsigned int gfp_mask) gfp_t gfp_mask)
{ {
struct bio *bio; struct bio *bio;
......
...@@ -1571,7 +1571,7 @@ static inline void discard_buffer(struct buffer_head * bh) ...@@ -1571,7 +1571,7 @@ static inline void discard_buffer(struct buffer_head * bh)
* *
* NOTE: @gfp_mask may go away, and this function may become non-blocking. * NOTE: @gfp_mask may go away, and this function may become non-blocking.
*/ */
int try_to_release_page(struct page *page, int gfp_mask) int try_to_release_page(struct page *page, gfp_t gfp_mask)
{ {
struct address_space * const mapping = page->mapping; struct address_space * const mapping = page->mapping;
......
...@@ -689,7 +689,7 @@ void shrink_dcache_anon(struct hlist_head *head) ...@@ -689,7 +689,7 @@ void shrink_dcache_anon(struct hlist_head *head)
* *
* In this case we return -1 to tell the caller that we baled. * In this case we return -1 to tell the caller that we baled.
*/ */
static int shrink_dcache_memory(int nr, unsigned int gfp_mask) static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
if (!(gfp_mask & __GFP_FS)) if (!(gfp_mask & __GFP_FS))
......
...@@ -500,7 +500,7 @@ static void prune_dqcache(int count) ...@@ -500,7 +500,7 @@ static void prune_dqcache(int count)
* more memory * more memory
*/ */
static int shrink_dqcache_memory(int nr, unsigned int gfp_mask) static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
spin_lock(&dq_list_lock); spin_lock(&dq_list_lock);
......
...@@ -1434,7 +1434,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset) ...@@ -1434,7 +1434,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
return journal_invalidatepage(journal, page, offset); return journal_invalidatepage(journal, page, offset);
} }
static int ext3_releasepage(struct page *page, int wait) static int ext3_releasepage(struct page *page, gfp_t wait)
{ {
journal_t *journal = EXT3_JOURNAL(page->mapping->host); journal_t *journal = EXT3_JOURNAL(page->mapping->host);
......
...@@ -46,7 +46,7 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block) ...@@ -46,7 +46,7 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfs_get_block); return generic_block_bmap(mapping, block, hfs_get_block);
} }
static int hfs_releasepage(struct page *page, int mask) static int hfs_releasepage(struct page *page, gfp_t mask)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
......
...@@ -40,7 +40,7 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) ...@@ -40,7 +40,7 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, hfsplus_get_block); return generic_block_bmap(mapping, block, hfsplus_get_block);
} }
static int hfsplus_releasepage(struct page *page, int mask) static int hfsplus_releasepage(struct page *page, gfp_t mask)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
......
...@@ -475,7 +475,7 @@ static void prune_icache(int nr_to_scan) ...@@ -475,7 +475,7 @@ static void prune_icache(int nr_to_scan)
* This function is passed the number of inodes to scan, and it returns the * This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes. * total number of remaining possibly-reclaimable inodes.
*/ */
static int shrink_icache_memory(int nr, unsigned int gfp_mask) static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{ {
if (nr) { if (nr) {
/* /*
......
...@@ -1606,7 +1606,7 @@ int journal_blocks_per_page(struct inode *inode) ...@@ -1606,7 +1606,7 @@ int journal_blocks_per_page(struct inode *inode)
* Simple support for retrying memory allocations. Introduced to help to * Simple support for retrying memory allocations. Introduced to help to
* debug different VM deadlock avoidance strategies. * debug different VM deadlock avoidance strategies.
*/ */
void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry) void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
{ {
return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0)); return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
} }
......
...@@ -1621,7 +1621,7 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) ...@@ -1621,7 +1621,7 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
* while the data is part of a transaction. Yes? * while the data is part of a transaction. Yes?
*/ */
int journal_try_to_free_buffers(journal_t *journal, int journal_try_to_free_buffers(journal_t *journal,
struct page *page, int unused_gfp_mask) struct page *page, gfp_t unused_gfp_mask)
{ {
struct buffer_head *head; struct buffer_head *head;
struct buffer_head *bh; struct buffer_head *bh;
......
...@@ -198,7 +198,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) ...@@ -198,7 +198,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
} }
} }
static inline struct metapage *alloc_metapage(unsigned int gfp_mask) static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{ {
return mempool_alloc(metapage_mempool, gfp_mask); return mempool_alloc(metapage_mempool, gfp_mask);
} }
...@@ -534,7 +534,7 @@ static int metapage_readpage(struct file *fp, struct page *page) ...@@ -534,7 +534,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
return -EIO; return -EIO;
} }
static int metapage_releasepage(struct page *page, int gfp_mask) static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{ {
struct metapage *mp; struct metapage *mp;
int busy = 0; int busy = 0;
......
...@@ -116,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache) ...@@ -116,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache)
* What the mbcache registers as to get shrunk dynamically. * What the mbcache registers as to get shrunk dynamically.
*/ */
static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask); static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
static inline int static inline int
...@@ -140,7 +140,7 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce) ...@@ -140,7 +140,7 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce)
static inline void static inline void
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask) __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{ {
struct mb_cache *cache = ce->e_cache; struct mb_cache *cache = ce->e_cache;
...@@ -193,7 +193,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce) ...@@ -193,7 +193,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
* Returns the number of objects which are present in the cache. * Returns the number of objects which are present in the cache.
*/ */
static int static int
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask) mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
{ {
LIST_HEAD(free_list); LIST_HEAD(free_list);
struct list_head *l, *ltmp; struct list_head *l, *ltmp;
......
...@@ -2022,7 +2022,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) ...@@ -2022,7 +2022,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h)
} }
#ifdef CONFIG_REISERFS_CHECK #ifdef CONFIG_REISERFS_CHECK
void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s) void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s)
{ {
void *vp; void *vp;
static size_t malloced; static size_t malloced;
......
...@@ -2842,7 +2842,7 @@ static int reiserfs_set_page_dirty(struct page *page) ...@@ -2842,7 +2842,7 @@ static int reiserfs_set_page_dirty(struct page *page)
* even in -o notail mode, we can't be sure an old mount without -o notail * even in -o notail mode, we can't be sure an old mount without -o notail
* didn't create files with tails. * didn't create files with tails.
*/ */
static int reiserfs_releasepage(struct page *page, int unused_gfp_flags) static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb); struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
......
...@@ -45,11 +45,11 @@ ...@@ -45,11 +45,11 @@
void * void *
kmem_alloc(size_t size, gfp_t flags) kmem_alloc(size_t size, unsigned int __nocast flags)
{ {
int retries = 0; int retries = 0;
unsigned int lflags = kmem_flags_convert(flags); gfp_t lflags = kmem_flags_convert(flags);
void *ptr; void *ptr;
do { do {
if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
...@@ -67,7 +67,7 @@ kmem_alloc(size_t size, gfp_t flags) ...@@ -67,7 +67,7 @@ kmem_alloc(size_t size, gfp_t flags)
} }
void * void *
kmem_zalloc(size_t size, gfp_t flags) kmem_zalloc(size_t size, unsigned int __nocast flags)
{ {
void *ptr; void *ptr;
...@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size) ...@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size)
void * void *
kmem_realloc(void *ptr, size_t newsize, size_t oldsize, kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
gfp_t flags) unsigned int __nocast flags)
{ {
void *new; void *new;
...@@ -105,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, ...@@ -105,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
} }
void * void *
kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags) kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
{ {
int retries = 0; int retries = 0;
unsigned int lflags = kmem_flags_convert(flags); gfp_t lflags = kmem_flags_convert(flags);
void *ptr; void *ptr;
do { do {
ptr = kmem_cache_alloc(zone, lflags); ptr = kmem_cache_alloc(zone, lflags);
...@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags) ...@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
} }
void * void *
kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags) kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
{ {
void *ptr; void *ptr;
......
...@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t; ...@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
*(NSTATEP) = *(OSTATEP); \ *(NSTATEP) = *(OSTATEP); \
} while (0) } while (0)
static __inline unsigned int kmem_flags_convert(gfp_t flags) static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
{ {
unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */
#ifdef DEBUG #ifdef DEBUG
if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
...@@ -125,16 +125,16 @@ kmem_zone_destroy(kmem_zone_t *zone) ...@@ -125,16 +125,16 @@ kmem_zone_destroy(kmem_zone_t *zone)
BUG(); BUG();
} }
extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t); extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t); extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_alloc(size_t, gfp_t); extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, gfp_t); extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, gfp_t); extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t); extern void kmem_free(void *, size_t);
typedef struct shrinker *kmem_shaker_t; typedef struct shrinker *kmem_shaker_t;
typedef int (*kmem_shake_func_t)(int, unsigned int); typedef int (*kmem_shake_func_t)(int, gfp_t);
static __inline kmem_shaker_t static __inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc) kmem_shake_register(kmem_shake_func_t sfunc)
...@@ -149,7 +149,7 @@ kmem_shake_deregister(kmem_shaker_t shrinker) ...@@ -149,7 +149,7 @@ kmem_shake_deregister(kmem_shaker_t shrinker)
} }
static __inline int static __inline int
kmem_shake_allow(unsigned int gfp_mask) kmem_shake_allow(gfp_t gfp_mask)
{ {
return (gfp_mask & __GFP_WAIT); return (gfp_mask & __GFP_WAIT);
} }
......
...@@ -1296,7 +1296,7 @@ linvfs_invalidate_page( ...@@ -1296,7 +1296,7 @@ linvfs_invalidate_page(
STATIC int STATIC int
linvfs_release_page( linvfs_release_page(
struct page *page, struct page *page,
int gfp_mask) gfp_t gfp_mask)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
int dirty, delalloc, unmapped, unwritten; int dirty, delalloc, unmapped, unwritten;
......
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
STATIC kmem_cache_t *pagebuf_zone; STATIC kmem_cache_t *pagebuf_zone;
STATIC kmem_shaker_t pagebuf_shake; STATIC kmem_shaker_t pagebuf_shake;
STATIC int xfsbufd_wakeup(int, unsigned int); STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue; STATIC struct workqueue_struct *xfslogd_workqueue;
...@@ -383,7 +383,7 @@ _pagebuf_lookup_pages( ...@@ -383,7 +383,7 @@ _pagebuf_lookup_pages(
size_t blocksize = bp->pb_target->pbr_bsize; size_t blocksize = bp->pb_target->pbr_bsize;
size_t size = bp->pb_count_desired; size_t size = bp->pb_count_desired;
size_t nbytes, offset; size_t nbytes, offset;
int gfp_mask = pb_to_gfp(flags); gfp_t gfp_mask = pb_to_gfp(flags);
unsigned short page_count, i; unsigned short page_count, i;
pgoff_t first; pgoff_t first;
loff_t end; loff_t end;
...@@ -1749,8 +1749,8 @@ STATIC int xfsbufd_force_sleep; ...@@ -1749,8 +1749,8 @@ STATIC int xfsbufd_force_sleep;
STATIC int STATIC int
xfsbufd_wakeup( xfsbufd_wakeup(
int priority, int priority,
unsigned int mask) gfp_t mask)
{ {
if (xfsbufd_force_sleep) if (xfsbufd_force_sleep)
return 0; return 0;
......
...@@ -301,7 +301,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *, ...@@ -301,7 +301,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
struct sg_iovec *, int, int); struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *); extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
unsigned int); gfp_t);
extern void bio_set_pages_dirty(struct bio *bio); extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
......
...@@ -188,7 +188,7 @@ extern int buffer_heads_over_limit; ...@@ -188,7 +188,7 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed * Generic address_space_operations implementations for buffer_head-backed
* address_spaces. * address_spaces.
*/ */
int try_to_release_page(struct page * page, int gfp_mask); int try_to_release_page(struct page * page, gfp_t gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset); int block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block, int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc); struct writeback_control *wbc);
......
...@@ -320,7 +320,7 @@ struct address_space_operations { ...@@ -320,7 +320,7 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */ /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t); sector_t (*bmap)(struct address_space *, sector_t);
int (*invalidatepage) (struct page *, unsigned long); int (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, int); int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs); loff_t offset, unsigned long nr_segs);
struct page* (*get_xip_page)(struct address_space *, sector_t, struct page* (*get_xip_page)(struct address_space *, sector_t,
......
...@@ -69,7 +69,7 @@ extern int journal_enable_debug; ...@@ -69,7 +69,7 @@ extern int journal_enable_debug;
#define jbd_debug(f, a...) /**/ #define jbd_debug(f, a...) /**/
#endif #endif
extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry); extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
#define jbd_kmalloc(size, flags) \ #define jbd_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \ #define jbd_rep_kmalloc(size, flags) \
...@@ -890,7 +890,7 @@ extern int journal_forget (handle_t *, struct buffer_head *); ...@@ -890,7 +890,7 @@ extern int journal_forget (handle_t *, struct buffer_head *);
extern void journal_sync_buffer (struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *);
extern int journal_invalidatepage(journal_t *, extern int journal_invalidatepage(journal_t *,
struct page *, unsigned long); struct page *, unsigned long);
extern int journal_try_to_free_buffers(journal_t *, struct page *, int); extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
extern int journal_stop(handle_t *); extern int journal_stop(handle_t *);
extern int journal_flush (journal_t *); extern int journal_flush (journal_t *);
extern void journal_lock_updates (journal_t *); extern void journal_lock_updates (journal_t *);
......
...@@ -22,7 +22,7 @@ struct mb_cache_entry { ...@@ -22,7 +22,7 @@ struct mb_cache_entry {
}; };
struct mb_cache_op { struct mb_cache_op {
int (*free)(struct mb_cache_entry *, int); int (*free)(struct mb_cache_entry *, gfp_t);
}; };
/* Functions on caches */ /* Functions on caches */
......
...@@ -1972,7 +1972,7 @@ extern struct address_space_operations reiserfs_address_space_operations; ...@@ -1972,7 +1972,7 @@ extern struct address_space_operations reiserfs_address_space_operations;
/* fix_nodes.c */ /* fix_nodes.c */
#ifdef CONFIG_REISERFS_CHECK #ifdef CONFIG_REISERFS_CHECK
void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s); void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s);
void reiserfs_kfree(const void *vp, size_t size, struct super_block *s); void reiserfs_kfree(const void *vp, size_t size, struct super_block *s);
#else #else
static inline void *reiserfs_kmalloc(size_t size, int flags, static inline void *reiserfs_kmalloc(size_t size, int flags,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment