Commit 2b1f55b0 authored by Chris Mason

Remove Btrfs compat code for older kernels

Btrfs had compatibility code for kernels back to 2.6.18.  That code has
been removed and will be maintained in a separate backport
git tree from now on.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 9b49c9b9
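The removal is mechanical: each LINUX_VERSION_CODE guard that chose between an old and a new kernel API is deleted, and only the branch for current kernels is kept. A minimal before/after sketch of the pattern, based on the freezer include guard in the first hunk below (the comments are editorial, not from the source):

    /* Before: select the header that provides try_to_freeze() at compile time. */
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
    # include <linux/freezer.h>
    #else
    # include <linux/sched.h>    /* pre-2.6.20 kernels carried the freezer helpers here */
    #endif

    /* After: mainline only targets current kernels, so the guard is dropped. */
    #include <linux/freezer.h>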
@@ -20,13 +20,7 @@
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 # include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
 #include "async-thread.h"
 /*
...
 #ifndef _COMPAT_H_
 #define _COMPAT_H_
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)
-#define trylock_page(page) (!TestSetPageLocked(page))
-#endif
+#define btrfs_drop_nlink(inode) drop_nlink(inode)
+#define btrfs_inc_nlink(inode) inc_nlink(inode)
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
 static inline struct dentry *d_obtain_alias(struct inode *inode)
@@ -22,39 +21,4 @@ static inline struct dentry *d_obtain_alias(struct inode *inode)
 }
 #endif
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static inline void btrfs_drop_nlink(struct inode *inode)
-{
-        inode->i_nlink--;
-}
-static inline void btrfs_inc_nlink(struct inode *inode)
-{
-        inode->i_nlink++;
-}
-#else
-# define btrfs_drop_nlink(inode) drop_nlink(inode)
-# define btrfs_inc_nlink(inode) inc_nlink(inode)
-#endif
-/*
- * Even if AppArmor isn't enabled, it still has different prototypes.
- * Add more distro/version pairs here to declare which has AppArmor applied.
- */
-#if defined(CONFIG_SUSE_KERNEL)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-#  define REMOVE_SUID_PATH 1
-# endif
-#endif
-/*
- * catch any other distros that have patched in apparmor.  This isn't
- * 100% reliable because it won't catch people that hand compile their
- * own distro kernels without apparmor compiled in.  But, it is better
- * than nothing.
- */
-#ifdef CONFIG_SECURITY_APPARMOR
-# define REMOVE_SUID_PATH 1
-#endif
 #endif /* _COMPAT_H_ */
@@ -96,13 +96,7 @@ static inline u32 __btrfs_crc32c(u32 crc, unsigned char const *address,
  * We must workaround older implementations of crc32c_le()
  * found on older kernel versions.
  */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-#define btrfs_crc32c(seed, data, length) \
-        __cpu_to_le32( __btrfs_crc32c( __le32_to_cpu(seed), \
-                        (unsigned char const *)data, length) )
-#else
 #define btrfs_crc32c(seed, data, length) \
         __btrfs_crc32c(seed, (unsigned char const *)data, length)
-#endif
 #endif
@@ -1472,12 +1472,9 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level) {
         ((unsigned long)(btrfs_leaf_data(leaf) + \
         btrfs_item_offset_nr(leaf, slot)))
-static inline struct dentry *fdentry(struct file *file) {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-        return file->f_dentry;
-#else
+static inline struct dentry *fdentry(struct file *file)
+{
         return file->f_path.dentry;
-#endif
 }
 /* extent-tree.c */
...
@@ -26,11 +26,7 @@
 #include <linux/buffer_head.h> // for block_sync_page
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 # include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
 #include "crc32c.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -373,21 +369,11 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
         return ret;
 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_workqueue_bio(struct bio *bio, int err)
-#else
-static int end_workqueue_bio(struct bio *bio,
-                             unsigned int bytes_done, int err)
-#endif
 {
         struct end_io_wq *end_io_wq = bio->bi_private;
         struct btrfs_fs_info *fs_info;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        if (bio->bi_size)
-                return 1;
-#endif
         fs_info = end_io_wq->info;
         end_io_wq->error = err;
         end_io_wq->work.func = end_workqueue_fn;
@@ -397,10 +383,6 @@ static int end_workqueue_bio(struct bio *bio,
                                    &end_io_wq->work);
         else
                 btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        return 0;
-#endif
 }
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
@@ -1161,9 +1143,7 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
         bdi_init(bdi);
-#endif
         bdi->ra_pages = default_backing_dev_info.ra_pages;
         bdi->state = 0;
         bdi->capabilities = default_backing_dev_info.capabilities;
@@ -1242,11 +1222,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
         bio->bi_private = end_io_wq->private;
         bio->bi_end_io = end_io_wq->end_io;
         kfree(end_io_wq);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        bio_endio(bio, bio->bi_size, error);
-#else
         bio_endio(bio, error);
-#endif
 }
 static int cleaner_kthread(void *arg)
@@ -1673,9 +1649,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         kfree(extent_root);
         kfree(tree_root);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
         bdi_destroy(&fs_info->bdi);
-#endif
         kfree(fs_info);
         return ERR_PTR(err);
 }
@@ -1936,9 +1910,7 @@ int close_ctree(struct btrfs_root *root)
         btrfs_close_devices(fs_info->fs_devices);
         btrfs_mapping_tree_free(&fs_info->mapping_tree);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
         bdi_destroy(&fs_info->bdi);
-#endif
         kfree(fs_info->extent_root);
         kfree(fs_info->tree_root);
...
@@ -7,12 +7,6 @@
 #include "export.h"
 #include "compat.h"
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-#define FILEID_BTRFS_WITHOUT_PARENT 0x4d
-#define FILEID_BTRFS_WITH_PARENT 0x4e
-#define FILEID_BTRFS_WITH_PARENT_ROOT 0x4f
-#endif
 #define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4)
 #define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4)
 #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4)
...
@@ -1397,12 +1397,7 @@ static int check_page_writeback(struct extent_io_tree *tree,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_writepage(struct bio *bio, int err)
-#else
-static int end_bio_extent_writepage(struct bio *bio,
-                                    unsigned int bytes_done, int err)
-#endif
 {
         int uptodate = err == 0;
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1412,10 +1407,6 @@ static int end_bio_extent_writepage(struct bio *bio,
         int whole_page;
         int ret;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        if (bio->bi_size)
-                return 1;
-#endif
         do {
                 struct page *page = bvec->bv_page;
                 tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1461,10 +1452,8 @@ static int end_bio_extent_writepage(struct bio *bio,
                 else
                         check_page_writeback(tree, page);
         } while (bvec >= bio->bi_io_vec);
         bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        return 0;
-#endif
 }
 /*
@@ -1478,12 +1467,7 @@ static int end_bio_extent_writepage(struct bio *bio,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_readpage(struct bio *bio, int err)
-#else
-static int end_bio_extent_readpage(struct bio *bio,
-                                   unsigned int bytes_done, int err)
-#endif
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1493,11 +1477,6 @@ static int end_bio_extent_readpage(struct bio *bio,
         int whole_page;
         int ret;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        if (bio->bi_size)
-                return 1;
-#endif
         do {
                 struct page *page = bvec->bv_page;
                 tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1556,9 +1535,6 @@ static int end_bio_extent_readpage(struct bio *bio,
         } while (bvec >= bio->bi_io_vec);
         bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        return 0;
-#endif
 }
 /*
@@ -1566,12 +1542,7 @@ static int end_bio_extent_readpage(struct bio *bio,
  * the structs in the extent tree when done, and set the uptodate bits
  * as appropriate.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_preparewrite(struct bio *bio, int err)
-#else
-static int end_bio_extent_preparewrite(struct bio *bio,
-                                       unsigned int bytes_done, int err)
-#endif
 {
         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1579,11 +1550,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
         u64 start;
         u64 end;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        if (bio->bi_size)
-                return 1;
-#endif
         do {
                 struct page *page = bvec->bv_page;
                 tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1607,9 +1573,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
         } while (bvec >= bio->bi_io_vec);
         bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        return 0;
-#endif
 }
 static struct bio *
@@ -2079,12 +2042,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         return 0;
 }
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-/* Taken directly from 2.6.23 with a mod for a lockpage hook */
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
-                           void *data);
-#endif
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2201,10 +2158,9 @@ int extent_write_cache_pages(struct extent_io_tree *tree,
         }
         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                 mapping->writeback_index = index;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
         if (wbc->range_cont)
                 wbc->range_start = index << PAGE_CACHE_SHIFT;
-#endif
         return ret;
 }
 EXPORT_SYMBOL(extent_write_cache_pages);
@@ -2560,18 +2516,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
  * by increasing the reference count.  So we know the page must
  * be in the radix tree.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
         rcu_read_lock();
-#else
-        read_lock_irq(&mapping->tree_lock);
-#endif
         p = radix_tree_lookup(&mapping->page_tree, i);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
         rcu_read_unlock();
-#else
-        read_unlock_irq(&mapping->tree_lock);
-#endif
         return p;
 }
@@ -2773,21 +2721,13 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                         }
                 }
                 clear_page_dirty_for_io(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
                 spin_lock_irq(&page->mapping->tree_lock);
-#else
-                read_lock_irq(&page->mapping->tree_lock);
-#endif
                 if (!PageDirty(page)) {
                         radix_tree_tag_clear(&page->mapping->page_tree,
                                              page_index(page),
                                              PAGECACHE_TAG_DIRTY);
                 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
                 spin_unlock_irq(&page->mapping->tree_lock);
-#else
-                read_unlock_irq(&page->mapping->tree_lock);
-#endif
                 unlock_page(page);
         }
         return 0;
...
@@ -871,15 +871,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                 goto out_nolock;
         if (count == 0)
                 goto out_nolock;
-#ifdef REMOVE_SUID_PATH
-        err = remove_suid(&file->f_path);
-#else
-# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
         err = file_remove_suid(file);
-# else
-        err = remove_suid(fdentry(file));
-# endif
-#endif
         if (err)
                 goto out_nolock;
         file_update_time(file);
@@ -1003,17 +996,10 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                         btrfs_commit_transaction(trans, root);
                 }
         } else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-                do_sync_file_range(file, start_pos,
-                                   start_pos + num_written - 1,
-                                   SYNC_FILE_RANGE_WRITE |
-                                   SYNC_FILE_RANGE_WAIT_AFTER);
-#else
                 do_sync_mapping_range(inode->i_mapping, start_pos,
                                       start_pos + num_written - 1,
                                       SYNC_FILE_RANGE_WRITE |
                                       SYNC_FILE_RANGE_WAIT_AFTER);
-#endif
                 invalidate_mapping_pages(inode->i_mapping,
                         start_pos >> PAGE_CACHE_SHIFT,
                         (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
@@ -1097,12 +1083,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
 }
 static struct vm_operations_struct btrfs_file_vm_ops = {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-        .nopage = filemap_nopage,
-        .populate = filemap_populate,
-#else
         .fault = filemap_fault,
-#endif
         .page_mkwrite = btrfs_page_mkwrite,
 };
@@ -1118,9 +1099,6 @@ struct file_operations btrfs_file_operations = {
         .read = do_sync_read,
         .aio_read = generic_file_aio_read,
         .splice_read = generic_file_splice_read,
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-        .sendfile = generic_file_sendfile,
-#endif
         .write = btrfs_file_write,
         .mmap = btrfs_file_mmap,
         .open = generic_file_open,
...
@@ -2073,104 +2073,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
         return ret;
 }
-/* Kernels earlier than 2.6.28 still have the NFS deadlock where nfsd
-   will call the file system's ->lookup() method from within its
-   filldir callback, which in turn was called from the file system's
-   ->readdir() method.  And will deadlock for many file systems. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-struct nfshack_dirent {
-        u64             ino;
-        loff_t          offset;
-        int             namlen;
-        unsigned int    d_type;
-        char            name[];
-};
-struct nfshack_readdir {
-        char            *dirent;
-        size_t          used;
-        int             full;
-};
-static int btrfs_nfshack_filldir(void *__buf, const char *name, int namlen,
-                                 loff_t offset, u64 ino, unsigned int d_type)
-{
-        struct nfshack_readdir *buf = __buf;
-        struct nfshack_dirent *de = (void *)(buf->dirent + buf->used);
-        unsigned int reclen;
-        reclen = ALIGN(sizeof(struct nfshack_dirent) + namlen, sizeof(u64));
-        if (buf->used + reclen > PAGE_SIZE) {
-                buf->full = 1;
-                return -EINVAL;
-        }
-        de->namlen = namlen;
-        de->offset = offset;
-        de->ino = ino;
-        de->d_type = d_type;
-        memcpy(de->name, name, namlen);
-        buf->used += reclen;
-        return 0;
-}
-static int btrfs_nfshack_readdir(struct file *file, void *dirent,
-                                 filldir_t filldir)
-{
-        struct nfshack_readdir buf;
-        struct nfshack_dirent *de;
-        int err;
-        int size;
-        loff_t offset;
-        buf.dirent = (void *)__get_free_page(GFP_KERNEL);
-        if (!buf.dirent)
-                return -ENOMEM;
-        offset = file->f_pos;
-        do {
-                unsigned int reclen;
-                buf.used = 0;
-                buf.full = 0;
-                err = btrfs_real_readdir(file, &buf, btrfs_nfshack_filldir);
-                if (err)
-                        break;
-                size = buf.used;
-                if (!size)
-                        break;
-                de = (struct nfshack_dirent *)buf.dirent;
-                while (size > 0) {
-                        offset = de->offset;
-                        if (filldir(dirent, de->name, de->namlen, de->offset,
-                                    de->ino, de->d_type))
-                                goto done;
-                        offset = file->f_pos;
-                        reclen = ALIGN(sizeof(*de) + de->namlen,
-                                       sizeof(u64));
-                        size -= reclen;
-                        de = (struct nfshack_dirent *)((char *)de + reclen);
-                }
-        } while (buf.full);
-done:
-        free_page((unsigned long)buf.dirent);
-        file->f_pos = offset;
-        return err;
-}
-#endif
 int btrfs_write_inode(struct inode *inode, int wait)
 {
         struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3311,13 +3213,8 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 {
         pgoff_t req_size = last_index - offset + 1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-        offset = page_cache_readahead(mapping, ra, file, offset, req_size);
-        return offset;
-#else
         page_cache_sync_readahead(mapping, ra, file, offset, req_size);
         return offset + req_size;
-#endif
 }
 struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -3373,14 +3270,7 @@ void btrfs_destroy_inode(struct inode *inode)
         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 static void init_once(void *foo)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
-static void init_once(struct kmem_cache * cachep, void *foo)
-#else
-static void init_once(void * foo, struct kmem_cache * cachep,
-                      unsigned long flags)
-#endif
 {
         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
@@ -3403,22 +3293,10 @@ void btrfs_destroy_cachep(void)
 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                       unsigned long extra_flags,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
-                                      void (*ctor)(void *)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
-                                      void (*ctor)(struct kmem_cache *, void *)
-#else
-                                      void (*ctor)(void *, struct kmem_cache *,
-                                                   unsigned long)
-#endif
-                                     )
+                                      void (*ctor)(void *))
 {
         return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
-                        SLAB_MEM_SPREAD | extra_flags), ctor
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-                        ,NULL
-#endif
-                        );
+                        SLAB_MEM_SPREAD | extra_flags), ctor);
 }
 int btrfs_init_cachep(void)
@@ -3666,12 +3544,7 @@ static int btrfs_set_page_dirty(struct page *page)
         return __set_page_dirty_nobuffers(page);
 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 static int btrfs_permission(struct inode *inode, int mask)
-#else
-static int btrfs_permission(struct inode *inode, int mask,
-                            struct nameidata *nd)
-#endif
 {
         if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
                 return -EACCES;
@@ -3702,11 +3575,7 @@ static struct inode_operations btrfs_dir_ro_inode_operations = {
 static struct file_operations btrfs_dir_file_operations = {
         .llseek = generic_file_llseek,
         .read = generic_read_dir,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-        .readdir = btrfs_nfshack_readdir,
-#else /* NFSd readdir/lookup deadlock is fixed */
         .readdir = btrfs_real_readdir,
-#endif
         .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
         .compat_ioctl = btrfs_ioctl,
...
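The inode.c hunks above also show how the slab constructor prototype converged: kernels before 2.6.24 used void (*ctor)(void *, struct kmem_cache *, unsigned long), 2.6.24 through 2.6.26 used void (*ctor)(struct kmem_cache *, void *), and 2.6.27 and later use plain void (*ctor)(void *). With only the last form supported, the helper kept by this commit is a thin wrapper; the sketch below is assembled from the kept lines above, and the cache name in the comment is illustrative, not quoted from this diff:

    struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                          unsigned long extra_flags,
                                          void (*ctor)(void *))
    {
            /* One constructor signature to pass through, no version branches. */
            return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
                            SLAB_MEM_SPREAD | extra_flags), ctor);
    }

    /* e.g. btrfs_cache_create("btrfs_inode_cache",
     *                         sizeof(struct btrfs_inode), 0, init_once); */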
@@ -349,10 +349,7 @@ static int btrfs_fill_super(struct super_block * sb,
         sb->s_root = root_dentry;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
         save_mount_options(sb, data);
-#endif
         return 0;
 fail_close:
@@ -566,11 +563,7 @@ static struct super_operations btrfs_super_ops = {
         .put_super = btrfs_put_super,
         .write_super = btrfs_write_super,
         .sync_fs = btrfs_sync_fs,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-        .read_inode = btrfs_read_locked_inode,
-#else
         .show_options = generic_show_options,
-#endif
         .write_inode = btrfs_write_inode,
         .dirty_inode = btrfs_dirty_inode,
         .alloc_inode = btrfs_alloc_inode,
...
@@ -28,7 +28,6 @@
 #include "disk-io.h"
 #include "transaction.h"
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
 static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
 {
         return snprintf(buf, PAGE_SIZE, "%llu\n",
@@ -267,35 +266,3 @@ void btrfs_exit_sysfs(void)
         kset_unregister(btrfs_kset);
 }
-#else
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
-{
-        return 0;
-}
-int btrfs_sysfs_add_root(struct btrfs_root *root)
-{
-        return 0;
-}
-void btrfs_sysfs_del_root(struct btrfs_root *root)
-{
-        return;
-}
-void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
-{
-        return;
-}
-int btrfs_init_sysfs(void)
-{
-        return 0;
-}
-void btrfs_exit_sysfs(void)
-{
-        return;
-}
-#endif
@@ -2080,20 +2080,11 @@ int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
 }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_multi_stripe(struct bio *bio, int err)
-#else
-static int end_bio_multi_stripe(struct bio *bio,
-                                unsigned int bytes_done, int err)
-#endif
 {
         struct btrfs_multi_bio *multi = bio->bi_private;
         int is_orig_bio = 0;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        if (bio->bi_size)
-                return 1;
-#endif
         if (err)
                 atomic_inc(&multi->error);
@@ -2122,17 +2113,10 @@ static int end_bio_multi_stripe(struct bio *bio,
                 }
                 kfree(multi);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-                bio_endio(bio, bio->bi_size, err);
-#else
                 bio_endio(bio, err);
-#endif
         } else if (!is_orig_bio) {
                 bio_put(bio);
         }
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-        return 0;
-#endif
 }
 struct async_sched {
@@ -2248,11 +2232,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                 } else {
                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
                         bio->bi_sector = logical >> 9;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-                        bio_endio(bio, bio->bi_size, -EIO);
-#else
                         bio_endio(bio, -EIO);
-#endif
                 }
                 dev_nr++;
         }
...
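Most of the churn in disk-io.c, extent_io.c and volumes.c above comes from a single interface change: since 2.6.24 a bio completion handler has the form void (*bi_end_io)(struct bio *, int) and is called exactly once, so the old partial-completion guard (if (bio->bi_size) return 1;) and the three-argument bio_endio(bio, bio->bi_size, err) disappear. A minimal sketch of the style that remains; my_ctx and my_end_io are illustrative names, not Btrfs symbols:

    #include <linux/bio.h>
    #include <linux/completion.h>

    struct my_ctx {
            struct completion done;
            int error;
    };

    /* Post-2.6.23 completion hook: two arguments, void return, fires once
     * per bio, so there is no partial-completion bookkeeping to do. */
    static void my_end_io(struct bio *bio, int err)
    {
            struct my_ctx *ctx = bio->bi_private;

            ctx->error = err;
            complete(&ctx->done);
    }

    /* Completing a bio from the submitter's side likewise takes two arguments,
     * e.g. bio_endio(bio, -EIO); as in btrfs_map_bio() above. */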