Commit 8c55e204 authored by Dave Kleikamp, committed by Theodore Ts'o

EXT4: Fix whitespace

Replace a lot of spaces with tabs
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 54ca4123
@@ -30,15 +30,15 @@
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;
	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
...
@@ -374,7 +374,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
				le32_to_cpu(ix[-1].ei_block));
		}
		BUG_ON(k && le32_to_cpu(ix->ei_block)
				<= le32_to_cpu(ix[-1].ei_block));
		if (block < le32_to_cpu(ix->ei_block))
			break;
		chix = ix;
@@ -423,8 +423,8 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
	path->p_ext = l - 1;
	ext_debug(" -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			le16_to_cpu(path->p_ext->ee_len));
#ifdef CHECK_BINSEARCH
@@ -435,7 +435,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					<= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
@@ -577,7 +577,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			> le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
	err = ext4_ext_dirty(handle, inode, curp);
@@ -621,12 +621,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}
	/*
@@ -684,9 +684,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				le16_to_cpu(path[depth].p_ext->ee_len),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
@@ -765,9 +765,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
			EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%d in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
@@ -1212,12 +1212,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				le16_to_cpu(newext->ee_len));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			> le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
@@ -1225,9 +1225,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					le16_to_cpu(newext->ee_len),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
@@ -1358,9 +1358,9 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = le16_to_cpu(ex->ee_len);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}
@@ -1431,16 +1431,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
@@ -1468,9 +1468,9 @@ ext4_ext_in_cache(struct inode *inode, unsigned long block,
	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:%llu\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
@@ -1956,9 +1956,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			/* we should allocate requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				- le32_to_cpu(newex.ee_block)
				+ ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
					(iblock - le32_to_cpu(newex.ee_block));
@@ -1987,7 +1987,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
	ex = path[depth].p_ext;
	if (ex) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
@@ -2000,7 +2000,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
		if (ee_len > EXT_MAX_LEN)
			goto out2;
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
...
@@ -255,8 +255,8 @@ static int verify_chain(Indirect *from, Indirect *to)
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
...
@@ -46,7 +46,7 @@
 */
#define NAMEI_RA_CHUNKS  2
#define NAMEI_RA_BLOCKS  4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext4_append(handle_t *handle,
@@ -241,7 +241,7 @@ static inline unsigned dx_node_limit (struct inode *dir)
static void dx_show_index (char * label, struct dx_entry *entries)
{
	int i, n = dx_get_count (entries);
	printk("%s index ", label);
	for (i = 0; i < n; i++) {
		printk("%x->%u ", i? dx_get_hash(entries + i) :
				0, dx_get_block(entries + i));
...
@@ -1985,7 +1985,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
	if (bd_claim(bdev, sb)) {
		printk(KERN_ERR
			"EXT4: failed to claim external journal device.\n");
		blkdev_put(bdev);
		return NULL;
	}
...
@@ -32,9 +32,9 @@
/*
 * Define EXT4_RESERVATION to reserve data blocks for expanding files
 */
#define EXT4_DEFAULT_RESERVE_BLOCKS	8
/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
#define EXT4_MAX_RESERVE_BLOCKS		1027
#define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
/*
 * Always enable hashed directories
@@ -204,12 +204,12 @@ struct ext4_group_desc
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
	__u32 group;		/* Group number for this data */
	__u64 block_bitmap;	/* Absolute block number of block bitmap */
	__u64 inode_bitmap;	/* Absolute block number of inode bitmap */
	__u64 inode_table;	/* Absolute block number of inode table start */
	__u32 blocks_count;	/* Total number of blocks in this group */
	__u16 reserved_blocks;	/* Number of reserved blocks in this group */
	__u16 unused;
};
@@ -310,7 +310,7 @@ struct ext4_inode {
	__u8	l_i_frag;	/* Fragment number */
	__u8	l_i_fsize;	/* Fragment size */
	__le16	l_i_file_acl_high;
	__le16	l_i_uid_high;	/* these 2 fields */
	__le16	l_i_gid_high;	/* were reserved2[0] */
	__u32	l_i_reserved2;
} linux2;
@@ -780,9 +780,9 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 * Ok, these declarations are also in <linux/kernel.h> but none of the
 * ext4 source programs needs to include it so they are duplicated here.
 */
# define NORET_TYPE	/**/
# define ATTRIB_NORET	__attribute__((noreturn))
# define NORET_AND	noreturn,
/* balloc.c */
extern unsigned int ext4_block_group(struct super_block *sb,
...
@@ -151,8 +151,8 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
	((struct ext4_extent_idx *) (((char *) (__hdr__)) +	\
		sizeof(struct ext4_extent_header)))
#define EXT_HAS_FREE_INDEX(__path__) \
	(le16_to_cpu((__path__)->p_hdr->eh_entries) \
		< le16_to_cpu((__path__)->p_hdr->eh_max))
#define EXT_LAST_EXTENT(__hdr__) \
	(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
#define EXT_LAST_INDEX(__hdr__) \
...
@@ -41,14 +41,14 @@ struct ext4_reserve_window_node {
struct ext4_block_alloc_info {
	/* information about reservation window */
	struct ext4_reserve_window_node rsv_window_node;
	/*
	 * was i_next_alloc_block in ext4_inode_info
	 * is the logical (file-relative) number of the
	 * most-recently-allocated block in this file.
	 * We use this for detecting linearly ascending allocation requests.
	 */
	__u32 last_alloc_logical_block;
	/*
	 * Was i_next_alloc_goal in ext4_inode_info
	 * is the *physical* companion to i_next_alloc_block.
@@ -56,7 +56,7 @@ struct ext4_block_alloc_info {
	 * allocated to this file. This give us the goal (target) for the next
	 * allocation when we detect linearly ascending requests.
	 */
	ext4_fsblk_t last_alloc_physical_block;
};
#define rsv_start rsv_window._rsv_start
...