Commit 1619ed8f authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw:
  GFS2: local functions should be static
  GFS2: We only need one ACL getting function
  GFS2: Fix multi-block allocation
  GFS2: decouple quota allocations from block allocations
  GFS2: split function rgblk_search
  GFS2: Fix up "off by one" in the previous patch
  GFS2: move toward a generic multi-block allocator
  GFS2: O_(D)SYNC support for fallocate
  GFS2: remove vestigial al_alloced
  GFS2: combine gfs2_alloc_block and gfs2_alloc_di
  GFS2: Add non-try locks back to get_local_rgrp
  GFS2: f_ra is always valid in dir readahead function
  GFS2: Fix very unlikely memory leak in ACL xattr code
  GFS2: More automated code analysis fixes
  GFS2: Add readahead to sequential directory traversal
  GFS2: Fix up REQ flags
parents 29ad0de2 46cc1e5f
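
The common thread in this series is that the old struct gfs2_alloc is split in two: struct gfs2_qadata carries only the quota bookkeeping, struct gfs2_blkreserv carries the resource-group reservation, and the number of blocks wanted is now passed directly to gfs2_inplace_reserve() instead of being stashed in al->al_requested. A minimal sketch of the resulting caller pattern, condensed from the gfs2_write_begin() and do_grow() hunks below (the function name example_reserve and the reduced error handling are illustrative only, not part of the patch set):

/* Sketch only: mirrors the post-series call sequence used by callers such as
 * gfs2_write_begin() and do_grow(). example_reserve() is a made-up name. */
static int example_reserve(struct gfs2_inode *ip, unsigned int blocks)
{
	struct gfs2_qadata *qa;
	int error;

	qa = gfs2_qadata_get(ip);	/* quota allocation data only */
	if (!qa)
		return -ENOMEM;

	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out_qadata;

	/* The request size is an argument now; al->al_requested is gone. */
	error = gfs2_inplace_reserve(ip, blocks);
	if (error)
		goto out_qunlock;

	/* On success the caller later undoes this in reverse order:
	 * gfs2_inplace_release(ip), gfs2_quota_unlock(ip), gfs2_qadata_put(ip). */
	return 0;

out_qunlock:
	gfs2_quota_unlock(ip);
out_qadata:
	gfs2_qadata_put(ip);
	return error;
}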
......@@ -38,8 +38,9 @@ static const char *gfs2_acl_name(int type)
return NULL;
}
static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct posix_acl *acl;
const char *name;
char *data;
......@@ -67,11 +68,6 @@ static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
return acl;
}
struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
return gfs2_acl_get(GFS2_I(inode), type);
}
static int gfs2_set_mode(struct inode *inode, umode_t mode)
{
int error = 0;
......@@ -125,7 +121,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
if (S_ISLNK(inode->i_mode))
return 0;
acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
acl = gfs2_get_acl(&dip->i_inode, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl) {
......@@ -166,7 +162,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
unsigned int len;
int error;
acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
acl = gfs2_get_acl(&ip->i_inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl)
......@@ -216,7 +212,7 @@ static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
if (type < 0)
return type;
acl = gfs2_acl_get(GFS2_I(inode), type);
acl = gfs2_get_acl(inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
......
......@@ -615,7 +615,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
struct gfs2_alloc *al = NULL;
struct gfs2_qadata *qa = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
struct page *page;
......@@ -639,8 +639,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (!al) {
qa = gfs2_qadata_get(ip);
if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
......@@ -649,8 +649,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (error)
goto out_alloc_put;
al->al_requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error)
goto out_qunlock;
}
......@@ -711,7 +710,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
out_unlock:
if (&ip->i_inode == sdp->sd_rindex) {
......@@ -848,7 +847,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
unsigned int to = from + len;
int ret;
......@@ -880,10 +879,11 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
brelse(dibh);
failed:
gfs2_trans_end(sdp);
if (al) {
if (ip->i_res)
gfs2_inplace_release(ip);
if (qa) {
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
if (inode == sdp->sd_rindex) {
gfs2_glock_dq(&m_ip->i_gh);
......
......@@ -133,7 +133,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
and write it out to disk */
unsigned int n = 1;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
goto out_brelse;
if (isdir) {
......@@ -503,7 +503,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
do {
int error;
n = blks - alloced;
error = gfs2_alloc_block(ip, &bn, &n);
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return error;
alloced += n;
......@@ -743,9 +743,6 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
else if (ip->i_depth)
revokes = sdp->sd_inptrs;
if (error)
return error;
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
bstart = 0;
blen = 0;
......@@ -1044,7 +1041,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
find_metapath(sdp, lblock, &mp, ip->i_height);
if (!gfs2_alloc_get(ip))
if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
......@@ -1064,7 +1061,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
gfs2_quota_unhold(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......@@ -1166,21 +1163,20 @@ static int do_grow(struct inode *inode, u64 size)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
struct gfs2_alloc *al = NULL;
struct gfs2_qadata *qa = NULL;
int error;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
al = gfs2_alloc_get(ip);
if (al == NULL)
qa = gfs2_qadata_get(ip);
if (qa == NULL)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto do_grow_alloc_put;
al->al_requested = 1;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, 1);
if (error)
goto do_grow_qunlock;
}
......@@ -1189,7 +1185,7 @@ static int do_grow(struct inode *inode, u64 size)
if (error)
goto do_grow_release;
if (al) {
if (qa) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
goto do_end_trans;
......@@ -1208,12 +1204,12 @@ static int do_grow(struct inode *inode, u64 size)
do_end_trans:
gfs2_trans_end(sdp);
do_grow_release:
if (al) {
if (qa) {
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
do_grow_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
return error;
}
......
......@@ -76,6 +76,8 @@
#define IS_LEAF 1 /* Hashed (leaf) directory */
#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
......@@ -821,7 +823,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
struct gfs2_dirent *dent;
struct qstr name = { .name = "", .len = 0, .hash = 0 };
error = gfs2_alloc_block(ip, &bn, &n);
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
......@@ -1376,6 +1378,52 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
return error;
}
/**
* gfs2_dir_readahead - Issue read-ahead requests for leaf blocks.
*
* Note: we can't calculate each index like dir_e_read can because we don't
* have the leaf, and therefore we don't have the depth, and therefore we
* don't have the length. So we have to just read enough ahead to make up
* for the loss of information.
*/
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
struct file_ra_state *f_ra)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *gl = ip->i_gl;
struct buffer_head *bh;
u64 blocknr = 0, last;
unsigned count;
/* First check if we've already read-ahead for the whole range. */
if (index + MAX_RA_BLOCKS < f_ra->start)
return;
f_ra->start = max((pgoff_t)index, f_ra->start);
for (count = 0; count < MAX_RA_BLOCKS; count++) {
if (f_ra->start >= hsize) /* if exceeded the hash table */
break;
last = blocknr;
blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
f_ra->start++;
if (blocknr == last)
continue;
bh = gfs2_getbuf(gl, blocknr, 1);
if (trylock_buffer(bh)) {
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
brelse(bh);
continue;
}
bh->b_end_io = end_buffer_read_sync;
submit_bh(READA | REQ_META, bh);
continue;
}
brelse(bh);
}
}
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
......@@ -1388,7 +1436,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
*/
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
u32 hsize, len = 0;
......@@ -1402,10 +1450,14 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
if (dip->i_hash_cache == NULL)
f_ra->start = 0;
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
return PTR_ERR(lp);
gfs2_dir_readahead(inode, hsize, index, f_ra);
while (index < hsize) {
error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
......@@ -1423,7 +1475,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
}
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
filldir_t filldir, struct file_ra_state *f_ra)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
......@@ -1437,7 +1489,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
return 0;
if (dip->i_diskflags & GFS2_DIF_EXHASH)
return dir_e_read(inode, offset, opaque, filldir);
return dir_e_read(inode, offset, opaque, filldir, f_ra);
if (!gfs2_is_stuffed(dip)) {
gfs2_consist_inode(dip);
......@@ -1798,7 +1850,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (!ht)
return -ENOMEM;
if (!gfs2_alloc_get(dip)) {
if (!gfs2_qadata_get(dip)) {
error = -ENOMEM;
goto out;
}
......@@ -1887,7 +1939,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
out_put:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
out:
kfree(ht);
return error;
......
......@@ -25,7 +25,7 @@ extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
const struct gfs2_inode *ip);
extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
extern int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir);
filldir_t filldir, struct file_ra_state *f_ra);
extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
const struct gfs2_inode *nip, unsigned int new_type);
......
......@@ -99,6 +99,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
struct gfs2_holder gh;
u64 offset = 0;
int error;
struct file_ra_state f_ra = { .start = 0 };
if (!dir)
return -EINVAL;
......@@ -118,7 +119,7 @@ static int gfs2_get_name(struct dentry *parent, char *name,
if (error)
return error;
error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);
error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir, &f_ra);
gfs2_glock_dq_uninit(&gh);
......
......@@ -105,7 +105,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
return error;
}
error = gfs2_dir_read(dir, &offset, dirent, filldir);
error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
gfs2_glock_dq_uninit(&d_gh);
......@@ -365,7 +365,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
loff_t size;
int ret;
......@@ -393,16 +393,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
}
ret = -ENOMEM;
al = gfs2_alloc_get(ip);
if (al == NULL)
qa = gfs2_qadata_get(ip);
if (qa == NULL)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
goto out_alloc_put;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
ret = gfs2_inplace_reserve(ip);
ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (ret)
goto out_quota_unlock;
......@@ -448,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out_quota_unlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
......@@ -609,7 +608,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
struct inode *inode = mapping->host;
int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
struct gfs2_inode *ip = GFS2_I(inode);
int ret, ret1 = 0;
int ret = 0, ret1 = 0;
if (mapping->nrpages) {
ret1 = filemap_fdatawrite_range(mapping, start, end);
......@@ -750,8 +749,10 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
const loff_t pos = offset;
const loff_t count = len;
loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
loff_t max_chunk_size = UINT_MAX & bsize_mask;
......@@ -782,8 +783,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
while (len > 0) {
if (len < bytes)
bytes = len;
al = gfs2_alloc_get(ip);
if (!al) {
qa = gfs2_qadata_get(ip);
if (!qa) {
error = -ENOMEM;
goto out_unlock;
}
......@@ -795,8 +796,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
......@@ -810,7 +810,6 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
max_bytes = bytes;
calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
&max_bytes, &data_blocks, &ind_blocks);
al->al_requested = data_blocks + ind_blocks;
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
RES_RG_HDR + gfs2_rg_blocks(ip);
......@@ -832,8 +831,11 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
offset += max_bytes;
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
}
if (error == 0)
error = generic_write_sync(file, pos, count);
goto out_unlock;
out_trans_fail:
......@@ -841,7 +843,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
out_qunlock:
gfs2_quota_unlock(ip);
out_alloc_put:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&ip->i_gh);
out_uninit:
......
......@@ -244,17 +244,16 @@ struct gfs2_glock {
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
struct gfs2_alloc {
struct gfs2_qadata { /* quota allocation data */
/* Quota stuff */
struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
unsigned int al_qd_num;
u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
u32 al_alloced; /* Filled in by gfs2_alloc_*() */
struct gfs2_quota_data *qa_qd[2*MAXQUOTAS];
struct gfs2_holder qa_qd_ghs[2*MAXQUOTAS];
unsigned int qa_qd_num;
};
/* Filled in by gfs2_inplace_reserve() */
struct gfs2_holder al_rgd_gh;
struct gfs2_blkreserv {
u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
struct gfs2_holder rs_rgd_gh; /* Filled in by gfs2_inplace_reserve() */
};
enum {
......@@ -275,7 +274,8 @@ struct gfs2_inode {
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_alloc *i_alloc;
struct gfs2_qadata *i_qadata; /* quota allocation data */
struct gfs2_blkreserv *i_res; /* resource group block reservation */
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
......
......@@ -389,12 +389,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
int error;
int dblocks = 1;
if (gfs2_alloc_get(dip) == NULL)
return -ENOMEM;
error = gfs2_rindex_update(sdp);
if (error)
fs_warn(sdp, "rindex update returns %d\n", error);
dip->i_alloc->al_requested = RES_DINODE;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, RES_DINODE);
if (error)
goto out;
......@@ -402,14 +403,13 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
if (error)
goto out_ipreserv;
error = gfs2_alloc_di(dip, no_addr, generation);
error = gfs2_alloc_blocks(dip, no_addr, &dblocks, 1, generation);
gfs2_trans_end(sdp);
out_ipreserv:
gfs2_inplace_release(dip);
out:
gfs2_alloc_put(dip);
return error;
}
......@@ -525,7 +525,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
if (!gfs2_alloc_get(dip))
if (!gfs2_qadata_get(dip))
return -ENOMEM;
error = gfs2_quota_lock(dip, uid, gid);
......@@ -547,7 +547,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
out_quota:
gfs2_quota_unlock(dip);
out:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
return error;
}
......@@ -555,13 +555,13 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int alloc_required;
struct buffer_head *dibh;
int error;
al = gfs2_alloc_get(dip);
if (!al)
qa = gfs2_qadata_get(dip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
......@@ -576,9 +576,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
if (error)
goto fail_quota_locks;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto fail_quota_locks;
......@@ -619,11 +617,11 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
gfs2_quota_unlock(dip);
fail:
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
return error;
}
int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
void *fs_info)
{
const struct xattr *xattr;
......@@ -728,9 +726,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
brelse(bh);
gfs2_trans_end(sdp);
gfs2_inplace_release(dip);
/* Check if we reserved space in the rgrp. Function link_dinode may
not, depending on whether alloc is required. */
if (dip->i_res)
gfs2_inplace_release(dip);
gfs2_quota_unlock(dip);
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
mark_inode_dirty(inode);
gfs2_glock_dq_uninit_m(2, ghs);
d_instantiate(dentry, inode);
......@@ -875,8 +876,9 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
struct gfs2_alloc *al = gfs2_alloc_get(dip);
if (!al) {
struct gfs2_qadata *qa = gfs2_qadata_get(dip);
if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
......@@ -885,9 +887,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (error)
goto out_alloc;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(dip);
error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
......@@ -930,7 +930,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
gfs2_quota_unlock(dip);
out_alloc:
if (alloc_required)
gfs2_alloc_put(dip);
gfs2_qadata_put(dip);
out_gunlock:
gfs2_glock_dq(ghs + 1);
out_child:
......@@ -1037,12 +1037,14 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
struct buffer_head *bh;
struct gfs2_holder ghs[3];
struct gfs2_rgrpd *rgd;
int error;
int error = -EROFS;
gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
if (!rgd)
goto out_inodes;
gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
......@@ -1088,12 +1090,13 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
out_gunlock:
gfs2_glock_dq(ghs + 2);
out_rgrp:
gfs2_holder_uninit(ghs + 2);
gfs2_glock_dq(ghs + 1);
out_child:
gfs2_holder_uninit(ghs + 1);
gfs2_glock_dq(ghs);
out_parent:
gfs2_holder_uninit(ghs + 2);
out_inodes:
gfs2_holder_uninit(ghs + 1);
gfs2_holder_uninit(ghs);
return error;
}
......@@ -1350,8 +1353,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
error = 0;
if (alloc_required) {
struct gfs2_alloc *al = gfs2_alloc_get(ndip);
if (!al) {
struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
if (!qa) {
error = -ENOMEM;
goto out_gunlock;
}
......@@ -1360,9 +1364,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
goto out_alloc;
al->al_requested = sdp->sd_max_dirres;
error = gfs2_inplace_reserve(ndip);
error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
if (error)
goto out_gunlock_q;
......@@ -1423,7 +1425,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
gfs2_quota_unlock(ndip);
out_alloc:
if (alloc_required)
gfs2_alloc_put(ndip);
gfs2_qadata_put(ndip);
out_gunlock:
while (x--) {
gfs2_glock_dq(ghs + x);
......@@ -1584,7 +1586,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
ogid = ngid = NO_QUOTA_CHANGE;
if (!gfs2_alloc_get(ip))
if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_lock(ip, nuid, ngid);
......@@ -1616,7 +1618,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
out_gunlock_q:
gfs2_quota_unlock(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......
......@@ -626,7 +626,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
else
submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
......
......@@ -40,7 +40,8 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_alloc = NULL;
ip->i_qadata = NULL;
ip->i_res = NULL;
ip->i_hash_cache = NULL;
}
......
......@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
dblock++;
extlen--;
......@@ -444,7 +444,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
ll_rw_block(READA, 1, &bh);
ll_rw_block(READA | REQ_META, 1, &bh);
brelse(bh);
dblock++;
extlen--;
......
......@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
submit_bio(READ_SYNC | REQ_META, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
......
......@@ -494,11 +494,11 @@ static void qdsb_put(struct gfs2_quota_data *qd)
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_quota_data **qd = al->al_qd;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data **qd = qa->qa_qd;
int error;
if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
return -EIO;
......@@ -508,20 +508,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
}
......@@ -529,7 +529,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
al->al_qd_num++;
qa->qa_qd_num++;
qd++;
}
......@@ -542,16 +542,16 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
unsigned int x;
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
for (x = 0; x < al->al_qd_num; x++) {
qdsb_put(al->al_qd[x]);
al->al_qd[x] = NULL;
for (x = 0; x < qa->qa_qd_num; x++) {
qdsb_put(qa->qa_qd[x]);
qa->qa_qd[x] = NULL;
}
al->al_qd_num = 0;
qa->qa_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
......@@ -712,7 +712,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
ll_rw_block(READ | REQ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto unlock_out;
......@@ -762,7 +762,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
struct gfs2_quota_data *qd;
loff_t offset;
unsigned int nalloc = 0, blocks;
struct gfs2_alloc *al = NULL;
int error;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
......@@ -792,26 +791,19 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
nalloc++;
}
al = gfs2_alloc_get(ip);
if (!al) {
error = -ENOMEM;
goto out_gunlock;
}
/*
* 1 blk for unstuffing inode if stuffed. We add this extra
* block to the reservation unconditionally. If the inode
* doesn't need unstuffing, the block will be released to the
* rgrp since it won't be allocated during the transaction
*/
al->al_requested = 1;
/* +3 in the end for unstuffing block, inode size update block
* and another block in case quota straddles page boundary and
* two blocks need to be updated instead of 1 */
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks);
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, 1 +
(nalloc * (data_blocks + ind_blocks)));
if (error)
goto out_alloc;
......@@ -840,8 +832,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
out_ipres:
gfs2_inplace_release(ip);
out_alloc:
gfs2_alloc_put(ip);
out_gunlock:
gfs2_glock_dq_uninit(&i_gh);
out:
while (qx--)
......@@ -925,7 +915,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
int error = 0;
......@@ -938,15 +928,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
sort_qd, NULL);
for (x = 0; x < al->al_qd_num; x++) {
for (x = 0; x < qa->qa_qd_num; x++) {
int force = NO_FORCE;
qd = al->al_qd[x];
qd = qa->qa_qd[x];
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force = FORCE;
error = do_glock(qd, force, &al->al_qd_ghs[x]);
error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
if (error)
break;
}
......@@ -955,7 +945,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
set_bit(GIF_QD_LOCKED, &ip->i_flags);
else {
while (x--)
gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
gfs2_quota_unhold(ip);
}
......@@ -1000,7 +990,7 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
......@@ -1008,14 +998,14 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
for (x = 0; x < al->al_qd_num; x++) {
for (x = 0; x < qa->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
int sync;
qd = al->al_qd[x];
qd = qa->qa_qd[x];
sync = need_sync(qd);
gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
if (sync && qd_trylock(qd))
qda[count++] = qd;
......@@ -1048,7 +1038,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
s64 value;
unsigned int x;
......@@ -1060,8 +1050,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
for (x = 0; x < al->al_qd_num; x++) {
qd = al->al_qd[x];
for (x = 0; x < qa->qa_qd_num; x++) {
qd = qa->qa_qd[x];
if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
......@@ -1099,7 +1089,7 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid)
{
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
......@@ -1108,8 +1098,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
for (x = 0; x < al->al_qd_num; x++) {
qd = al->al_qd[x];
for (x = 0; x < qa->qa_qd_num; x++) {
qd = qa->qa_qd[x];
if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
......@@ -1529,7 +1519,6 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
unsigned int data_blocks, ind_blocks;
unsigned int blocks = 0;
int alloc_required;
struct gfs2_alloc *al;
loff_t offset;
int error;
......@@ -1594,15 +1583,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
if (gfs2_is_stuffed(ip))
alloc_required = 1;
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (al == NULL)
goto out_i;
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
blocks = al->al_requested = 1 + data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip);
blocks = 1 + data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, blocks);
if (error)
goto out_alloc;
goto out_i;
blocks += gfs2_rg_blocks(ip);
}
......@@ -1617,11 +1603,8 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
gfs2_trans_end(sdp);
out_release:
if (alloc_required) {
if (alloc_required)
gfs2_inplace_release(ip);
out_alloc:
gfs2_alloc_put(ip);
}
out_i:
gfs2_glock_dq_uninit(&i_gh);
out_q:
......
......@@ -28,19 +28,19 @@ extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
static inline void gfs2_alloc_put(struct gfs2_inode *ip)
extern struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip);
static inline void gfs2_qadata_put(struct gfs2_inode *ip)
{
BUG_ON(ip->i_alloc == NULL);
kfree(ip->i_alloc);
ip->i_alloc = NULL;
BUG_ON(ip->i_qadata == NULL);
kfree(ip->i_qadata);
ip->i_qadata = NULL;
}
extern int gfs2_inplace_reserve(struct gfs2_inode *ip);
extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
bool dinode, u64 *generation);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
......
......@@ -1399,8 +1399,9 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
struct gfs2_rgrpd *rgd;
struct gfs2_holder gh;
int error;
if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
......@@ -1408,8 +1409,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
return -EIO;
}
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
......@@ -1423,8 +1424,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
goto out_qs;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
&al->al_rgd_gh);
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
goto out_qs;
......@@ -1440,11 +1440,11 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_rg_gunlock:
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_glock_dq_uninit(&gh);
out_qs:
gfs2_quota_unhold(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......
......@@ -30,9 +30,9 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
{
const struct gfs2_alloc *al = ip->i_alloc;
if (al->al_requested < ip->i_rgd->rd_length)
return al->al_requested + 1;
const struct gfs2_blkreserv *rs = ip->i_res;
if (rs->rs_requested < ip->i_rgd->rd_length)
return rs->rs_requested + 1;
return ip->i_rgd->rd_length;
}
......
......@@ -321,11 +321,11 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, int leave)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
......@@ -336,7 +336,7 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......@@ -549,9 +549,10 @@ int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
goto out;
error = gfs2_ea_get_copy(ip, &el, data, len);
if (error == 0)
error = len;
*ppdata = data;
if (error < 0)
kfree(data);
else
*ppdata = data;
out:
brelse(el.el_bh);
return error;
......@@ -609,7 +610,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
u64 block;
int error;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
......@@ -671,7 +672,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
error = gfs2_alloc_block(ip, &block, &n);
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
......@@ -708,21 +709,19 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
struct buffer_head *dibh;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto out;
al->al_requested = blks;
error = gfs2_inplace_reserve(ip);
error = gfs2_inplace_reserve(ip, blks);
if (error)
goto out_gunlock_q;
......@@ -751,7 +750,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
out_gunlock_q:
gfs2_quota_unlock(ip);
out:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......@@ -991,7 +990,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
} else {
u64 blk;
unsigned int n = 1;
error = gfs2_alloc_block(ip, &blk, &n);
error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, blk, 1);
......@@ -1435,9 +1434,9 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
static int ea_dealloc_block(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
struct buffer_head *dibh;
struct gfs2_holder gh;
int error;
rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
......@@ -1446,8 +1445,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
return -EIO;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
&al->al_rgd_gh);
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
return error;
......@@ -1471,7 +1469,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
gfs2_trans_end(sdp);
out_gunlock:
gfs2_glock_dq_uninit(&al->al_rgd_gh);
gfs2_glock_dq_uninit(&gh);
return error;
}
......@@ -1484,11 +1482,11 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
struct gfs2_alloc *al;
struct gfs2_qadata *qa;
int error;
al = gfs2_alloc_get(ip);
if (!al)
qa = gfs2_qadata_get(ip);
if (!qa)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
......@@ -1510,7 +1508,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
out_quota:
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
gfs2_qadata_put(ip);
return error;
}
......