Commit e151960a authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw

Pull GFS2 updates from Steven Whitehouse:
 "The major feature this time is the "rbm" conversion in the resource
  group code.  The new struct gfs2_rbm specifies the location of an
  allocatable block in (resource group, bitmap, offset) form.  There are
  a number of added helper functions, and later patches then rewrite
  some of the resource group code in terms of this new structure.  Not
  only does this give us a nice code clean up, but it also removes some
  of the previous restrictions where extents could not cross bitmap
  boundaries, for example.

  In addition to that, there are a few bug fixes and clean ups, but the
  rbm work is by far the majority of this patch set in terms of number
  of changed lines."
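
To make the new (resource group, bitmap, offset) addressing concrete, here is a minimal userspace sketch of the two conversion helpers the series introduces, gfs2_rbm_to_block() and gfs2_rbm_from_block(), both visible in the rgrp diff further down. The struct fields below are simplified stand-ins rather than the kernel definitions; GFS2_NBBY (blocks described per bitmap byte) is 4, since each block state occupies two bits.

#include <stdio.h>
#include <stdint.h>

#define GFS2_NBBY 4	/* four 2-bit block states per bitmap byte */

/* Simplified stand-ins for the kernel structures */
struct bitmap { uint32_t bi_start, bi_len; };	/* both in bytes */
struct rgrp { uint64_t rd_data0; uint32_t rd_length; struct bitmap *rd_bits; };
struct rbm { struct rgrp *rgd; struct bitmap *bi; uint32_t offset; };

/* Mirrors gfs2_rbm_to_block(): rgrp base, plus blocks covered by the
   preceding bitmaps, plus the bitmap-relative offset */
static uint64_t rbm_to_block(const struct rbm *rbm)
{
	return rbm->rgd->rd_data0 +
	       (uint64_t)rbm->bi->bi_start * GFS2_NBBY + rbm->offset;
}

/* Mirrors gfs2_rbm_from_block(): find the bitmap whose byte range
   covers the rgrp-relative block, store the remainder as the offset */
static int rbm_from_block(struct rbm *rbm, uint64_t block)
{
	uint64_t goal = block - rbm->rgd->rd_data0;
	uint32_t x;

	for (x = 0; x < rbm->rgd->rd_length; x++) {
		struct bitmap *bi = &rbm->rgd->rd_bits[x];
		if (goal < ((uint64_t)bi->bi_start + bi->bi_len) * GFS2_NBBY) {
			rbm->bi = bi;
			rbm->offset = (uint32_t)(goal - bi->bi_start * GFS2_NBBY);
			return 0;
		}
	}
	return -1;	/* block lies beyond this rgrp */
}

int main(void)
{
	/* Two bitmaps of 64 bytes each, i.e. 256 blocks per bitmap */
	struct bitmap bits[2] = { { 0, 64 }, { 64, 64 } };
	struct rgrp rgd = { .rd_data0 = 1000, .rd_length = 2, .rd_bits = bits };
	struct rbm rbm = { .rgd = &rgd };

	rbm_from_block(&rbm, 1300);	/* lands in the second bitmap */
	printf("bitmap #%ld offset %u -> block %llu\n",
	       (long)(rbm.bi - bits), (unsigned)rbm.offset,
	       (unsigned long long)rbm_to_block(&rbm));	/* round-trips to 1300 */
	return 0;
}

Since the lookup simply walks rd_bits until it finds the covering bitmap, a search can continue across a bitmap boundary by converting an rbm to a block number, adding one, and converting back; this is how gfs2_unaligned_extlen() in the diff steps through an extent.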

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-nmw: (27 commits)
  GFS2: Write out dirty inode metadata in delayed deletes
  GFS2: fix s_writers.counter imbalance in gfs2_ail_empty_gl
  GFS2: Fix infinite loop in rbm_find
  GFS2: Consolidate free block searching functions
  GFS2: Get rid of I_MUTEX_QUOTA usage
  GFS2: Stop block extents at the end of bitmaps
  GFS2: Fix unclaimed_blocks() wrapping bug and clean up
  GFS2: Improve block reservation tracing
  GFS2: Fall back to ignoring reservations, if there are no other blocks left
  GFS2: Fix ->show_options() for statfs slow
  GFS2: Use rbm for gfs2_setbit()
  GFS2: Use rbm for gfs2_testbit()
  GFS2: Eliminate unnecessary check for state > 3 in bitfit
  GFS2: Eliminate redundant calls to may_grant
  GFS2: Combine functions gfs2_glock_dq_wait and wait_on_demote
  GFS2: Combine functions gfs2_glock_wait and wait_on_holder
  GFS2: inline __gfs2_glock_schedule_for_reclaim
  GFS2: change function gfs2_direct_IO to use a normal gfs2_glock_dq
  GFS2: rbm code cleanup
  GFS2: Fix case where reservation finished at end of rgrp
  ...
parents a0d271cb 2216db70
@@ -612,6 +612,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
unsigned requested = 0;
int alloc_required;
int error = 0;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
@@ -641,7 +642,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (error)
goto out_unlock;
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
requested = data_blocks + ind_blocks;
error = gfs2_inplace_reserve(ip, requested);
if (error)
goto out_qunlock;
}
@@ -654,7 +656,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (&ip->i_inode == sdp->sd_rindex)
rblocks += 2 * RES_STATFS;
if (alloc_required)
rblocks += gfs2_rg_blocks(ip);
rblocks += gfs2_rg_blocks(ip, requested);
error = gfs2_trans_begin(sdp, rblocks,
PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
@@ -868,7 +870,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
brelse(dibh);
failed:
gfs2_trans_end(sdp);
if (gfs2_mb_reserved(ip))
gfs2_inplace_release(ip);
if (ip->i_res->rs_qa_qd_num)
gfs2_quota_unlock(ip);
@@ -1023,7 +1024,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
offset, nr_segs, gfs2_get_block_direct,
NULL, NULL, 0);
out:
gfs2_glock_dq_m(1, &gh);
gfs2_glock_dq(&gh);
gfs2_holder_uninit(&gh);
return rv;
}
......
@@ -786,7 +786,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
goto out_rlist;
if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
gfs2_rs_deltree(ip->i_res);
gfs2_rs_deltree(ip, ip->i_res);
error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
RES_INDIRECT + RES_STATFS + RES_QUOTA,
......
@@ -441,7 +441,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
rblocks += data_blocks ? data_blocks : 1;
if (ind_blocks || data_blocks) {
rblocks += RES_STATFS + RES_QUOTA;
rblocks += gfs2_rg_blocks(ip);
rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
}
ret = gfs2_trans_begin(sdp, rblocks, 0);
if (ret)
@@ -845,7 +845,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
&max_bytes, &data_blocks, &ind_blocks);
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
RES_RG_HDR + gfs2_rg_blocks(ip);
RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
if (gfs2_is_jdata(ip))
rblocks += data_blocks ? data_blocks : 1;
......
@@ -185,20 +185,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
spin_unlock(&lru_lock);
}
/**
* __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
* @gl: the glock
*
* If the glock is demotable, then we add it (or move it) to the end
* of the glock LRU list.
*/
static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
if (demote_ok(gl))
gfs2_glock_add_to_lru(gl);
}
/**
* gfs2_glock_put_nolock() - Decrement reference count on glock
* @gl: The glock to put
@@ -883,7 +869,14 @@ static int gfs2_glock_demote_wait(void *word)
return 0;
}
static void wait_on_holder(struct gfs2_holder *gh)
/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
* Returns: 0 on success
*/
int gfs2_glock_wait(struct gfs2_holder *gh)
{
unsigned long time1 = jiffies;
@@ -894,12 +887,7 @@ static void wait_on_holder(struct gfs2_holder *gh)
gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
GL_GLOCK_HOLD_INCR,
GL_GLOCK_MAX_HOLD);
}
static void wait_on_demote(struct gfs2_glock *gl)
{
might_sleep();
wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
return gh->gh_error;
}
/**
@@ -929,19 +917,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
trace_gfs2_demote_rq(gl);
}
/**
* gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
* Returns: 0 on success
*/
int gfs2_glock_wait(struct gfs2_holder *gh)
{
wait_on_holder(gh);
return gh->gh_error;
}
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
struct va_format vaf;
@@ -979,7 +954,7 @@ __acquires(&gl->gl_spin)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct list_head *insert_pt = NULL;
struct gfs2_holder *gh2;
int try_lock = 0;
int try_futile = 0;
BUG_ON(gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
@@ -987,7 +962,7 @@ __acquires(&gl->gl_spin)
if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
if (test_bit(GLF_LOCK, &gl->gl_flags))
try_lock = 1;
try_futile = !may_grant(gl, gh);
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
goto fail;
}
@@ -996,9 +971,8 @@ __acquires(&gl->gl_spin)
if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
(gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
goto trap_recursive;
if (try_lock &&
!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
!may_grant(gl, gh)) {
if (try_futile &&
!(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
gh->gh_error = GLR_TRYFAILED;
gfs2_holder_wake(gh);
@@ -1121,8 +1095,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags))
fast_path = 1;
}
if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
__gfs2_glock_schedule_for_reclaim(gl);
if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
gfs2_glock_add_to_lru(gl);
trace_gfs2_glock_queue(gh, 0);
spin_unlock(&gl->gl_spin);
if (likely(fast_path))
@@ -1141,7 +1116,8 @@ void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
gfs2_glock_dq(gh);
wait_on_demote(gl);
might_sleep();
wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
/**
......
@@ -94,6 +94,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
/* A shortened, inline version of gfs2_trans_begin() */
tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
tr.tr_ip = (unsigned long)__builtin_return_address(0);
sb_start_intwrite(sdp->sd_vfs);
gfs2_log_reserve(sdp, tr.tr_reserved);
BUG_ON(current->journal_info);
current->journal_info = &tr;
......
@@ -99,9 +99,26 @@ struct gfs2_rgrpd {
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
spinlock_t rd_rsspin; /* protects reservation related vars */
struct rb_root rd_rstree; /* multi-block reservation tree */
u32 rd_rs_cnt; /* count of current reservations */
};
struct gfs2_rbm {
struct gfs2_rgrpd *rgd;
struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
u32 offset; /* The offset is bitmap relative */
};
static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
}
static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
const struct gfs2_rbm *rbm2)
{
return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) &&
(rbm1->offset == rbm2->offset);
}
enum gfs2_state_bits {
BH_Pinned = BH_PrivateStart,
BH_Escaped = BH_PrivateStart + 1,
@@ -250,18 +267,11 @@ struct gfs2_blkreserv {
/* components used during write (step 1): */
atomic_t rs_sizehint; /* hint of the write size */
/* components used during inplace_reserve (step 2): */
u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
/* components used during get_local_rgrp (step 3): */
struct gfs2_rgrpd *rs_rgd; /* pointer to the gfs2_rgrpd */
struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
struct rb_node rs_node; /* link to other block reservations */
/* components used during block searches and assignments (step 4): */
struct gfs2_bitmap *rs_bi; /* bitmap for the current allocation */
u32 rs_biblk; /* start block relative to the bi */
struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
u64 rs_inum; /* Inode number for reservation */
/* ancillary quota stuff */
struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
......
@@ -712,14 +712,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
/* The newly created inode needs a reservation so it can allocate
xattrs. At the same time, we want new blocks allocated to the new
dinode to be as contiguous as possible. Since we allocated the
dinode block under the directory's reservation, we transfer
ownership of that reservation to the new inode. The directory
doesn't need a reservation unless it needs a new allocation. */
ip->i_res = dip->i_res;
dip->i_res = NULL;
error = gfs2_rs_alloc(ip);
if (error)
goto fail_gunlock2;
error = gfs2_acl_create(dip, inode);
if (error)
@@ -737,9 +732,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
brelse(bh);
gfs2_trans_end(sdp);
/* Check if we reserved space in the rgrp. Function link_dinode may
not, depending on whether alloc is required. */
if (gfs2_mb_reserved(dip))
gfs2_inplace_release(dip);
gfs2_quota_unlock(dip);
mark_inode_dirty(inode);
@@ -897,7 +889,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
goto out_gunlock_q;
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
gfs2_rg_blocks(dip) +
gfs2_rg_blocks(dip, sdp->sd_max_dirres) +
2 * RES_DINODE + RES_STATFS +
RES_QUOTA, 0);
if (error)
@@ -1378,7 +1370,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock_q;
error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
gfs2_rg_blocks(ndip) +
gfs2_rg_blocks(ndip, sdp->sd_max_dirres) +
4 * RES_DINODE + 4 * RES_LEAF +
RES_STATFS + RES_QUOTA + 4, 0);
if (error)
......
@@ -19,6 +19,7 @@
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/quotaops.h>
#include <linux/lockdep.h>
#include "gfs2.h"
#include "incore.h"
@@ -766,6 +767,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
return error;
}
static struct lock_class_key gfs2_quota_imutex_key;
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
@@ -803,6 +805,12 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
fs_err(sdp, "can't get quota file inode: %d\n", error);
goto fail_rindex;
}
/*
* i_mutex on quota files is special. Since this inode is hidden system
* file, we are safe to define locking ourselves.
*/
lockdep_set_class(&sdp->sd_quota_inode->i_mutex,
&gfs2_quota_imutex_key);
error = gfs2_rindex_update(sdp);
if (error)
......
@@ -765,6 +765,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
struct gfs2_holder *ghs, i_gh;
unsigned int qx, x;
struct gfs2_quota_data *qd;
unsigned reserved;
loff_t offset;
unsigned int nalloc = 0, blocks;
int error;
@@ -781,7 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
return -ENOMEM;
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
mutex_lock(&ip->i_inode.i_mutex);
for (qx = 0; qx < num_qd; qx++) {
error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);
@@ -811,13 +812,13 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
* two blocks need to be updated instead of 1 */
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
error = gfs2_inplace_reserve(ip, 1 +
(nalloc * (data_blocks + ind_blocks)));
reserved = 1 + (nalloc * (data_blocks + ind_blocks));
error = gfs2_inplace_reserve(ip, reserved);
if (error)
goto out_alloc;
if (nalloc)
blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;
blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
@@ -1598,7 +1599,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
error = gfs2_inplace_reserve(ip, blocks);
if (error)
goto out_i;
blocks += gfs2_rg_blocks(ip);
blocks += gfs2_rg_blocks(ip, blocks);
}
/* Some quotas span block boundaries and can update two blocks,
......
@@ -35,9 +35,6 @@
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
#define RSRV_CONTENTION_FACTOR 4
#define RGRP_RSRV_MAX_CONTENDERS 2
#if BITS_PER_LONG == 32
#define LBITMASK (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
@@ -67,53 +64,48 @@ static const char valid_change[16] = {
1, 0, 0, 0
};
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
unsigned char old_state,
struct gfs2_bitmap **rbi);
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
const struct gfs2_inode *ip, bool nowrap);
/**
* gfs2_setbit - Set a bit in the bitmaps
* @rgd: the resource group descriptor
* @buf2: the clone buffer that holds the bitmaps
* @bi: the bitmap structure
* @block: the block to set
* @rbm: The position of the bit to set
* @do_clone: Also set the clone bitmap, if it exists
* @new_state: the new state of the block
*
*/
static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
struct gfs2_bitmap *bi, u32 block,
static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
unsigned char new_state)
{
unsigned char *byte1, *byte2, *end, cur_state;
unsigned int buflen = bi->bi_len;
const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
unsigned int buflen = rbm->bi->bi_len;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
end = bi->bi_bh->b_data + bi->bi_offset + buflen;
byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
BUG_ON(byte1 >= end);
cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
if (unlikely(!valid_change[new_state * 4 + cur_state])) {
printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
"new_state=%d\n",
(unsigned long long)block, cur_state, new_state);
printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
(unsigned long long)rgd->rd_addr,
(unsigned long)bi->bi_start);
printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
(unsigned long)bi->bi_offset,
(unsigned long)bi->bi_len);
printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
"new_state=%d\n", rbm->offset, cur_state, new_state);
printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
(unsigned long long)rbm->rgd->rd_addr,
rbm->bi->bi_start);
printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
rbm->bi->bi_offset, rbm->bi->bi_len);
dump_stack();
gfs2_consist_rgrpd(rgd);
gfs2_consist_rgrpd(rbm->rgd);
return;
}
*byte1 ^= (cur_state ^ new_state) << bit;
if (buf2) {
byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
if (do_clone && rbm->bi->bi_clone) {
byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
*byte2 ^= (cur_state ^ new_state) << bit;
}
@@ -121,30 +113,21 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
/**
* gfs2_testbit - test a bit in the bitmaps
* @rgd: the resource group descriptor
* @buffer: the buffer that holds the bitmaps
* @buflen: the length (in bytes) of the buffer
* @block: the block to read
* @rbm: The bit to test
*
* Returns: The two bit block state of the requested bit
*/
static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
const unsigned char *buffer,
unsigned int buflen, u32 block)
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
const unsigned char *byte, *end;
unsigned char cur_state;
const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
const u8 *byte;
unsigned int bit;
byte = buffer + (block / GFS2_NBBY);
bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
end = buffer + buflen;
gfs2_assert(rgd->rd_sbd, byte < end);
byte = buffer + (rbm->offset / GFS2_NBBY);
bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
cur_state = (*byte >> bit) & GFS2_BIT_MASK;
return cur_state;
return (*byte >> bit) & GFS2_BIT_MASK;
}
/**
@@ -192,7 +175,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
*/
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
u64 startblk = gfs2_rs_startblk(rs);
u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
if (blk >= startblk + rs->rs_free)
return 1;
@@ -201,36 +184,6 @@ static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
return 0;
}
/**
* rs_find - Find a rgrp multi-block reservation that contains a given block
* @rgd: The rgrp
* @rgblk: The block we're looking for, relative to the rgrp
*/
static struct gfs2_blkreserv *rs_find(struct gfs2_rgrpd *rgd, u32 rgblk)
{
struct rb_node **newn;
int rc;
u64 fsblk = rgblk + rgd->rd_data0;
spin_lock(&rgd->rd_rsspin);
newn = &rgd->rd_rstree.rb_node;
while (*newn) {
struct gfs2_blkreserv *cur =
rb_entry(*newn, struct gfs2_blkreserv, rs_node);
rc = rs_cmp(fsblk, 1, cur);
if (rc < 0)
newn = &((*newn)->rb_left);
else if (rc > 0)
newn = &((*newn)->rb_right);
else {
spin_unlock(&rgd->rd_rsspin);
return cur;
}
}
spin_unlock(&rgd->rd_rsspin);
return NULL;
}
/**
* gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
* a block in a given allocation state.
@@ -262,8 +215,6 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
u64 mask = 0x5555555555555555ULL;
u32 bit;
BUG_ON(state > 3);
/* Mask off bits we don't care about at the start of the search */
mask <<= spoint;
tmp = gfs2_bit_search(ptr, mask, state);
@@ -284,6 +235,131 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
/**
* gfs2_rbm_from_block - Set the rbm based upon rgd and block number
* @rbm: The rbm with rgd already set correctly
* @block: The block number (filesystem relative)
*
* This sets the bi and offset members of an rbm based on a
* resource group and a filesystem relative block number. The
* resource group must be set in the rbm on entry, the bi and
* offset members will be set by this function.
*
* Returns: 0 on success, or an error code
*/
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
u64 rblock = block - rbm->rgd->rd_data0;
u32 goal = (u32)rblock;
int x;
if (WARN_ON_ONCE(rblock > UINT_MAX))
return -EINVAL;
if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
return -E2BIG;
for (x = 0; x < rbm->rgd->rd_length; x++) {
rbm->bi = rbm->rgd->rd_bits + x;
if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
break;
}
}
return 0;
}
/**
* gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
* @rbm: Position to search (value/result)
* @n_unaligned: Number of unaligned blocks to check
* @len: Decremented for each block found (terminate on zero)
*
* Returns: true if a non-free block is encountered
*/
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
u64 block;
u32 n;
u8 res;
for (n = 0; n < n_unaligned; n++) {
res = gfs2_testbit(rbm);
if (res != GFS2_BLKST_FREE)
return true;
(*len)--;
if (*len == 0)
return true;
block = gfs2_rbm_to_block(rbm);
if (gfs2_rbm_from_block(rbm, block + 1))
return true;
}
return false;
}
/**
* gfs2_free_extlen - Return extent length of free blocks
* @rbm: Starting position
* @len: Max length to check
*
* Starting at the block specified by the rbm, see how many free blocks
* there are, not reading more than len blocks ahead. This can be done
* using memchr_inv when the blocks are byte aligned, but has to be done
* on a block by block basis in case of unaligned blocks. Also this
* function can cope with bitmap boundaries (although it must stop on
* a resource group boundary)
*
* Returns: Number of free blocks in the extent
*/
static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
struct gfs2_rbm rbm = *rrbm;
u32 n_unaligned = rbm.offset & 3;
u32 size = len;
u32 bytes;
u32 chunk_size;
u8 *ptr, *start, *end;
u64 block;
if (n_unaligned &&
gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
goto out;
n_unaligned = len & 3;
/* Start is now byte aligned */
while (len > 3) {
start = rbm.bi->bi_bh->b_data;
if (rbm.bi->bi_clone)
start = rbm.bi->bi_clone;
end = start + rbm.bi->bi_bh->b_size;
start += rbm.bi->bi_offset;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
ptr = memchr_inv(start, 0, bytes);
chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
chunk_size *= GFS2_NBBY;
BUG_ON(len < chunk_size);
len -= chunk_size;
block = gfs2_rbm_to_block(&rbm);
gfs2_rbm_from_block(&rbm, block + chunk_size);
n_unaligned = 3;
if (ptr)
break;
n_unaligned = len & 3;
}
/* Deal with any bits left over at the end */
if (n_unaligned)
gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
return size - len;
}
/**
* gfs2_bitcount - count the number of bits in a certain state
* @rgd: the resource group descriptor
@@ -487,6 +563,8 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
if (!res)
error = -ENOMEM;
RB_CLEAR_NODE(&res->rs_node);
down_write(&ip->i_rw_mutex);
if (ip->i_res)
kmem_cache_free(gfs2_rsrv_cachep, res);
@@ -496,11 +574,12 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
return error;
}
static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
rs->rs_rgd->rd_addr, gfs2_rs_startblk(rs), rs->rs_biblk,
rs->rs_free);
gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
(unsigned long long)rs->rs_inum,
(unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
rs->rs_rbm.offset, rs->rs_free);
}
/**
@@ -508,41 +587,26 @@ static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
* @rs: The reservation to remove
*
*/
static void __rs_deltree(struct gfs2_blkreserv *rs)
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
struct gfs2_rgrpd *rgd;
if (!gfs2_rs_active(rs))
return;
rgd = rs->rs_rgd;
/* We can't do this: The reason is that when the rgrp is invalidated,
it's in the "middle" of acquiring the glock, but the HOLDER bit
isn't set yet:
BUG_ON(!gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl));*/
trace_gfs2_rs(NULL, rs, TRACE_RS_TREEDEL);
if (!RB_EMPTY_ROOT(&rgd->rd_rstree))
rgd = rs->rs_rbm.rgd;
trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
rb_erase(&rs->rs_node, &rgd->rd_rstree);
BUG_ON(!rgd->rd_rs_cnt);
rgd->rd_rs_cnt--;
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
/* return reserved blocks to the rgrp and the ip */
BUG_ON(rs->rs_rgd->rd_reserved < rs->rs_free);
rs->rs_rgd->rd_reserved -= rs->rs_free;
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
rs->rs_free = 0;
clear_bit(GBF_FULL, &rs->rs_bi->bi_flags);
clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
smp_mb__after_clear_bit();
}
/* We can't change any of the step 1 or step 2 components of the rs.
E.g. We can't set rs_rgd to NULL because the rgd glock is held and
dequeued through this pointer.
Can't: atomic_set(&rs->rs_sizehint, 0);
Can't: rs->rs_requested = 0;
Can't: rs->rs_rgd = NULL;*/
rs->rs_bi = NULL;
rs->rs_biblk = 0;
}
/**
@@ -550,17 +614,16 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
* @rs: The reservation to remove
*
*/
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
struct gfs2_rgrpd *rgd;
if (!gfs2_rs_active(rs))
return;
rgd = rs->rs_rgd;
rgd = rs->rs_rbm.rgd;
if (rgd) {
spin_lock(&rgd->rd_rsspin);
__rs_deltree(rs);
__rs_deltree(ip, rs);
spin_unlock(&rgd->rd_rsspin);
}
}
/**
@@ -572,8 +635,7 @@ void gfs2_rs_delete(struct gfs2_inode *ip)
{
down_write(&ip->i_rw_mutex);
if (ip->i_res) {
gfs2_rs_deltree(ip->i_res);
trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
gfs2_rs_deltree(ip, ip->i_res);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
ip->i_res = NULL;
@@ -597,7 +659,7 @@ static void return_all_reservations(struct gfs2_rgrpd *rgd)
spin_lock(&rgd->rd_rsspin);
while ((n = rb_first(&rgd->rd_rstree))) {
rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
__rs_deltree(rs);
__rs_deltree(NULL, rs);
}
spin_unlock(&rgd->rd_rsspin);
}
@@ -1270,211 +1332,276 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
/**
* rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
* @bi: the bitmap with the blocks
* @ip: the inode structure
* @biblk: the 32-bit block number relative to the start of the bitmap
* @amount: the number of blocks to reserve
*
* Returns: NULL - reservation was already taken, so not inserted
* pointer to the inserted reservation
*/
static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
struct gfs2_inode *ip, u32 biblk,
int amount)
static void rs_insert(struct gfs2_inode *ip)
{
struct rb_node **newn, *parent = NULL;
int rc;
struct gfs2_blkreserv *rs = ip->i_res;
struct gfs2_rgrpd *rgd = rs->rs_rgd;
u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
BUG_ON(gfs2_rs_active(rs));
spin_lock(&rgd->rd_rsspin);
newn = &rgd->rd_rstree.rb_node;
BUG_ON(!ip->i_res);
BUG_ON(gfs2_rs_active(rs));
/* Figure out where to put new node */
/*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
while (*newn) {
struct gfs2_blkreserv *cur =
rb_entry(*newn, struct gfs2_blkreserv, rs_node);
parent = *newn;
rc = rs_cmp(fsblock, amount, cur);
rc = rs_cmp(fsblock, rs->rs_free, cur);
if (rc > 0)
newn = &((*newn)->rb_right);
else if (rc < 0)
newn = &((*newn)->rb_left);
else {
spin_unlock(&rgd->rd_rsspin);
return NULL; /* reservation already in use */
WARN_ON(1);
return;
}
}
/* Do our reservation work */
rs = ip->i_res;
rs->rs_free = amount;
rs->rs_biblk = biblk;
rs->rs_bi = bi;
rb_link_node(&rs->rs_node, parent, newn);
rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
/* Do our inode accounting for the reservation */
/*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
/* Do our rgrp accounting for the reservation */
rgd->rd_reserved += amount; /* blocks reserved */
rgd->rd_rs_cnt++; /* number of in-tree reservations */
rgd->rd_reserved += rs->rs_free; /* blocks reserved */
spin_unlock(&rgd->rd_rsspin);
trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
return rs;
}
/**
* unclaimed_blocks - return number of blocks that aren't spoken for
*/
static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
{
return rgd->rd_free_clone - rgd->rd_reserved;
trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
/**
* rg_mblk_search - find a group of multiple free blocks
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @rs: the block reservation
* @ip: pointer to the inode for which we're reserving blocks
* @requested: number of blocks required for this allocation
*
* This is very similar to rgblk_search, except we're looking for whole
* 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
* on aligned dwords for speed's sake.
*
* Returns: 0 if successful or BFITNOENT if there isn't enough free space
*/
static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
unsigned requested)
{
struct gfs2_bitmap *bi = rgd->rd_bits;
const u32 length = rgd->rd_length;
u32 blk;
unsigned int buf, x, search_bytes;
u8 *buffer = NULL;
u8 *ptr, *end, *nonzero;
u32 goal, rsv_bytes;
struct gfs2_blkreserv *rs;
u32 best_rs_bytes, unclaimed;
int best_rs_blocks;
struct gfs2_rbm rbm = { .rgd = rgd, };
u64 goal;
struct gfs2_blkreserv *rs = ip->i_res;
u32 extlen;
u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
int ret;
extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
return;
/* Find bitmap block that contains bits for goal block */
if (rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
goal = ip->i_goal;
else
goal = rgd->rd_last_alloc;
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
/* Convert scope of "goal" from rgrp-wide to within
found bit block */
if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
goal -= bi->bi_start * GFS2_NBBY;
goto do_search;
}
}
buf = 0;
goal = 0;
goal = rgd->rd_last_alloc + rgd->rd_data0;
do_search:
best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
(RGRP_RSRV_MINBLKS * rgd->rd_length));
best_rs_bytes = (best_rs_blocks *
(1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
unclaimed = unclaimed_blocks(rgd);
if (best_rs_bytes * GFS2_NBBY > unclaimed)
best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
for (x = 0; x <= length; x++) {
bi = rgd->rd_bits + buf;
if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
return;
if (test_bit(GBF_FULL, &bi->bi_flags))
goto skip;
ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
if (ret == 0) {
rs->rs_rbm = rbm;
rs->rs_free = extlen;
rs->rs_inum = ip->i_no_addr;
rs_insert(ip);
}
}
WARN_ON(!buffer_uptodate(bi->bi_bh));
if (bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
else
buffer = bi->bi_bh->b_data + bi->bi_offset;
/**
* gfs2_next_unreserved_block - Return next block that is not reserved
* @rgd: The resource group
* @block: The starting block
* @length: The required length
* @ip: Ignore any reservations for this inode
*
* If the block does not appear in any reservation, then return the
* block number unchanged. If it does appear in the reservation, then
* keep looking through the tree of reservations in order to find the
* first block number which is not reserved.
*/
/* We have to keep the reservations aligned on u64 boundaries
otherwise we could get situations where a byte can't be
used because it's after a reservation, but a free bit still
is within the reservation's area. */
ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
end = (buffer + bi->bi_len);
while (ptr < end) {
rsv_bytes = 0;
if ((ptr + best_rs_bytes) <= end)
search_bytes = best_rs_bytes;
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
u32 length,
const struct gfs2_inode *ip)
{
struct gfs2_blkreserv *rs;
struct rb_node *n;
int rc;
spin_lock(&rgd->rd_rsspin);
n = rgd->rd_rstree.rb_node;
while (n) {
rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
rc = rs_cmp(block, length, rs);
if (rc < 0)
n = n->rb_left;
else if (rc > 0)
n = n->rb_right;
else
search_bytes = end - ptr;
BUG_ON(!search_bytes);
nonzero = memchr_inv(ptr, 0, search_bytes);
/* If the lot is all zeroes, reserve the whole size. If
there's enough zeroes to satisfy the request, use
what we can. If there's not enough, keep looking. */
if (nonzero == NULL)
rsv_bytes = search_bytes;
else if ((nonzero - ptr) * GFS2_NBBY >=
ip->i_res->rs_requested)
rsv_bytes = (nonzero - ptr);
if (rsv_bytes) {
blk = ((ptr - buffer) * GFS2_NBBY);
BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
rs = rs_insert(bi, ip, blk,
rsv_bytes * GFS2_NBBY);
if (IS_ERR(rs))
return PTR_ERR(rs);
if (rs)
return 0;
break;
}
ptr += ALIGN(search_bytes, sizeof(u64));
if (n) {
while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
n = n->rb_right;
if (n == NULL)
break;
rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
}
skip:
/* Try next bitmap block (wrap back to rgrp header
if at end) */
buf++;
buf %= length;
goal = 0;
}
return BFITNOENT;
spin_unlock(&rgd->rd_rsspin);
return block;
}
/**
* try_rgrp_fit - See if a given reservation will fit in a given RG
* @rgd: the RG data
* @ip: the inode
* gfs2_reservation_check_and_update - Check for reservations during block alloc
* @rbm: The current position in the resource group
* @ip: The inode for which we are searching for blocks
* @minext: The minimum extent length
*
* If there's room for the requested blocks to be allocated from the RG:
* This will try to get a multi-block reservation first, and if that doesn't
* fit, it will take what it can.
* This checks the current position in the rgrp to see whether there is
* a reservation covering this block. If not then this function is a
* no-op. If there is, then the position is moved to the end of the
* contiguous reservation(s) so that we are pointing at the first
* non-reserved block.
*
* Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
* Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
*/
static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
const struct gfs2_inode *ip,
u32 minext)
{
struct gfs2_blkreserv *rs = ip->i_res;
u64 block = gfs2_rbm_to_block(rbm);
u32 extlen = 1;
u64 nblock;
int ret;
/*
* If we have a minimum extent length, then skip over any extent
* which is less than the min extent length in size.
*/
if (minext) {
extlen = gfs2_free_extlen(rbm, minext);
nblock = block + extlen;
if (extlen < minext)
goto fail;
}
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
/*
* Check the extent which has been found against the reservations
* and skip if parts of it are already reserved
*/
nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
if (nblock == block)
return 0;
/* Look for a multi-block reservation. */
if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
rg_mblk_search(rgd, ip) != BFITNOENT)
return 1;
if (unclaimed_blocks(rgd) >= rs->rs_requested)
fail:
ret = gfs2_rbm_from_block(rbm, nblock);
if (ret < 0)
return ret;
return 1;
}
/**
* gfs2_rbm_find - Look for blocks of a particular state
* @rbm: Value/result starting position and final position
* @state: The state which we want to find
* @minext: The requested extent length (0 for a single block)
* @ip: If set, check for reservations
* @nowrap: Stop looking at the end of the rgrp, rather than wrapping
* around until we've reached the starting point.
*
* Side effects:
* - If looking for free blocks, we set GBF_FULL on each bitmap which
* has no free blocks in it.
*
* Returns: 0 on success, -ENOSPC if there is no block of the requested state
*/
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
const struct gfs2_inode *ip, bool nowrap)
{
struct buffer_head *bh;
struct gfs2_bitmap *initial_bi;
u32 initial_offset;
u32 offset;
u8 *buffer;
int index;
int n = 0;
int iters = rbm->rgd->rd_length;
int ret;
/* If we are not starting at the beginning of a bitmap, then we
* need to add one to the bitmap count to ensure that we search
* the starting bitmap twice.
*/
if (rbm->offset != 0)
iters++;
while(1) {
if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
bh = rbm->bi->bi_bh;
buffer = bh->b_data + rbm->bi->bi_offset;
WARN_ON(!buffer_uptodate(bh));
if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
initial_offset = rbm->offset;
offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
if (ip == NULL)
return 0;
initial_bi = rbm->bi;
ret = gfs2_reservation_check_and_update(rbm, ip, minext);
if (ret == 0)
return 0;
if (ret > 0) {
n += (rbm->bi - initial_bi);
goto next_iter;
}
if (ret == -E2BIG) {
index = 0;
rbm->offset = 0;
n += (rbm->bi - initial_bi);
goto res_covered_end_of_rgrp;
}
return ret;
bitmap_full: /* Mark bitmap as full and fall through */
if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
set_bit(GBF_FULL, &rbm->bi->bi_flags);
next_bitmap: /* Find next bitmap in the rgrp */
rbm->offset = 0;
index = rbm->bi - rbm->rgd->rd_bits;
index++;
if (index == rbm->rgd->rd_length)
index = 0;
res_covered_end_of_rgrp:
rbm->bi = &rbm->rgd->rd_bits[index];
if ((index == 0) && nowrap)
break;
n++;
next_iter:
if (n >= iters)
break;
}
return -ENOSPC;
}
/**
@@ -1489,34 +1616,33 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
u32 goal = 0, block;
u64 no_addr;
u64 block;
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_glock *gl;
struct gfs2_inode *ip;
int error;
int found = 0;
struct gfs2_bitmap *bi;
struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
while (goal < rgd->rd_data) {
while (1) {
down_write(&sdp->sd_log_flush_lock);
block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
up_write(&sdp->sd_log_flush_lock);
if (block == BFITNOENT)
if (error == -ENOSPC)
break;
if (WARN_ON_ONCE(error))
break;
block = gfs2_bi2rgd_blk(bi, block);
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
goal = max(block + 1, goal + 1);
if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
block = gfs2_rbm_to_block(&rbm);
if (gfs2_rbm_from_block(&rbm, block + 1))
break;
if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
continue;
if (no_addr == skip)
if (block == skip)
continue;
*last_unlinked = no_addr;
*last_unlinked = block;
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
if (error)
continue;
@@ -1543,6 +1669,19 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
return;
}
static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
struct gfs2_rgrpd *rgd = *pos;
rgd = gfs2_rgrpd_get_next(rgd);
if (rgd == NULL)
rgd = gfs2_rgrpd_get_next(NULL);
*pos = rgd;
if (rgd != begin) /* If we didn't wrap */
return true;
return false;
}
/**
* gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
@@ -1562,103 +1701,96 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
rs->rs_requested = requested;
if (gfs2_assert_warn(sdp, requested)) {
error = -EINVAL;
goto out;
}
if (gfs2_assert_warn(sdp, requested))
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rgd;
begin = rs->rs_rbm.rgd;
flags = 0; /* Yoda: Do or do not. There is no try */
} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
rs->rs_rgd = begin = ip->i_rgd;
rs->rs_rbm.rgd = begin = ip->i_rgd;
} else {
rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
if (rs->rs_rgd == NULL)
if (rs->rs_rbm.rgd == NULL)
return -EBADSLT;
while (loops < 3) {
rg_locked = 0;
if (gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl)) {
rg_locked = 1;
error = 0;
} else if (!loops && !gfs2_rs_active(rs) &&
rs->rs_rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
/* If the rgrp already is maxed out for contenders,
we can eliminate it as a "first pass" without even
requesting the rgrp glock. */
error = GLR_TRYFAILED;
} else {
error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
rg_locked = 0;
error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
LM_ST_EXCLUSIVE, flags,
&rs->rs_rgd_gh);
if (!error && sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rgd);
if (error) {
if (error == GLR_TRYFAILED)
goto next_rgrp;
if (unlikely(error))
return error;
if (sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rbm.rgd);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
return error;
}
}
}
switch (error) {
case 0:
if (gfs2_rs_active(rs)) {
if (unclaimed_blocks(rs->rs_rgd) +
rs->rs_free >= rs->rs_requested) {
ip->i_rgd = rs->rs_rgd;
return 0;
}
/* We have a multi-block reservation, but the
rgrp doesn't have enough free blocks to
satisfy the request. Free the reservation
and look for a suitable rgrp. */
gfs2_rs_deltree(rs);
}
if (try_rgrp_fit(rs->rs_rgd, ip)) {
/* Skip unuseable resource groups */
if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb)
gfs2_rgrp_bh_get(rs->rs_rgd);
ip->i_rgd = rs->rs_rgd;
gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
/* Skip rgrps when we can't get a reservation on first pass */
if (!gfs2_rs_active(rs) && (loops < 1))
goto check_rgrp;
/* If rgrp has enough free space, use it */
if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
ip->i_rgd = rs->rs_rbm.rgd;
return 0;
}
if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) {
if (sdp->sd_args.ar_rgrplvb)
gfs2_rgrp_bh_get(rs->rs_rgd);
try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
/* Drop reservation, if we couldn't use reserved rgrp */
if (gfs2_rs_active(rs))
gfs2_rs_deltree(ip, rs);
check_rgrp:
/* Check for unlinked inodes which can be reclaimed */
if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
ip->i_no_addr);
}
skip_rgrp:
/* Unlock rgrp if required */
if (!rg_locked)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
/* fall through */
case GLR_TRYFAILED:
rs->rs_rgd = gfs2_rgrpd_get_next(rs->rs_rgd);
rs->rs_rgd = rs->rs_rgd ? : begin; /* if NULL, wrap */
if (rs->rs_rgd != begin) /* If we didn't wrap */
break;
next_rgrp:
/* Find the next rgrp, and continue looking */
if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
continue;
/* If we've scanned all the rgrps, but found no free blocks
* then this checks for some less likely conditions before
* trying again.
*/
flags &= ~LM_FLAG_TRY;
loops++;
/* Check that fs hasn't grown if writing to rindex */
if (ip == GFS2_I(sdp->sd_rindex) &&
!sdp->sd_rindex_uptodate) {
if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
error = gfs2_ri_update(ip);
if (error)
goto out;
} else if (loops == 2)
return error;
}
/* Flushing the log may release space */
if (loops == 2)
gfs2_log_flush(sdp, NULL);
break;
default:
goto out;
}
}
error = -ENOSPC;
out:
if (error)
rs->rs_requested = 0;
return error;
return -ENOSPC;
}
/**
@@ -1672,15 +1804,8 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
{
struct gfs2_blkreserv *rs = ip->i_res;
if (!rs)
return;
if (!rs->rs_free)
gfs2_rs_deltree(rs);
if (rs->rs_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
rs->rs_requested = 0;
}
/**
@@ -1693,173 +1818,47 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
struct gfs2_bitmap *bi = NULL;
u32 length, rgrp_block, buf_block;
unsigned int buf;
unsigned char type;
struct gfs2_rbm rbm = { .rgd = rgd, };
int ret;
length = rgd->rd_length;
rgrp_block = block - rgd->rd_data0;
ret = gfs2_rbm_from_block(&rbm, block);
WARN_ON_ONCE(ret != 0);
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
break;
}
gfs2_assert(rgd->rd_sbd, buf < length);
buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len, buf_block);
return type;
return gfs2_testbit(&rbm);
}
/**
* rgblk_search - find a block in @state
* @rgd: the resource group descriptor
* @goal: the goal block within the RG (start here to search for avail block)
* @state: GFS2_BLKST_XXX the before-allocation state to find
* @rbi: address of the pointer to the bitmap containing the block found
*
* Walk rgrp's bitmap to find bits that represent a block in @state.
*
* This function never fails, because we wouldn't call it unless we
* know (from reservation results, etc.) that a block is available.
*
* Scope of @goal is just within rgrp, not the whole filesystem.
* Scope of @returned block is just within bitmap, not the whole filesystem.
*
* Returns: the block number found relative to the bitmap rbi
*/
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, unsigned char state,
struct gfs2_bitmap **rbi)
{
struct gfs2_bitmap *bi = NULL;
const u32 length = rgd->rd_length;
u32 biblk = BFITNOENT;
unsigned int buf, x;
const u8 *buffer = NULL;
*rbi = NULL;
/* Find bitmap block that contains bits for goal block */
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
/* Convert scope of "goal" from rgrp-wide to within found bit block */
if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
goal -= bi->bi_start * GFS2_NBBY;
goto do_search;
}
}
buf = 0;
goal = 0;
do_search:
/* Search (up to entire) bitmap in this rgrp for allocatable block.
"x <= length", instead of "x < length", because we typically start
the search in the middle of a bit block, but if we can't find an
allocatable block anywhere else, we want to be able wrap around and
search in the first part of our first-searched bit block. */
for (x = 0; x <= length; x++) {
bi = rgd->rd_bits + buf;
if (test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto skip;
/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
bitmaps, so we must search the originals for that. */
buffer = bi->bi_bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bi->bi_bh));
if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
while (1) {
struct gfs2_blkreserv *rs;
u32 rgblk;
biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
if (biblk == BFITNOENT)
break;
/* Check if this block is reserved() */
rgblk = gfs2_bi2rgd_blk(bi, biblk);
rs = rs_find(rgd, rgblk);
if (rs == NULL)
break;
BUG_ON(rs->rs_bi != bi);
biblk = BFITNOENT;
/* This should jump to the first block after the
reservation. */
goal = rs->rs_biblk + rs->rs_free;
if (goal >= bi->bi_len * GFS2_NBBY)
break;
}
if (biblk != BFITNOENT)
break;
if ((goal == 0) && (state == GFS2_BLKST_FREE))
set_bit(GBF_FULL, &bi->bi_flags);
/* Try next bitmap block (wrap back to rgrp header if at end) */
skip:
buf++;
buf %= length;
goal = 0;
}
if (biblk != BFITNOENT)
*rbi = bi;
return biblk;
}
/**
* gfs2_alloc_extent - allocate an extent from a given bitmap
* @rgd: the resource group descriptor
* @bi: the bitmap within the rgrp
* @blk: the block within the bitmap
* @rbm: the resource group information
* @dinode: TRUE if the first block we allocate is for a dinode
* @n: The extent length
* @n: The extent length (value/result)
*
* Add the found bitmap buffer to the transaction.
* Add the bitmap buffer to the transaction.
* Set the found bits to @new_state to change block's allocation state.
* Returns: starting block number of the extent (fs scope)
*/
static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
u32 blk, bool dinode, unsigned int *n)
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
unsigned int *n)
{
struct gfs2_rbm pos = { .rgd = rbm->rgd, };
const unsigned int elen = *n;
u32 goal, rgblk;
const u8 *buffer = NULL;
struct gfs2_blkreserv *rs;
*n = 0;
buffer = bi->bi_bh->b_data + bi->bi_offset;
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, bi->bi_clone, bi, blk,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
(*n)++;
goal = blk;
u64 block;
int ret;
*n = 1;
block = gfs2_rbm_to_block(rbm);
gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1);
gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
block++;
while (*n < elen) {
goal++;
if (goal >= (bi->bi_len * GFS2_NBBY))
ret = gfs2_rbm_from_block(&pos, block);
if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
break;
rgblk = gfs2_bi2rgd_blk(bi, goal);
rs = rs_find(rgd, rgblk);
if (rs) /* Oops, we bumped into someone's reservation */
break;
if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
GFS2_BLKST_FREE)
break;
gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
(*n)++;
block++;
}
blk = gfs2_bi2rgd_blk(bi, blk);
rgd->rd_last_alloc = blk + *n - 1;
return rgd->rd_data0 + blk;
}
/**
@@ -1875,46 +1874,30 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
u32 blen, unsigned char new_state)
{
struct gfs2_rgrpd *rgd;
struct gfs2_bitmap *bi = NULL;
u32 length, rgrp_blk, buf_blk;
unsigned int buf;
struct gfs2_rbm rbm;
rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rgd) {
rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rbm.rgd) {
if (gfs2_consist(sdp))
fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
return NULL;
}
length = rgd->rd_length;
rgrp_blk = bstart - rgd->rd_data0;
while (blen--) {
for (buf = 0; buf < length; buf++) {
bi = rgd->rd_bits + buf;
if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
break;
}
gfs2_assert(rgd->rd_sbd, buf < length);
buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
rgrp_blk++;
if (!bi->bi_clone) {
bi->bi_clone = kmalloc(bi->bi_bh->b_size,
gfs2_rbm_from_block(&rbm, bstart);
bstart++;
if (!rbm.bi->bi_clone) {
rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
GFP_NOFS | __GFP_NOFAIL);
memcpy(bi->bi_clone + bi->bi_offset,
bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len);
memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
rbm.bi->bi_len);
}
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
gfs2_setbit(rgd, NULL, bi, buf_blk, new_state);
gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
gfs2_setbit(&rbm, false, new_state);
}
return rgd;
return rbm.rgd;
}
/**
@@ -1956,46 +1939,41 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
}
/**
* claim_reserved_blks - Claim previously reserved blocks
* @ip: the inode that's claiming the reservation
* @dinode: 1 if this block is a dinode block, otherwise data block
* @nblocks: desired extent length
* gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
* @ip: The inode we have just allocated blocks for
* @rbm: The start of the allocated blocks
* @len: The extent length
*
* Lay claim to previously reserved blocks.
* Returns: Starting block number of the blocks claimed.
* Sets *nblocks to the actual extent length allocated.
* Adjusts a reservation after an allocation has taken place. If the
* reservation does not match the allocation, or if it is now empty
* then it is removed.
*/
static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
unsigned int *nblocks)
static void gfs2_adjust_reservation(struct gfs2_inode *ip,
const struct gfs2_rbm *rbm, unsigned len)
{
struct gfs2_blkreserv *rs = ip->i_res;
struct gfs2_rgrpd *rgd = rs->rs_rgd;
struct gfs2_bitmap *bi;
u64 start_block = gfs2_rs_startblk(rs);
const unsigned int elen = *nblocks;
bi = rs->rs_bi;
gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) {
if (gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
bi->bi_len, rs->rs_biblk) != GFS2_BLKST_FREE)
break;
gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_biblk,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
rs->rs_biblk++;
rs->rs_free--;
struct gfs2_rgrpd *rgd = rbm->rgd;
unsigned rlen;
u64 block;
int ret;
BUG_ON(!rgd->rd_reserved);
rgd->rd_reserved--;
dinode = false;
spin_lock(&rgd->rd_rsspin);
if (gfs2_rs_active(rs)) {
if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
block = gfs2_rbm_to_block(rbm);
ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
rlen = min(rs->rs_free, len);
rs->rs_free -= rlen;
rgd->rd_reserved -= rlen;
trace_gfs2_rs(rs, TRACE_RS_CLAIM);
if (rs->rs_free && !ret)
goto out;
}
trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
if (!rs->rs_free || *nblocks != elen)
gfs2_rs_deltree(rs);
return start_block;
__rs_deltree(ip, rs);
}
out:
spin_unlock(&rgd->rd_rsspin);
}
/**
@@ -2014,47 +1992,40 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
struct gfs2_rgrpd *rgd;
struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
unsigned int ndata;
u32 goal, blk; /* block, within the rgrp scope */
u64 goal;
u64 block; /* block, within the file system scope */
int error;
struct gfs2_bitmap *bi;
/* Only happens if there is a bug in gfs2, return something distinctive
* to ensure that it is noticed.
*/
if (ip->i_res->rs_requested == 0)
return -ECANCELED;
/* If we have a reservation, claim blocks from it. */
if (gfs2_rs_active(ip->i_res)) {
BUG_ON(!ip->i_res->rs_free);
rgd = ip->i_res->rs_rgd;
block = claim_reserved_blks(ip, dinode, nblocks);
if (*nblocks)
goto found_blocks;
}
rgd = ip->i_rgd;
if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
if (gfs2_rs_active(ip->i_res))
goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
goal = ip->i_goal;
else
goal = rgd->rd_last_alloc;
goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
gfs2_rbm_from_block(&rbm, goal);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
if (error == -ENOSPC) {
gfs2_rbm_from_block(&rbm, goal);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
}
/* Since all blocks are reserved in advance, this shouldn't happen */
if (blk == BFITNOENT) {
printk(KERN_WARNING "BFITNOENT, nblocks=%u\n", *nblocks);
printk(KERN_WARNING "FULL=%d\n",
test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
if (error) {
fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
(unsigned long long)ip->i_no_addr, error, *nblocks,
test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
goto rgrp_error;
}
block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
found_blocks:
gfs2_alloc_extent(&rbm, dinode, nblocks);
block = gfs2_rbm_to_block(&rbm);
rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
if (gfs2_rs_active(ip->i_res))
gfs2_adjust_reservation(ip, &rbm, *nblocks);
ndata = *nblocks;
if (dinode)
ndata--;
@@ -2071,22 +2042,22 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
brelse(dibh);
}
}
if (rgd->rd_free < *nblocks) {
if (rbm.rgd->rd_free < *nblocks) {
printk(KERN_WARNING "nblocks=%u\n", *nblocks);
goto rgrp_error;
}
rgd->rd_free -= *nblocks;
rbm.rgd->rd_free -= *nblocks;
if (dinode) {
rgd->rd_dinodes++;
*generation = rgd->rd_igeneration++;
rbm.rgd->rd_dinodes++;
*generation = rbm.rgd->rd_igeneration++;
if (*generation == 0)
*generation = rgd->rd_igeneration++;
*generation = rbm.rgd->rd_igeneration++;
}
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
@@ -2100,14 +2071,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
ip->i_inode.i_gid);
rgd->rd_free_clone -= *nblocks;
trace_gfs2_block_alloc(ip, rgd, block, *nblocks,
rbm.rgd->rd_free_clone -= *nblocks;
trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
*bn = block;
return 0;
rgrp_error:
gfs2_rgrp_error(rgd);
gfs2_rgrp_error(rbm.rgd);
return -EIO;
}
......
@@ -46,7 +46,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
bool dinode, u64 *generation);
extern int gfs2_rs_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
extern void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs);
extern void gfs2_rs_delete(struct gfs2_inode *ip);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
......@@ -73,30 +73,10 @@ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
extern int gfs2_fitrim(struct file *filp, void __user *argp);
-/* This is how to tell if a multi-block reservation is "inplace" reserved: */
-static inline int gfs2_mb_reserved(struct gfs2_inode *ip)
+/* This is how to tell if a reservation is in the rgrp tree: */
+static inline bool gfs2_rs_active(struct gfs2_blkreserv *rs)
{
-	if (ip->i_res && ip->i_res->rs_requested)
-		return 1;
-	return 0;
-}
-
-/* This is how to tell if a multi-block reservation is in the rgrp tree: */
-static inline int gfs2_rs_active(struct gfs2_blkreserv *rs)
-{
-	if (rs && rs->rs_bi)
-		return 1;
-	return 0;
-}
-
-static inline u32 gfs2_bi2rgd_blk(const struct gfs2_bitmap *bi, u32 blk)
-{
-	return (bi->bi_start * GFS2_NBBY) + blk;
-}
-
-static inline u64 gfs2_rs_startblk(const struct gfs2_blkreserv *rs)
-{
-	return gfs2_bi2rgd_blk(rs->rs_bi, rs->rs_biblk) + rs->rs_rgd->rd_data0;
+	return rs && !RB_EMPTY_NODE(&rs->rs_node);
}
#endif /* __RGRP_DOT_H__ */
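
The bool version of gfs2_rs_active() works because a reservation that is not in the tree has its rb_node in the "cleared" state: RB_CLEAR_NODE() makes the node's parent pointer refer to the node itself, and RB_EMPTY_NODE() tests exactly that. A self-contained userspace toy of the same idiom (not kernel code; all names here are invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_node { struct toy_node *parent; };

	/* like RB_CLEAR_NODE(): mark the node as "not in any tree" */
	static void toy_clear_node(struct toy_node *n) { n->parent = n; }
	/* like RB_EMPTY_NODE(): true if the node is in no tree */
	static bool toy_empty_node(const struct toy_node *n) { return n->parent == n; }

	int main(void)
	{
		struct toy_node rs_node;
		toy_clear_node(&rs_node);                         /* fresh reservation */
		printf("active=%d\n", !toy_empty_node(&rs_node)); /* prints 0 */
		rs_node.parent = NULL;                            /* pretend it was inserted */
		printf("active=%d\n", !toy_empty_node(&rs_node)); /* prints 1 */
		return 0;
	}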
......@@ -1366,6 +1366,8 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
seq_printf(s, ",statfs_quantum=%d", val);
+	else if (sdp->sd_tune.gt_statfs_slow)
+		seq_puts(s, ",statfs_quantum=0");
val = sdp->sd_tune.gt_quota_quantum;
if (val != 60)
seq_printf(s, ",quota_quantum=%d", val);
......@@ -1543,6 +1545,11 @@ static void gfs2_evict_inode(struct inode *inode)
out_truncate:
gfs2_log_flush(sdp, ip->i_gl);
+	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
+		struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
+		filemap_fdatawrite(metamapping);
+		filemap_fdatawait(metamapping);
+	}
write_inode_now(inode, 1);
gfs2_ail_flush(ip->i_gl, 0);
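
This addition makes delayed deletes write out dirty inode metadata: if the glock is dirty, the metadata pages hanging off its address space are written and waited on before write_inode_now() runs, so the delete cannot leave dirty metadata behind. The shape of the idiom, as a hedged kernel-context sketch (gfs2_glock2aspace() and the filemap helpers are as used above):

	/* Sketch: flush a glock's metadata mapping synchronously. */
	static void flush_glock_metadata(struct gfs2_glock *gl)
	{
		struct address_space *metamapping = gfs2_glock2aspace(gl);

		filemap_fdatawrite(metamapping);  /* start writeback */
		filemap_fdatawait(metamapping);   /* wait for it to finish */
	}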
......@@ -1557,7 +1564,7 @@ static void gfs2_evict_inode(struct inode *inode)
out_unlock:
/* Error path for case 1 */
if (gfs2_rs_active(ip->i_res))
-		gfs2_rs_deltree(ip->i_res);
+		gfs2_rs_deltree(ip, ip->i_res);
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
gfs2_glock_dq(&ip->i_iopen_gh);
......
......@@ -509,10 +509,9 @@ TRACE_EVENT(gfs2_block_alloc,
/* Keep track of multi-block reservations as they are allocated/freed */
TRACE_EVENT(gfs2_rs,
-	TP_PROTO(const struct gfs2_inode *ip, const struct gfs2_blkreserv *rs,
-		 u8 func),
+	TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),
-	TP_ARGS(ip, rs, func),
+	TP_ARGS(rs, func),
TP_STRUCT__entry(
__field( dev_t, dev )
......@@ -526,18 +525,17 @@ TRACE_EVENT(gfs2_rs,
),
TP_fast_assign(
-		__entry->dev = rs->rs_rgd ? rs->rs_rgd->rd_sbd->sd_vfs->s_dev : 0;
-		__entry->rd_addr = rs->rs_rgd ? rs->rs_rgd->rd_addr : 0;
-		__entry->rd_free_clone = rs->rs_rgd ? rs->rs_rgd->rd_free_clone : 0;
-		__entry->rd_reserved = rs->rs_rgd ? rs->rs_rgd->rd_reserved : 0;
-		__entry->inum = ip ? ip->i_no_addr : 0;
-		__entry->start = gfs2_rs_startblk(rs);
+		__entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
+		__entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
+		__entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
+		__entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
+		__entry->inum = rs->rs_inum;
+		__entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
__entry->free = rs->rs_free;
__entry->func = func;
),
TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s "
"f:%lu",
TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->start,
......
......@@ -28,11 +28,10 @@ struct gfs2_glock;
/* reserve either the number of blocks to be allocated plus the rg header
* block, or all of the blocks in the rg, whichever is smaller */
-static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
+static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
{
-	const struct gfs2_blkreserv *rs = ip->i_res;
-	if (rs && rs->rs_requested < ip->i_rgd->rd_length)
-		return rs->rs_requested + 1;
+	if (requested < ip->i_rgd->rd_length)
+		return requested + 1;
return ip->i_rgd->rd_length;
}
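
A worked example of the reworked helper: the reservation size is now passed in by the caller instead of being read from ip->i_res, and the result is the blocks requested plus the rg header block, capped at the size of the whole rgrp header. A standalone version with numbers:

	#include <stdio.h>

	/* Same shape as gfs2_rg_blocks() above, minus the inode plumbing. */
	static unsigned int rg_blocks(unsigned int requested, unsigned int rd_length)
	{
		if (requested < rd_length)
			return requested + 1;  /* requested blocks + rg header */
		return rd_length;              /* cap at the whole rgrp header */
	}

	int main(void)
	{
		printf("%u\n", rg_blocks(3, 5));    /* prints 4 */
		printf("%u\n", rg_blocks(500, 5));  /* prints 5 */
		return 0;
	}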
......
......@@ -448,17 +448,18 @@ ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
/**
- * ea_get_unstuffed - actually copies the unstuffed data into the
+ * ea_iter_unstuffed - copies the unstuffed xattr data to/from the
* request buffer
* @ip: The GFS2 inode
* @ea: The extended attribute header structure
- * @data: The data to be copied
+ * @din: The data to be copied in
+ * @dout: The data to be copied out (one of din,dout will be NULL)
*
* Returns: errno
*/
-static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
-			    char *data)
+static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
+			       const char *din, char *dout)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head **bh;
......@@ -467,6 +468,8 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
unsigned int x;
int error = 0;
+	unsigned char *pos;
+	unsigned cp_size;
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
if (!bh)
......@@ -497,12 +500,21 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
goto out;
}
-		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
-		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
+		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
+		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;
-		amount -= sdp->sd_jbsize;
-		data += sdp->sd_jbsize;
+		if (dout) {
+			memcpy(dout, pos, cp_size);
+			dout += sdp->sd_jbsize;
+		}
+		if (din) {
+			gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
+			memcpy(pos, din, cp_size);
+			din += sdp->sd_jbsize;
+		}
+		amount -= sdp->sd_jbsize;
brelse(bh[x]);
}
......@@ -523,7 +535,7 @@ static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
memcpy(data, GFS2_EA2DATA(el->el_ea), len);
return len;
}
-	ret = ea_get_unstuffed(ip, el->el_ea, data);
+	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
if (ret < 0)
return ret;
return len;
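
The read path above is one of the two call shapes the unified iterator supports; direction is selected by which buffer is non-NULL. Side by side (buf stands in for a caller-supplied buffer; illustrative fragment):

	error = gfs2_iter_unstuffed(ip, el->el_ea, NULL, buf);  /* copy out (read) */
	error = gfs2_iter_unstuffed(ip, el->el_ea, buf, NULL);  /* copy in (write) */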
......@@ -727,7 +739,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
goto out_gunlock_q;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
-				 blks + gfs2_rg_blocks(ip) +
+				 blks + gfs2_rg_blocks(ip, blks) +
RES_DINODE + RES_STATFS + RES_QUOTA, 0);
if (error)
goto out_ipres;
......@@ -1220,69 +1232,23 @@ static int gfs2_xattr_set(struct dentry *dentry, const char *name,
size, flags, type);
}
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
struct gfs2_ea_header *ea, char *data)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
-	unsigned int x;
-	int error;
-
-	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
-	if (!bh)
-		return -ENOMEM;
-
-	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
-	if (error)
-		goto out;
-
-	for (x = 0; x < nptrs; x++) {
-		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
-				       bh + x);
-		if (error) {
-			while (x--)
-				brelse(bh[x]);
-			goto fail;
-		}
-		dataptrs++;
-	}
-
-	for (x = 0; x < nptrs; x++) {
-		error = gfs2_meta_wait(sdp, bh[x]);
-		if (error) {
-			for (; x < nptrs; x++)
-				brelse(bh[x]);
-			goto fail;
-		}
-		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
-			for (; x < nptrs; x++)
-				brelse(bh[x]);
-			error = -EIO;
-			goto fail;
-		}
-		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
-		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
-		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
-		amount -= sdp->sd_jbsize;
-		data += sdp->sd_jbsize;
-		brelse(bh[x]);
-	}
+	int ret;
-out:
-	kfree(bh);
-	return error;
+	ret = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
+	if (ret)
+		return ret;
-fail:
+	ret = gfs2_iter_unstuffed(ip, ea, data, NULL);
	gfs2_trans_end(sdp);
-	kfree(bh);
-	return error;
+	return ret;
}
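
With the block iteration factored out, ea_acl_chmod_unstuffed() reduces to opening a transaction sized for the journaled data blocks plus the dinode, running the iterator in the write direction, and closing the transaction whether or not the copy succeeded. The sizing is the part worth remembering: nptrs is one block per sd_jbsize chunk of xattr data, and RES_DINODE covers the inode itself. With hypothetical numbers (assuming 4 KiB blocks and sd_jbsize of roughly the block size minus the metadata header):

	/* An 8000-byte xattr with sd_jbsize ~= 4072 bytes: */
	unsigned int nptrs = DIV_ROUND_UP(8000, 4072);  /* = 2 data blocks */
	/* => gfs2_trans_begin(sdp, 2 + RES_DINODE, 0) */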
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
......