Commit e8c92ed7 authored by Steven Whitehouse

GFS2: Clean up log write code path

Prior to this patch, we have two ways of sending i/o to the log.
One of those is used when we need to allocate both the data
to be written itself and also a buffer head to submit it. This
is done via sb_getblk and friends. This is used mostly for writing
log headers.

The other method is used when writing blocks which have some
in-place counterpart. This is the case for all the metadata
blocks which are journalled, and when journalled data is in use,
for unescaped journalled data blocks.

This patch replaces both of those methods, and about half
a dozen separate i/o submission points, with a single i/o
submission function. We also go direct to bio rather than
using buffer heads, since this allows us to build i/o
requests of the maximum size for the block device in
question. It also reduces the memory required for flushing
the log, which can be very useful in low memory situations.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 2f7ee358
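
As an illustration only (not code from this patch), here is a condensed before/after of the pattern described in the commit message, pieced together from the log_write_header hunk further down; the two wrapper function names are invented for the example, and the usual #includes are omitted:

static void old_style_log_write(struct gfs2_sbd *sdp, u64 blkno)
{
	/* before: allocate a buffer head just to submit this one block */
	struct buffer_head *bh = sb_getblk(sdp->sd_vfs, blkno);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	/* ... fill in bh->b_data ... */
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);
}

static void new_style_log_write(struct gfs2_sbd *sdp)
{
	/* after: take a page from the new gfs2_page_pool mempool and hand
	 * it to the single bio-based submission path */
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);

	clear_page(page_address(page));
	/* ... fill in page_address(page) ... */
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, WRITE_FLUSH_FUA | REQ_META);
}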
@@ -716,7 +716,9 @@ struct gfs2_sbd {
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
struct bio *sd_log_bio;
wait_queue_head_t sd_log_flush_wait;
int sd_log_error;
unsigned int sd_log_flush_head;
u64 sd_log_flush_wrapped;
......
@@ -357,18 +357,6 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
return 0;
}
u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
struct gfs2_journal_extent *je;
list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
return je->dblock + lbn - je->lblock;
}
return -1;
}
/**
* log_distance - Compute distance between two journal blocks
* @sdp: The GFS2 superblock
@@ -464,17 +452,6 @@ static unsigned int current_tail(struct gfs2_sbd *sdp)
return tail;
}
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
(sdp->sd_log_flush_head != sdp->sd_log_head));
if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
sdp->sd_log_flush_head = 0;
sdp->sd_log_flush_wrapped = 1;
}
}
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
@@ -580,23 +557,17 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
struct buffer_head *bh;
struct gfs2_log_header *lh;
unsigned int tail;
u32 hash;
bh = sb_getblk(sdp->sd_vfs, blkno);
lock_buffer(bh);
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
int rw = WRITE_FLUSH_FUA | REQ_META;
struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
lh = page_address(page);
clear_page(lh);
gfs2_ail1_empty(sdp);
tail = current_tail(sdp);
lh = (struct gfs2_log_header *)bh->b_data;
memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
@@ -606,29 +577,22 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail);
lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
lh->lh_hash = cpu_to_be32(hash);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
log_flush_wait(sdp);
submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
} else {
submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
rw = WRITE_SYNC | REQ_META | REQ_PRIO;
}
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
gfs2_io_error_bh(sdp, bh);
brelse(bh);
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
gfs2_log_write_page(sdp, page);
gfs2_log_flush_bio(sdp, rw);
log_flush_wait(sdp);
if (sdp->sd_log_tail != tail)
log_pull_tail(sdp, tail);
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
}
/**
@@ -674,6 +638,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
gfs2_ordered_write(sdp);
lops_before_commit(sdp);
gfs2_log_flush_bio(sdp, WRITE);
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_write_header(sdp, 0);
......
@@ -52,8 +52,6 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
unsigned int ssize);
extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn);
extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
......
This diff is collapsed.
@@ -27,6 +27,8 @@ extern const struct gfs2_log_operations gfs2_rg_lops;
extern const struct gfs2_log_operations gfs2_databuf_lops;
extern const struct gfs2_log_operations *gfs2_log_ops[];
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
......
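
The diff that is collapsed above sits between log.h and lops.h, so it is presumably fs/gfs2/lops.c, where gfs2_log_write_page() and gfs2_log_flush_bio() (declared in the lops.h hunk just above) are implemented. Since that code is not shown on this page, the following is only a rough sketch, under stated assumptions, of how such a pair of helpers could be structured: the sd_log_bio, sd_log_in_flight and sd_log_flush_wait fields come from the gfs2_sbd hunk near the top, while the completion handler name and the remaining details are guesses rather than the actual implementation (usual #includes omitted).

/* sketch of a bio completion handler; the name is invented */
static void example_log_end_io(struct bio *bio, int error)
{
	struct gfs2_sbd *sdp = bio->bi_private;

	/* the real handler would also return the written pages to
	 * gfs2_page_pool and record any i/o error in sd_log_error */
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
	bio_put(bio);
}

/* submit whatever bio is currently being built for the log, if any */
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		submit_bio(rw, sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/* add one block-sized page at the current log head to the bio being
 * built, starting a new bio when none exists or when the block is not
 * physically contiguous; this is what allows i/o requests to grow to
 * the maximum size the block device accepts */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
	sector_t sector = blkno * (sb->s_blocksize >> 9);
	struct bio *bio = sdp->sd_log_bio;

	if (bio && bio->bi_sector + bio_sectors(bio) != sector)
		gfs2_log_flush_bio(sdp, WRITE);

	if (sdp->sd_log_bio == NULL) {
		bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(sb->s_bdev));
		bio->bi_sector = sector;
		bio->bi_bdev = sb->s_bdev;
		bio->bi_end_io = example_log_end_io;
		bio->bi_private = sdp;
		sdp->sd_log_bio = bio;
	}

	if (bio_add_page(bio, page, sb->s_blocksize, 0) < sb->s_blocksize) {
		/* the bio is full: submit it and retry with a fresh one */
		gfs2_log_flush_bio(sdp, WRITE);
		gfs2_log_write_page(sdp, page);
		return;
	}

	gfs2_log_incr_head(sdp);
}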
@@ -70,16 +70,6 @@ static void gfs2_init_gl_aspace_once(void *foo)
address_space_init_once(mapping);
}
static void *gfs2_bh_alloc(gfp_t mask, void *data)
{
return alloc_buffer_head(mask);
}
static void gfs2_bh_free(void *ptr, void *data)
{
return free_buffer_head(ptr);
}
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -170,8 +160,8 @@ static int __init init_gfs2_fs(void)
if (!gfs2_control_wq)
goto fail_recovery;
gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL);
if (!gfs2_bh_pool)
gfs2_page_pool = mempool_create_page_pool(64, 0);
if (!gfs2_page_pool)
goto fail_control;
gfs2_register_debugfs();
@@ -234,7 +224,7 @@ static void __exit exit_gfs2_fs(void)
rcu_barrier();
mempool_destroy(gfs2_bh_pool);
mempool_destroy(gfs2_page_pool);
kmem_cache_destroy(gfs2_rsrv_cachep);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
......
@@ -26,7 +26,7 @@ struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
struct kmem_cache *gfs2_quotad_cachep __read_mostly;
struct kmem_cache *gfs2_rsrv_cachep __read_mostly;
mempool_t *gfs2_bh_pool __read_mostly;
mempool_t *gfs2_page_pool __read_mostly;
void gfs2_assert_i(struct gfs2_sbd *sdp)
{
......
@@ -153,7 +153,7 @@ extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
extern struct kmem_cache *gfs2_quotad_cachep;
extern struct kmem_cache *gfs2_rsrv_cachep;
extern mempool_t *gfs2_bh_pool;
extern mempool_t *gfs2_page_pool;
static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
unsigned int *p)
......