Commit 5ae8fff8 authored by Andreas Gruenbacher

gfs2: Clean up gfs2_log_reserve

Wake up log waiters in gfs2_log_release when log space has actually become
available.  This is a much better place for the wakeup than gfs2_logd.

Check if enough log space is immediately available before anything else.  If
there isn't, use io_wait_event to wait instead of open-coding it.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 4a3d049d
...@@ -415,11 +415,12 @@ bool gfs2_log_is_empty(struct gfs2_sbd *sdp) { ...@@ -415,11 +415,12 @@ bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{ {
atomic_add(blks, &sdp->sd_log_blks_free); atomic_add(blks, &sdp->sd_log_blks_free);
trace_gfs2_log_blocks(sdp, blks); trace_gfs2_log_blocks(sdp, blks);
gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
sdp->sd_jdesc->jd_blocks); sdp->sd_jdesc->jd_blocks);
if (atomic_read(&sdp->sd_log_blks_needed))
wake_up(&sdp->sd_log_waitq);
} }
/** /**
...@@ -444,36 +445,33 @@ void gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) ...@@ -444,36 +445,33 @@ void gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{ {
unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize); unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
unsigned wanted = blks + reserved_blks; unsigned wanted = blks + reserved_blks;
DEFINE_WAIT(wait);
int did_wait = 0;
unsigned int free_blocks; unsigned int free_blocks;
atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
free_blocks = atomic_read(&sdp->sd_log_blks_free); free_blocks = atomic_read(&sdp->sd_log_blks_free);
if (unlikely(free_blocks <= wanted)) { while (free_blocks >= wanted) {
do { if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait, free_blocks - blks))
TASK_UNINTERRUPTIBLE); return;
}
atomic_add(blks, &sdp->sd_log_blks_needed);
for (;;) {
if (current != sdp->sd_logd_process)
wake_up(&sdp->sd_logd_waitq); wake_up(&sdp->sd_logd_waitq);
did_wait = 1; io_wait_event(sdp->sd_log_waitq,
if (atomic_read(&sdp->sd_log_blks_free) <= wanted) (free_blocks = atomic_read(&sdp->sd_log_blks_free),
io_schedule(); free_blocks >= wanted));
free_blocks = atomic_read(&sdp->sd_log_blks_free); do {
} while(free_blocks <= wanted); if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
finish_wait(&sdp->sd_log_waitq, &wait); &free_blocks,
free_blocks - blks))
goto reserved;
} while (free_blocks >= wanted);
} }
if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
free_blocks - blks) != free_blocks)
goto retry;
atomic_sub(blks, &sdp->sd_log_blks_needed);
trace_gfs2_log_blocks(sdp, -blks);
/* reserved:
* If we waited, then so might others, wake them up _after_ we get trace_gfs2_log_blocks(sdp, -blks);
* our share of the log. if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
*/
if (unlikely(did_wait))
wake_up(&sdp->sd_log_waitq); wake_up(&sdp->sd_log_waitq);
} }
...@@ -1107,7 +1105,8 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) ...@@ -1107,7 +1105,8 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
maxres = sdp->sd_log_blks_reserved + tr->tr_reserved; maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
gfs2_assert_withdraw(sdp, maxres >= reserved); gfs2_assert_withdraw(sdp, maxres >= reserved);
unused = maxres - reserved; unused = maxres - reserved;
gfs2_log_release(sdp, unused); if (unused)
gfs2_log_release(sdp, unused);
sdp->sd_log_blks_reserved = reserved; sdp->sd_log_blks_reserved = reserved;
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
...@@ -1192,7 +1191,6 @@ int gfs2_logd(void *data) ...@@ -1192,7 +1191,6 @@ int gfs2_logd(void *data)
struct gfs2_sbd *sdp = data; struct gfs2_sbd *sdp = data;
unsigned long t = 1; unsigned long t = 1;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
bool did_flush;
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
...@@ -1211,12 +1209,10 @@ int gfs2_logd(void *data) ...@@ -1211,12 +1209,10 @@ int gfs2_logd(void *data)
continue; continue;
} }
did_flush = false;
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp, 0); gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_LOGD_JFLUSH_REQD); GFS2_LFC_LOGD_JFLUSH_REQD);
did_flush = true;
} }
if (gfs2_ail_flush_reqd(sdp)) { if (gfs2_ail_flush_reqd(sdp)) {
...@@ -1225,12 +1221,8 @@ int gfs2_logd(void *data) ...@@ -1225,12 +1221,8 @@ int gfs2_logd(void *data)
gfs2_ail1_empty(sdp, 0); gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_LOGD_AIL_FLUSH_REQD); GFS2_LFC_LOGD_AIL_FLUSH_REQD);
did_flush = true;
} }
if (!gfs2_ail_flush_reqd(sdp) || did_flush)
wake_up(&sdp->sd_log_waitq);
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
try_to_freeze(); try_to_freeze();
......
...@@ -73,10 +73,9 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp, ...@@ -73,10 +73,9 @@ int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
down_read(&sdp->sd_log_flush_lock); down_read(&sdp->sd_log_flush_lock);
if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) { if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
gfs2_log_release(sdp, tr->tr_reserved);
up_read(&sdp->sd_log_flush_lock); up_read(&sdp->sd_log_flush_lock);
gfs2_log_release(sdp, tr->tr_reserved);
sb_end_intwrite(sdp->sd_vfs); sb_end_intwrite(sdp->sd_vfs);
wake_up(&sdp->sd_log_waitq);
return -EROFS; return -EROFS;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment