Commit b74cd55a authored by Andreas Gruenbacher

gfs2: low-memory forced flush fixes

First, function gfs2_ail_flush_reqd checks the SDF_FORCE_AIL_FLUSH flag
to determine if an AIL flush should be forced in low-memory situations.
However, it also immediately clears the flag, and when called repeatedly
as in function gfs2_logd, the flag will be lost.  Fix that by pulling
the SDF_FORCE_AIL_FLUSH flag check out of gfs2_ail_flush_reqd.

Second, function gfs2_writepages sets the SDF_FORCE_AIL_FLUSH flag
whether or not enough pages were written.  If enough pages could be
written, flushing the AIL is unnecessary, though.

Third, gfs2_writepages doesn't wake up logd after setting the
SDF_FORCE_AIL_FLUSH flag, so it can take a long time for logd to react.
It would be preferable to wake up logd, but that hurts the performance
of some workloads and we don't quite understand why yet, so don't
wake up logd for now.

Fixes: b066a4ee ("gfs2: forcibly flush ail to relieve memory pressure")
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 6df373b0
...@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping, ...@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping,
int ret; int ret;
/* /*
* Even if we didn't write any pages here, we might still be holding * Even if we didn't write enough pages here, we might still be holding
* dirty pages in the ail. We forcibly flush the ail because we don't * dirty pages in the ail. We forcibly flush the ail because we don't
* want balance_dirty_pages() to loop indefinitely trying to write out * want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find. * pages held in the ail that it can't find.
*/ */
ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops); ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
if (ret == 0) if (ret == 0 && wbc->nr_to_write > 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags); set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret; return ret;
} }
......
...@@ -1282,9 +1282,6 @@ static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) ...@@ -1282,9 +1282,6 @@ static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{ {
unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
return 1;
return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
atomic_read(&sdp->sd_log_thresh2); atomic_read(&sdp->sd_log_thresh2);
} }
...@@ -1325,7 +1322,9 @@ int gfs2_logd(void *data) ...@@ -1325,7 +1322,9 @@ int gfs2_logd(void *data)
GFS2_LFC_LOGD_JFLUSH_REQD); GFS2_LFC_LOGD_JFLUSH_REQD);
} }
if (gfs2_ail_flush_reqd(sdp)) { if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
gfs2_ail_flush_reqd(sdp)) {
clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
gfs2_ail1_start(sdp); gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp); gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp, 0); gfs2_ail1_empty(sdp, 0);
...@@ -1338,6 +1337,7 @@ int gfs2_logd(void *data) ...@@ -1338,6 +1337,7 @@ int gfs2_logd(void *data)
try_to_freeze(); try_to_freeze();
t = wait_event_interruptible_timeout(sdp->sd_logd_waitq, t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
gfs2_ail_flush_reqd(sdp) || gfs2_ail_flush_reqd(sdp) ||
gfs2_jrnl_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp) ||
kthread_should_stop(), kthread_should_stop(),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment