Commit 9bb48a70 authored by Kemeng Shi, committed by Andrew Morton

writeback: factor out code of freerun to remove repeated code

Factor out the freerun code into two new helpers, domain_poll_intv() and
domain_dirty_freerun(), to remove the logic duplicated between the global
and memcg domains.
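
Condensed from the diff below, the freerun handling in balance_dirty_pages()
now takes the same shape for both domains. This is a sketch of the resulting
call pattern, not the verbatim code (gdtc and mdtc are the global and memcg
dirty_throttle_control instances already used in that function; the rest of
the loop is omitted):

	domain_dirty_freerun(gdtc, strictlimit);	/* sets gdtc->freerun */
	if (mdtc)
		domain_dirty_freerun(mdtc, strictlimit);

	/*
	 * Below both freerun ceilings: no throttling, only recompute the
	 * poll interval before the next balance_dirty_pages() invocation.
	 */
	if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
		unsigned long intv = domain_poll_intv(gdtc, strictlimit);
		unsigned long m_intv = mdtc ?
			domain_poll_intv(mdtc, strictlimit) : ULONG_MAX;

		current->nr_dirtied_pause = min(intv, m_intv);
	}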

Link: https://lkml.kernel.org/r/20240514125254.142203-5-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6e208329
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -139,6 +139,7 @@ struct dirty_throttle_control {
 	unsigned long		wb_bg_thresh;
 
 	unsigned long		pos_ratio;
+	bool			freerun;
 };
 
 /*
@@ -1702,6 +1703,49 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	}
 }
 
+static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
+				      bool strictlimit)
+{
+	unsigned long dirty, thresh;
+
+	if (strictlimit) {
+		dirty = dtc->wb_dirty;
+		thresh = dtc->wb_thresh;
+	} else {
+		dirty = dtc->dirty;
+		thresh = dtc->thresh;
+	}
+
+	return dirty_poll_interval(dirty, thresh);
+}
+
+/*
+ * Throttle it only when the background writeback cannot catch-up. This avoids
+ * (excessively) small writeouts when the wb limits are ramping up in case of
+ * !strictlimit.
+ *
+ * In strictlimit case make decision based on the wb counters and limits. Small
+ * writeouts when the wb limits are ramping up are the price we consciously pay
+ * for strictlimit-ing.
+ */
+static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
+				 bool strictlimit)
+{
+	unsigned long dirty, thresh, bg_thresh;
+
+	if (unlikely(strictlimit)) {
+		wb_dirty_limits(dtc);
+		dirty = dtc->wb_dirty;
+		thresh = dtc->wb_thresh;
+		bg_thresh = dtc->wb_bg_thresh;
+	} else {
+		dirty = dtc->dirty;
+		thresh = dtc->thresh;
+		bg_thresh = dtc->bg_thresh;
+	}
+	dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
+}
+
 /*
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
@@ -1734,27 +1778,12 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 
 	for (;;) {
 		unsigned long now = jiffies;
-		unsigned long dirty, thresh, bg_thresh;
-		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
-		unsigned long m_thresh = 0;
-		unsigned long m_bg_thresh = 0;
 
 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
 
 		domain_dirty_avail(gdtc, true);
 		domain_dirty_limits(gdtc);
-
-		if (unlikely(strictlimit)) {
-			wb_dirty_limits(gdtc);
-
-			dirty = gdtc->wb_dirty;
-			thresh = gdtc->wb_thresh;
-			bg_thresh = gdtc->wb_bg_thresh;
-		} else {
-			dirty = gdtc->dirty;
-			thresh = gdtc->thresh;
-			bg_thresh = gdtc->bg_thresh;
-		}
+		domain_dirty_freerun(gdtc, strictlimit);
 
 		if (mdtc) {
 			/*
@@ -1763,17 +1792,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 			 */
 			domain_dirty_avail(mdtc, true);
 			domain_dirty_limits(mdtc);
-
-			if (unlikely(strictlimit)) {
-				wb_dirty_limits(mdtc);
-				m_dirty = mdtc->wb_dirty;
-				m_thresh = mdtc->wb_thresh;
-				m_bg_thresh = mdtc->wb_bg_thresh;
-			} else {
-				m_dirty = mdtc->dirty;
-				m_thresh = mdtc->thresh;
-				m_bg_thresh = mdtc->bg_thresh;
-			}
+			domain_dirty_freerun(mdtc, strictlimit);
 		}
 
 		/*
@@ -1790,31 +1809,21 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 			wb_start_background_writeback(wb);
 
 		/*
-		 * Throttle it only when the background writeback cannot
-		 * catch-up. This avoids (excessively) small writeouts
-		 * when the wb limits are ramping up in case of !strictlimit.
-		 *
-		 * In strictlimit case make decision based on the wb counters
-		 * and limits. Small writeouts when the wb limits are ramping
-		 * up are the price we consciously pay for strictlimit-ing.
-		 *
 		 * If memcg domain is in effect, @dirty should be under
 		 * both global and memcg freerun ceilings.
 		 */
-		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
-		    (!mdtc ||
-		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
+		if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
 			unsigned long intv;
 			unsigned long m_intv;
 
 free_running:
-			intv = dirty_poll_interval(dirty, thresh);
+			intv = domain_poll_intv(gdtc, strictlimit);
 			m_intv = ULONG_MAX;
 			current->dirty_paused_when = now;
 			current->nr_dirtied = 0;
 			if (mdtc)
-				m_intv = dirty_poll_interval(m_dirty, m_thresh);
+				m_intv = domain_poll_intv(mdtc, strictlimit);
 			current->nr_dirtied_pause = min(intv, m_intv);
 			break;
 		}
 
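
For reference while reading domain_dirty_freerun() above: the ceiling it
compares against is the long-standing helper in mm/page-writeback.c, which
is simply the midpoint between the background and hard dirty thresholds, so
->freerun stays true as long as the task may keep dirtying unthrottled:

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}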