Commit 1babe183 authored by Wu Fengguang, committed by Linus Torvalds

writeback: add comment to the dirty limit functions

Document global_dirty_limits() and bdi_dirty_limit().
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 16c4042f
...@@ -261,11 +261,18 @@ static inline void task_dirties_fraction(struct task_struct *tsk, ...@@ -261,11 +261,18 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
} }
/* /*
* scale the dirty limit * task_dirty_limit - scale down dirty throttling threshold for one task
* *
* task specific dirty limit: * task specific dirty limit:
* *
* dirty -= (dirty/8) * p_{t} * dirty -= (dirty/8) * p_{t}
*
* To protect light/slow dirtying tasks from heavier/fast ones, we start
* throttling individual tasks before reaching the bdi dirty limit.
* Relatively low thresholds will be allocated to heavy dirtiers. So when
* dirty pages grow large, heavy dirtiers will be throttled first, which will
* effectively curb the growth of dirty pages. Light dirtiers with high enough
* dirty threshold may never get throttled.
*/ */
static unsigned long task_dirty_limit(struct task_struct *tsk, static unsigned long task_dirty_limit(struct task_struct *tsk,
unsigned long bdi_dirty) unsigned long bdi_dirty)
...@@ -390,6 +397,15 @@ unsigned long determine_dirtyable_memory(void) ...@@ -390,6 +397,15 @@ unsigned long determine_dirtyable_memory(void)
return x + 1; /* Ensure that we never return 0 */ return x + 1; /* Ensure that we never return 0 */
} }
/**
* global_dirty_limits - background-writeback and dirty-throttling thresholds
*
* Calculate the dirty thresholds based on sysctl parameters
* - vm.dirty_background_ratio or vm.dirty_background_bytes
* - vm.dirty_ratio or vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
*/
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{ {
unsigned long background; unsigned long background;
...@@ -424,8 +440,17 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) ...@@ -424,8 +440,17 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
*pdirty = dirty; *pdirty = dirty;
} }
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, /**
unsigned long dirty) * bdi_dirty_limit - @bdi's share of dirty throttling threshold
*
* Allocate high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
* - piling up dirty pages (that will take long time to sync) on slow devices
*
* The bdi's share of dirty limit will be adapting to its throughput and
* bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
*/
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{ {
u64 bdi_dirty; u64 bdi_dirty;
long numerator, denominator; long numerator, denominator;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment