Commit 5b0830cb authored by Jens Axboe

writeback: get rid of incorrect references to pdflush in comments

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 71fd05a8
@@ -274,7 +274,7 @@ void invalidate_bdev(struct block_device *bdev)
 }

 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
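For context, the pattern free_more_memory() relies on (wake the flusher, yield so it can make progress, then attempt reclaim) can be sketched outside the kernel. The following is a minimal userspace analogue using pthreads; the thread, flag, and function names are invented for illustration and are not kernel APIs.

```c
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/* Stand-in for a writeback thread: waits to be kicked, then "flushes". */
static void *flusher_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!work_pending)
		pthread_cond_wait(&kick, &lock);
	work_pending = false;
	pthread_mutex_unlock(&lock);
	puts("flusher: writing back dirty data");
	return NULL;
}

/*
 * Analogue of free_more_memory(): kick the flusher, yield, and only then
 * would the caller retry its allocation / run reclaim.
 */
static void free_more_memory_like(void)
{
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	sched_yield();
	/* ...retry the failed allocation here... */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, flusher_thread, NULL);
	free_more_memory_like();
	pthread_join(&t, NULL);
	return 0;
}
```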
@@ -1699,9 +1699,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 /*
  * If it's a fully non-blocking write attempt and we cannot
  * lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
- * activity, but those code paths have their own higher-level
- * throttling.
+ * potentially cause a busy-wait loop from writeback threads
+ * and kswapd activity, but those code paths have their own
+ * higher-level throttling.
  */
 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 lock_buffer(bh);
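The trylock-or-redirty idea described in that comment translates to a simple pattern: in a non-blocking pass, skip a busy buffer and mark its page dirty again rather than blocking on the lock. Here is a standalone sketch (ordinary C with pthreads; struct fake_buffer and write_buffer() are made-up names, not the fs/buffer.c code).

```c
#include <pthread.h>
#include <stdbool.h>

struct fake_buffer {
	pthread_mutex_t lock;
	bool dirty;
};

/* Returns true if the buffer was written, false if it was skipped. */
static bool write_buffer(struct fake_buffer *bh, bool nonblocking)
{
	if (nonblocking) {
		if (pthread_mutex_trylock(&bh->lock) != 0) {
			bh->dirty = true;	/* "redirty the page", retry later */
			return false;
		}
	} else {
		pthread_mutex_lock(&bh->lock);	/* sync writeback: wait for it */
	}
	/* ...submit the I/O here... */
	bh->dirty = false;
	pthread_mutex_unlock(&bh->lock);
	return true;
}

int main(void)
{
	static struct fake_buffer bh = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dirty = true,
	};

	/* The lock is free here, so the non-blocking attempt succeeds. */
	return write_buffer(&bh, true) ? 0 : 1;
}
```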
@@ -3191,7 +3191,7 @@ void block_sync_page(struct page *page)
  * still running obsolete flush daemons, so we terminate them here.
  *
  * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
  */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
...
@@ -320,7 +320,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  * For inodes being constantly redirtied, dirtied_when can get stuck.
  * It _appears_ to be in the future, but is actually in distant past.
  * This test is necessary to prevent such wrapped-around relative times
- * from permanently stopping the whole pdflush writeback.
+ * from permanently stopping the whole bdi writeback.
  */
 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
 #endif
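The wrap-around hazard this comment guards against is easy to reproduce with the jiffies comparison helpers. The program below is plain userspace C, not kernel code: the macros mirror the kernel's time_after()/time_before_eq() definitions (minus the typecheck() wrappers), and the timestamps are made up so that a stale dirtied_when lands more than half the counter range behind "now".

```c
#include <stdio.h>
#include <limits.h>

/* Simplified copies of the jiffies helpers from include/linux/jiffies.h. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define time_before_eq(a, b)	time_after_eq(b, a)

int main(void)
{
	unsigned long dirtied_when = 1000;	/* stamped ages ago, never refreshed */
	unsigned long jiffies = (unsigned long)LONG_MAX + 2000UL;	/* "now" */
	unsigned long older_than_this = jiffies - 600;	/* writeback cutoff */

	/* Wrap-around makes the ancient timestamp look newer than the cutoff... */
	printf("appears dirtied after cutoff: %d\n",
	       time_after(dirtied_when, older_than_this));
	/*
	 * ...but the sanity check sees it is not really in the future (prints 0),
	 * so inode_dirtied_after() returns false and writeback is not stalled.
	 */
	printf("passes time_before_eq(dirtied_when, jiffies): %d\n",
	       time_before_eq(dirtied_when, jiffies));
	return 0;
}
```

On a typical 64-bit Linux toolchain this prints 1 and then 0, which is exactly the combination the added check is there to catch.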
@@ -1085,9 +1085,6 @@ EXPORT_SYMBOL(__mark_inode_dirty);
  * If older_than_this is non-NULL, then only write out inodes which
  * had their first dirtying at a time earlier than *older_than_this.
  *
- * If we're a pdlfush thread, then implement pdflush collision avoidance
- * against the entire list.
- *
  * If `bdi' is non-zero then we're being asked to writeback a specific queue.
  * This function assumes that the blockdev superblock's inodes are backed by
  * a variety of queues, so all inodes are searched. For other superblocks,
...
@@ -58,7 +58,7 @@ static inline long sync_writeback_pages(unsigned long dirtied)
 /* The following parameters are exported via /proc/sys/vm */

 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
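Since dirty_background_ratio is exported via /proc/sys/vm, its live value can be read from userspace. A minimal reader, assuming a Linux host with procfs mounted, might look like this (nothing here is kernel code):

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/dirty_background_ratio", "r");
	int ratio;

	if (!f || fscanf(f, "%d", &ratio) != 1) {
		perror("dirty_background_ratio");
		return 1;
	}
	fclose(f);
	printf("background writeback starts at %d%% dirty memory\n", ratio);
	return 0;
}
```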
@@ -477,8 +477,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data. It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
 static void balance_dirty_pages(struct address_space *mapping,
 unsigned long write_chunk)
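As a rough illustration of the two-threshold policy described above (wake the writeback threads past the background threshold, make the dirtier write back itself past the hard vm_dirty_ratio limit), here is a self-contained sketch. struct dirty_limits, wake_flusher(), and do_writeback() are invented stand-ins, not the mm/page-writeback.c implementation.

```c
#include <stdio.h>

struct dirty_limits {
	unsigned long background_thresh;	/* soft limit, e.g. dirty_background_ratio% of memory */
	unsigned long dirty_thresh;		/* hard limit, e.g. vm_dirty_ratio% of memory */
};

static void wake_flusher(void)
{
	puts("waking writeback threads");	/* stand-in for asynchronous background writeback */
}

static void do_writeback(unsigned long nr_pages)
{
	printf("caller writes back %lu pages itself\n", nr_pages);	/* stand-in */
}

static void balance_dirty_pages_sketch(unsigned long nr_dirty,
				       const struct dirty_limits *lim)
{
	if (nr_dirty > lim->dirty_thresh)
		do_writeback(nr_dirty - lim->dirty_thresh);	/* throttle the dirtier */
	else if (nr_dirty > lim->background_thresh)
		wake_flusher();					/* just kick background writeback */
}

int main(void)
{
	struct dirty_limits lim = { .background_thresh = 1000, .dirty_thresh = 4000 };

	balance_dirty_pages_sketch(1500, &lim);	/* over the soft limit only */
	balance_dirty_pages_sketch(5000, &lim);	/* over the hard limit */
	return 0;
}
```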
@@ -582,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 bdi->dirty_exceeded = 0;

 if (writeback_in_progress(bdi))
- return; /* pdflush is already working this queue */
+ return;

 /*
  * In laptop mode, we wait until hitting the higher threshold before
...
@@ -1046,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
  * sync from ever calling shmem_writepage; but a stacking filesystem
  * may use the ->writepage of its underlying filesystem, in which case
  * tmpfs should write out to swap only in response to memory pressure,
- * and not for pdflush or sync. However, in those cases, we do still
- * want to check if there's a redundant swappage to be discarded.
+ * and not for the writeback threads or sync. However, in those cases,
+ * we do still want to check if there's a redundant swappage to be
+ * discarded.
  */
 if (wbc->for_reclaim)
 swap = get_swap_page();
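The for_reclaim distinction spelled out in that comment can be sketched in isolation: write a tmpfs page to swap only when writeback was initiated by memory reclaim, and otherwise leave it dirty. The types and helpers below are invented for the sketch and are not the mm/shmem.c code.

```c
#include <stdbool.h>
#include <stdio.h>

struct fake_wbc {
	bool for_reclaim;	/* true when called from page reclaim */
};

static bool get_swap_slot(void)
{
	return true;		/* stand-in for get_swap_page() succeeding */
}

static int shmem_writepage_sketch(const struct fake_wbc *wbc)
{
	if (!wbc->for_reclaim) {
		puts("not reclaim: keep the page dirty, skip swap-out");
		return 0;
	}
	if (get_swap_slot()) {
		puts("reclaim: writing the page to swap");
		return 0;
	}
	return -1;		/* no swap slot available */
}

int main(void)
{
	struct fake_wbc sync_wbc = { .for_reclaim = false };
	struct fake_wbc reclaim_wbc = { .for_reclaim = true };

	shmem_writepage_sketch(&sync_wbc);
	shmem_writepage_sketch(&reclaim_wbc);
	return 0;
}
```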
...
@@ -1709,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about. We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written. But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about. We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written. But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns: 0, if no pages reclaimed
  * else, the number of pages reclaimed
...