Commit cc4f11e6 authored by Jan Kara, committed by Linus Torvalds

mm: migrate: lock buffers before migrate_page_move_mapping()

Lock buffers before calling into migrate_page_move_mapping() so that
migrate_page_move_mapping() doesn't have to know about buffers (which is
somewhat unexpected anyway) and all the buffer head logic is in
buffer_migrate_page().

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0b3901b3
...@@ -486,20 +486,6 @@ int migrate_page_move_mapping(struct address_space *mapping, ...@@ -486,20 +486,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
return -EAGAIN; return -EAGAIN;
} }
/*
* In the async migration case of moving a page with buffers, lock the
* buffers using trylock before the mapping is moved. If the mapping
* was moved, we later failed to lock the buffers and could not move
* the mapping back due to an elevated page count, we would have to
* block waiting on other references to be dropped.
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
page_ref_unfreeze(page, expected_count);
xas_unlock_irq(&xas);
return -EAGAIN;
}
/* /*
* Now we know that no one else is looking at the page: * Now we know that no one else is looking at the page:
* no turning back from here. * no turning back from here.
...@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping, ...@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
{ {
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
int rc; int rc;
int expected_count;
if (!page_has_buffers(page)) if (!page_has_buffers(page))
return migrate_page(mapping, newpage, page, mode); return migrate_page(mapping, newpage, page, mode);
head = page_buffers(page); /* Check whether page does not have extra refs before we do more work */
expected_count = expected_page_refs(page);
if (page_count(page) != expected_count)
return -EAGAIN;
rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); head = page_buffers(page);
if (!buffer_migrate_lock_buffers(head, mode))
return -EAGAIN;
rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS) if (rc != MIGRATEPAGE_SUCCESS)
return rc; goto unlock_buffers;
/*
* In the async case, migrate_page_move_mapping locked the buffers
* with an IRQ-safe spinlock held. In the sync case, the buffers
* need to be locked now
*/
if (mode != MIGRATE_ASYNC)
BUG_ON(!buffer_migrate_lock_buffers(head, mode));
ClearPagePrivate(page); ClearPagePrivate(page);
set_page_private(newpage, page_private(page)); set_page_private(newpage, page_private(page));
...@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping, ...@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
else else
migrate_page_states(newpage, page); migrate_page_states(newpage, page);
rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
bh = head; bh = head;
do { do {
unlock_buffer(bh); unlock_buffer(bh);
...@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping, ...@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
} while (bh != head); } while (bh != head);
return MIGRATEPAGE_SUCCESS; return rc;
} }
EXPORT_SYMBOL(buffer_migrate_page); EXPORT_SYMBOL(buffer_migrate_page);
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment