Commit c555772c authored by Matthew Wilcox, committed by Greg Kroah-Hartman

dax: Don't access a freed inode

commit 55e56f06 upstream.

After we drop the i_pages lock, the inode can be freed at any time.
The get_unlocked_entry() code has no choice but to reacquire the lock,
so it can't be used here.  Create a new wait_entry_unlocked() which takes
care not to acquire the lock or dereference the address_space in any way.

Fixes: c2a7d2a1 ("filesystem-dax: Introduce dax_lock_mapping_entry()")
Cc: <stable@vger.kernel.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent a9935a12
@@ -229,8 +229,8 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
  *
  * Must be called with the i_pages lock held.
  */
-static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+		pgoff_t index, void ***slotp)
 {
 	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
@@ -240,8 +240,6 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		bool revalidate;
-
 		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 					  &slot);
 		if (!entry ||
@@ -256,30 +254,39 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
-		revalidate = wait_fn();
+		schedule();
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
-		if (revalidate) {
-			put_unlocked_mapping_entry(mapping, index, entry);
-			return ERR_PTR(-EAGAIN);
-		}
 	}
 }
 
-static bool entry_wait(void)
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
+		void ***slotp, void *entry)
 {
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xa_unlock_irq(&mapping->i_pages);
 	schedule();
+	finish_wait(wq, &ewait.wait);
 
 	/*
-	 * Never return an ERR_PTR() from
-	 * __get_unlocked_mapping_entry(), just keep looping.
+	 * Entry lock waits are exclusive. Wake up the next waiter since
+	 * we aren't sure we will acquire the entry lock and thus wake
+	 * the next waiter up on unlock.
 	 */
-	return false;
-}
-
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp)
-{
-	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 }
 
 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
@@ -398,19 +405,6 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
 	pgoff_t index;
@@ -446,14 +440,15 @@ bool dax_lock_mapping_entry(struct page *page)
 		}
 		index = page->index;
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
+		entry = __radix_tree_lookup(&mapping->i_pages, index,
+					NULL, &slot);
 		if (!entry) {
 			xa_unlock_irq(&mapping->i_pages);
 			break;
-		} else if (IS_ERR(entry)) {
-			xa_unlock_irq(&mapping->i_pages);
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+		} else if (slot_locked(mapping, slot)) {
+			rcu_read_unlock();
+			wait_entry_unlocked(mapping, index, &slot, entry);
+			rcu_read_lock();
 			continue;
 		}
 		lock_slot(mapping, slot);
...
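
The comment in the new wait_entry_unlocked() ("Entry lock waits are exclusive...") describes a hand-off pattern that is easy to see outside the kernel as well. Below is a minimal user-space sketch, assuming POSIX threads; it is not kernel code, and the names (wait_then_give_up, entry_locked, waiter) are invented for illustration. With wake-one semantics, a waiter that decides not to take the lock must pass its wakeup on, otherwise another waiter may never run:

/*
 * User-space analogy only; names here are invented for the sketch.
 * With a wake-one (exclusive) wait, a waiter that gives up without
 * taking the resource must pass the wakeup on, or a remaining
 * waiter can sleep forever.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool entry_locked = true;

/* Rough analogue of wait_entry_unlocked(): wait once, then bail out
 * without taking the entry, but re-signal so the next waiter runs. */
static void wait_then_give_up(void)
{
	pthread_mutex_lock(&lock);
	while (entry_locked)
		pthread_cond_wait(&cond, &lock);
	/* We are not taking the entry; hand the single wakeup on. */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *waiter(void *arg)
{
	wait_then_give_up();
	printf("waiter %ld woke up\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);

	/* Release the "entry" and wake exactly one waiter; without the
	 * re-signal in wait_then_give_up(), the second waiter could
	 * block indefinitely when both are already asleep. */
	pthread_mutex_lock(&lock);
	entry_locked = false;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with cc -pthread. Here pthread_cond_signal() plays the role of the exclusive (wake-one) wakeup for which the patch uses __wake_up(wq, TASK_NORMAL, 1, &ewait.key).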