Commit 27359fd6 authored by Matthew Wilcox, committed by Dan Williams

dax: Fix unlock mismatch with updated API

Internal to dax_unlock_mapping_entry(), dax_unlock_entry() is used to
store a replacement entry in the XArray at the given xas-index with the
DAX_LOCKED bit clear. The caller must pass dax_unlock_entry() the
unlocked value of the entry relative to the current XArray state.
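
Roughly, that contract looks like the following sketch (illustrative
only, not the kernel's actual implementation; it omits waking waiters):

	static void dax_unlock_entry_sketch(struct xa_state *xas, void *entry)
	{
		/* 'entry' is the unlocked value to publish at xas->xa_index,
		 * i.e. what dax_lock_entry() saw, with DAX_LOCKED clear. */
		xas_reset(xas);
		xas_lock_irq(xas);
		xas_store(xas, entry);
		xas_unlock_irq(xas);
		/* the real dax_unlock_entry() also wakes entry waiters here */
	}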

In most contexts dax_unlock_entry() operates in the same scope as the
matched dax_lock_entry(). However, in the dax_unlock_mapping_entry()
case the implementation needs to recall the original entry. When the
original entry is a 'pmd' entry, the pfn used to do the lookup can be
misaligned relative to the value retrieved from the XArray.
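
Concretely, assuming x86-64 geometry (a PMD entry spans 512 4KiB pages,
so the XArray entry records the PMD-aligned pfn), a userspace-style
illustration of the mismatch; all values here are made up:

	#include <stdio.h>

	int main(void)
	{
		unsigned long stored_pfn = 0x1200;          /* PMD-aligned pfn in the entry */
		unsigned long page_pfn   = stored_pfn + 37; /* pfn of a subpage inside the PMD */

		/* Rebuilding the entry from page_pfn, as the old unlock path did,
		 * only matches the stored entry if page_pfn is itself PMD-aligned. */
		printf("rebuilt matches stored: %d\n", page_pfn == stored_pfn);            /* 0 */
		printf("aligned matches stored: %d\n", (page_pfn & ~511UL) == stored_pfn); /* 1 */
		return 0;
	}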

Change the API to return the unlock cookie from dax_lock_page() and pass
it to dax_unlock_page(). This fixes a bug where dax_unlock_page()
assumed the page was PMD-aligned whenever the entry was a PMD entry,
producing signatures like:

 WARNING: CPU: 38 PID: 1396 at fs/dax.c:340 dax_insert_entry+0x2b2/0x2d0
 RIP: 0010:dax_insert_entry+0x2b2/0x2d0
 [..]
 Call Trace:
  dax_iomap_pte_fault.isra.41+0x791/0xde0
  ext4_dax_huge_fault+0x16f/0x1f0
  ? up_read+0x1c/0xa0
  __do_fault+0x1f/0x160
  __handle_mm_fault+0x1033/0x1490
  handle_mm_fault+0x18b/0x3d0
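
With the change, a caller keeps the returned cookie and hands it back on
unlock; a minimal sketch of the new pairing (poke_dax_page() is a
hypothetical caller, not part of this patch):

	#include <linux/dax.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	static int poke_dax_page(struct page *page)
	{
		dax_entry_t cookie = dax_lock_page(page);

		if (!cookie)
			return -EBUSY;		/* entry could not be locked */
		/* ... page->mapping and page->index are stable here ... */
		dax_unlock_page(page, cookie);	/* pass the cookie back verbatim */
		return 0;
	}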

Link: https://lkml.kernel.org/r/20181130154902.GL10377@bombadil.infradead.org
Fixes: 9f32d221 ("dax: Convert dax_lock_mapping_entry to XArray")
Reported-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 55e56f06
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -379,20 +379,20 @@ static struct page *dax_busy_page(void *entry)
  * @page: The page whose entry we want to lock
  *
  * Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
  */
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
-	bool locked;
 
 	/* Ensure page->mapping isn't freed while we look at it */
 	rcu_read_lock();
 	for (;;) {
 		struct address_space *mapping = READ_ONCE(page->mapping);
 
-		locked = false;
+		entry = NULL;
 		if (!mapping || !dax_mapping(mapping))
 			break;
 
@@ -403,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		locked = true;
+		entry = (void *)~0UL;
 		if (S_ISCHR(mapping->host->i_mode))
 			break;
 
@@ -426,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
 		break;
 	}
 	rcu_read_unlock();
-	return locked;
+	return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 	struct address_space *mapping = page->mapping;
 	XA_STATE(xas, &mapping->i_pages, page->index);
-	void *entry;
 
 	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	rcu_read_lock();
-	entry = xas_load(&xas);
-	rcu_read_unlock();
-	entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
-	dax_unlock_entry(&xas, entry);
+	dax_unlock_entry(&xas, (void *)cookie);
 }
 
 /*
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
 #include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
+typedef unsigned long dax_entry_t;
+
 struct iomap_ops;
 struct dax_device;
 struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 		struct block_device *bdev, struct writeback_control *wbc);
 
 struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
 #else
 static inline bool bdev_dax_supported(struct block_device *bdev,
 		int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
 	return -EOPNOTSUPP;
 }
 
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
 {
 	if (IS_DAX(page->mapping->host))
-		return true;
-	return false;
+		return ~0UL;
+	return 0;
 }
 
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
 #endif
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	LIST_HEAD(tokill);
 	int rc = -EBUSY;
 	loff_t start;
+	dax_entry_t cookie;
 
 	/*
 	 * Prevent the inode from being freed while we are interrogating
@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	 * also prevents changes to the mapping of this pfn until
 	 * poison signaling is complete.
 	 */
-	if (!dax_lock_mapping_entry(page))
+	cookie = dax_lock_page(page);
+	if (!cookie)
 		goto out;
 
 	if (hwpoison_filter(page)) {
@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
 	rc = 0;
 unlock:
-	dax_unlock_mapping_entry(page);
+	dax_unlock_page(page, cookie);
 out:
 	/* drop pgmap ref acquired in caller */
 	put_dev_pagemap(pgmap);