Commit dbd8805b authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more fixes from Andrew Morton:
 "Three fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/linux/property.h: fix typo/compile error
  ocfs2: fix deadlock on mmapped page in ocfs2_write_begin_nolock()
  mm: workingset: fix crash in shadow node shrinker caused by replace_page_cache_page()
parents 9a2172a8 37aa7271
@@ -1842,6 +1842,16 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 	ocfs2_commit_trans(osb, handle);
 
 out:
+	/*
+	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
+	 * even in case of error here like ENOSPC and ENOMEM. So, we need
+	 * to unlock the target page manually to prevent deadlocks when
+	 * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
+	 * to VM code.
+	 */
+	if (wc->w_target_locked)
+		unlock_page(mmap_page);
+
 	ocfs2_free_write_ctxt(inode, wc);
 
 	if (data_ac) {
...
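Note on the fix: the mmap write-fault path (ocfs2_page_mkwrite() in fs/ocfs2/mmap.c) locks the target page before calling into this write path, and ocfs2_free_write_ctxt() never unlocks it on the ENOSPC/ENOMEM error paths, so without the unlock above the next attempt blocks on a page lock its own task still holds. The following userspace sketch models that with a pthread mutex standing in for the page lock; names and the retry shape are illustrative, not the literal fs/ocfs2 code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* page_lock stands in for the kernel lock on the mmapped page. */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models ocfs2_write_begin_nolock() failing with ENOSPC. Pre-fix
 * behaviour (fixed == 0) returns with the target page still locked;
 * post-fix behaviour releases it, as the new unlock_page(mmap_page)
 * call does. */
static int write_begin_nolock(int fixed)
{
	if (fixed)
		pthread_mutex_unlock(&page_lock);
	return -ENOSPC;
}

int main(void)
{
	int fixed = 1;	/* flip to 0 to see the "deadlock" */

	for (int attempt = 0; attempt < 2; attempt++) {
		/* The fault path locks the target page on every attempt.
		 * In the kernel this is a blocking lock_page(), so a lock
		 * left held by the previous attempt means deadlock; the
		 * trylock here just makes that visible. */
		if (pthread_mutex_trylock(&page_lock)) {
			puts("page still locked: would deadlock in the kernel");
			return 1;
		}
		write_begin_nolock(fixed);
	}
	puts("error path unlocked the page; retry can proceed");
	return 0;
}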
@@ -190,7 +190,7 @@ struct property_entry {
 	.length = ARRAY_SIZE(_val_) * sizeof(_type_),	\
 	.is_array = true,				\
 	.is_string = false,				\
-	{ .pointer = { _type_##_data = _val_ } },	\
+	{ .pointer = { ._type_##_data = _val_ } },	\
 }
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)	\
...
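The one-character fix turns the pasted token into a designated initializer: with _type_ = u8 the expansion becomes { .pointer = { .u8_data = _val_ } }, whereas the original "u8_data = _val_" is invalid inside an initializer list, hence the compile error named in the commit message. A minimal userspace sketch of the same token-pasting pattern (a stand-in struct, not the real include/linux/property.h definition):

#include <stdio.h>

/* Stand-in for the property_entry data union; the real struct in
 * include/linux/property.h carries more fields around it. */
struct entry {
	union {
		const unsigned char *u8_data;
		const unsigned int *u32_data;
	} pointer;
};

/* Same token-pasting trick as PROPERTY_ENTRY_INTEGER_ARRAY():
 * ._type_##_data pastes into a designated initializer such as
 * .u8_data. Without the leading '.', the expansion reads
 * "u8_data = _val_", which is a syntax error in an initializer
 * list; that is the typo this commit fixes. */
#define ENTRY_ARRAY(_type_, _val_) \
	(struct entry) { .pointer = { ._type_##_data = _val_ } }

int main(void)
{
	static const unsigned char vals[] = { 1, 2, 3 };
	struct entry e = ENTRY_ARRAY(u8, vals);

	printf("%d\n", e.pointer.u8_data[0]);	/* prints 1 */
	return 0;
}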
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
 {
+	VM_BUG_ON(!workingset_node_pages(node));
 	node->count--;
 }
 
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 {
+	VM_BUG_ON(!workingset_node_shadows(node));
 	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
 }
...
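Both helpers operate on one packed counter: as the code above shows, pages are counted in the low bits of node->count and shadow entries in units of 1U << RADIX_TREE_COUNT_SHIFT above them, so an unguarded underflow of one field silently corrupts the other. The new VM_BUG_ON()s catch such underflow at the point of the decrement. A standalone sketch of the packing (the shift value here is illustrative; the kernel derives it from RADIX_TREE_MAP_SHIFT):

#include <assert.h>
#include <stdio.h>

/* Model of the packed counter: page references in the low bits,
 * shadow entries above the shift. */
#define COUNT_SHIFT 7
#define COUNT_MASK  ((1U << COUNT_SHIFT) - 1)

static unsigned int count;

static unsigned int node_pages(void)   { return count & COUNT_MASK; }
static unsigned int node_shadows(void) { return count >> COUNT_SHIFT; }

/* Mirrors workingset_node_pages_dec() with its new VM_BUG_ON():
 * decrementing a zero page count would borrow from the shadow field. */
static void pages_dec(void)
{
	assert(node_pages());
	count--;
}

/* Mirrors workingset_node_shadows_dec(). */
static void shadows_dec(void)
{
	assert(node_shadows());
	count -= 1U << COUNT_SHIFT;
}

int main(void)
{
	count = (2U << COUNT_SHIFT) | 1;	/* 2 shadows, 1 page */
	pages_dec();
	shadows_dec();
	printf("pages=%u shadows=%u\n", node_pages(), node_shadows());
	return 0;
}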
@@ -110,6 +110,62 @@
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+				  struct page *page, void **shadowp)
+{
+	struct radix_tree_node *node;
+	void **slot;
+	int error;
+
+	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+				    &node, &slot);
+	if (error)
+		return error;
+	if (*slot) {
+		void *p;
+
+		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+		if (!radix_tree_exceptional_entry(p))
+			return -EEXIST;
+
+		mapping->nrexceptional--;
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
+		}
+	}
+	radix_tree_replace_slot(slot, page);
+	mapping->nrpages++;
+	if (node) {
+		workingset_node_pages_inc(node);
+		/*
+		 * Don't track node that contains actual pages.
+		 *
+		 * Avoid acquiring the list_lru lock if already
+		 * untracked.  The list_empty() test is safe as
+		 * node->private_list is protected by
+		 * mapping->tree_lock.
+		 */
+		if (!list_empty(&node->private_list))
+			list_lru_del(&workingset_shadow_nodes,
+				     &node->private_list);
+	}
+	return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		__delete_from_page_cache(old, NULL);
-		error = radix_tree_insert(&mapping->page_tree, offset, new);
+		error = page_cache_tree_insert(mapping, new, NULL);
 		BUG_ON(error);
 		mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page, void **shadowp)
-{
-	struct radix_tree_node *node;
-	void **slot;
-	int error;
-
-	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-				    &node, &slot);
-	if (error)
-		return error;
-	if (*slot) {
-		void *p;
-
-		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-		if (!radix_tree_exceptional_entry(p))
-			return -EEXIST;
-
-		mapping->nrexceptional--;
-		if (!dax_mapping(mapping)) {
-			if (shadowp)
-				*shadowp = p;
-			if (node)
-				workingset_node_shadows_dec(node);
-		} else {
-			/* DAX can replace empty locked entry with a hole */
-			WARN_ON_ONCE(p !=
-				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-					 RADIX_DAX_ENTRY_LOCK));
-			/* DAX accounts exceptional entries as normal pages */
-			if (node)
-				workingset_node_pages_dec(node);
-			/* Wakeup waiters for exceptional entry lock */
-			dax_wake_mapping_entry_waiter(mapping, page->index,
-						      false);
-		}
-	}
-	radix_tree_replace_slot(slot, page);
-	mapping->nrpages++;
-	if (node) {
-		workingset_node_pages_inc(node);
-		/*
-		 * Don't track node that contains actual pages.
-		 *
-		 * Avoid acquiring the list_lru lock if already
-		 * untracked.  The list_empty() test is safe as
-		 * node->private_list is protected by
-		 * mapping->tree_lock.
-		 */
-		if (!list_empty(&node->private_list))
-			list_lru_del(&workingset_shadow_nodes,
-				     &node->private_list);
-	}
-	return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
 				      struct address_space *mapping,
 				      pgoff_t offset, gfp_t gfp_mask,
...
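How this fixes the shrinker crash, as reconstructed from the diff: deleting the old page can leave its radix tree node holding only shadow entries, at which point the delete path places the node on the shadow-node LRU. The bare radix_tree_insert() call then added the replacement page without taking the node back off that list, so shadow_lru_isolate() could later find a tracked node that still contains pages and trip its BUG_ON(). Routing the replacement through page_cache_tree_insert() performs the same shadow accounting and list_lru_del() as the regular add-to-page-cache path. A toy model of the invariant (illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Invariant being modeled: a radix tree node may sit on the
 * shadow-node LRU only while it holds shadow entries and no pages. */
static unsigned int pages, shadows;
static bool on_shadow_lru;

/* __delete_from_page_cache(old, NULL): node becomes shadow-only and
 * the delete path puts it on the LRU (list_lru_add). */
static void delete_old_page(void)
{
	pages--;
	if (pages == 0 && shadows > 0)
		on_shadow_lru = true;
}

/* Pre-fix path: bare radix_tree_insert() only bumps the page count. */
static void bare_insert(void)
{
	pages++;
}

/* Post-fix path: page_cache_tree_insert() also delists the node. */
static void tree_insert(void)
{
	pages++;
	on_shadow_lru = false;	/* list_lru_del() */
}

int main(void)
{
	pages = 1; shadows = 3; on_shadow_lru = false;
	delete_old_page();
	bare_insert();
	/* A tracked node now contains pages: the state that made
	 * shadow_lru_isolate() hit BUG_ON(workingset_node_pages(node)). */
	printf("pre-fix:  pages=%u on_lru=%d  <- shrinker BUG_ON\n",
	       pages, on_shadow_lru);

	pages = 1; shadows = 3; on_shadow_lru = false;
	delete_old_page();
	tree_insert();
	printf("post-fix: pages=%u on_lru=%d\n", pages, on_shadow_lru);
	return 0;
}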
@@ -418,21 +418,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	 * no pages, so we expect to be able to remove them all and
 	 * delete and free the empty node afterwards.
 	 */
-	BUG_ON(!node->count);
-	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+	BUG_ON(!workingset_node_shadows(node));
+	BUG_ON(workingset_node_pages(node));
 
 	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
 		if (node->slots[i]) {
 			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
 			node->slots[i] = NULL;
-			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
-			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+			workingset_node_shadows_dec(node);
 			BUG_ON(!mapping->nrexceptional);
 			mapping->nrexceptional--;
 		}
 	}
-	BUG_ON(node->count);
+	BUG_ON(workingset_node_shadows(node));
 	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
 	if (!__radix_tree_delete_node(&mapping->page_tree, node))
 		BUG();
...