Commit 706ce3ca authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-05-06-10-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
 "Five hotfixes.

  Three are cc:stable, two pertain to merge window changes"

* tag 'mm-hotfixes-stable-2023-05-06-10-45' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  afs: fix the afs_dir_get_folio return value
  nilfs2: do not write dirty data after degenerating to read-only
  mm: do not reclaim private data from pinned page
  nilfs2: fix infinite loop in nilfs_mdt_get_block()
  mm/mmap/vma_merge: always check invariants
parents 994e2419 58f5f669
@@ -115,11 +115,12 @@ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
 	folio = __filemap_get_folio(mapping, index,
 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 				    mapping->gfp_mask);
-	if (IS_ERR(folio))
+	if (IS_ERR(folio)) {
 		clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
-	else if (folio && !folio_test_private(folio))
+		return NULL;
+	}
+	if (!folio_test_private(folio))
 		folio_attach_private(folio, (void *)1);
 	return folio;
 }
......
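The change above restores the contract that afs_dir_get_folio()'s callers appear to rely on, namely a NULL return on failure: now that __filemap_get_folio() reports failure with an ERR_PTR() rather than NULL (a merge-window change), passing the raw result through would leak an error pointer to NULL-checking callers. Below is a minimal userspace sketch of that mismatch and of the fix; ERR_PTR()/IS_ERR() are simplified from the kernel idiom, and fetch_raw()/get_item() are hypothetical stand-ins, not the kernel functions.

/* Minimal userspace sketch of the NULL-vs-ERR_PTR contract mismatch.
 * ERR_PTR()/IS_ERR() mirror the kernel idiom; fetch_raw()/get_item()
 * are hypothetical stand-ins for __filemap_get_folio()/afs_dir_get_folio(). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(void *)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *fetch_raw(int fail)
{
	/* Like the new __filemap_get_folio(): error pointer, not NULL. */
	static int item = 42;
	return fail ? ERR_PTR(-ENOMEM) : &item;
}

/* Fixed wrapper: translate the error pointer back to NULL for callers
 * that only test for NULL, matching the hunk above. */
static int *get_item(int fail)
{
	void *p = fetch_raw(fail);

	if (IS_ERR(p))
		return NULL;
	return p;
}

int main(void)
{
	int *p = get_item(1);

	if (!p)		/* the caller's NULL check works again */
		printf("lookup failed cleanly\n");
	else
		printf("value: %d\n", *p);
	return 0;
}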
@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
-	if (ret < 0) {
-		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+	if (ret < 0)
 		goto out;
-	}
+
 	if (NILFS_BMAP_USE_VBN(bmap)) {
 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
 					  &blocknr);
 		if (!ret)
 			*ptrp = blocknr;
+		else if (ret == -ENOENT) {
+			/*
+			 * If there was no valid entry in DAT for the block
+			 * address obtained by b_ops->bop_lookup, then pass
+			 * internal code -EINVAL to nilfs_bmap_convert_error
+			 * to treat it as metadata corruption.
+			 */
+			ret = -EINVAL;
+		}
 	}
 
  out:
 	up_read(&bmap->b_sem);
-	return ret;
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
......
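The hunk above remaps an -ENOENT from the DAT translation to -EINVAL, so that nilfs_bmap_convert_error() reports metadata corruption (and error conversion now happens in one place on the way out). The point of the remapping, per the commit subject, is that -ENOENT normally means "block not created yet" and makes a caller such as nilfs_mdt_get_block() create and retry, which never terminates when a corrupted entry keeps producing -ENOENT. A minimal sketch of that retry shape follows; lookup_block()/read_block() and the corruption flag are hypothetical stand-ins, not the nilfs2 functions.

/* Sketch of the retry pattern behind the infinite-loop fix: a caller that
 * retries on -ENOENT spins forever if a corrupted mapping keeps producing
 * -ENOENT; remapping the internal error to -EINVAL breaks the loop. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool dat_entry_corrupted = true;	/* simulate a bad DAT entry */

static int lookup_block(unsigned long key, unsigned long *blocknr)
{
	if (dat_entry_corrupted)
		return -EINVAL;	/* after the fix: corruption, not -ENOENT */
	*blocknr = key + 100;
	return 0;
}

static int read_block(unsigned long key)
{
	unsigned long blocknr;
	int ret;

	for (;;) {
		ret = lookup_block(key, &blocknr);
		if (ret != -ENOENT)
			break;	/* success, or a hard error such as -EINVAL */
		/* -ENOENT means "block not created yet": create it and retry.
		 * Before the fix, a corrupted entry also surfaced as -ENOENT
		 * here, so this loop never terminated. */
	}
	return ret;
}

int main(void)
{
	printf("read_block() = %d\n", read_block(7));
	return 0;
}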
@@ -2041,6 +2041,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
 	int err;
 
+	if (sb_rdonly(sci->sc_super))
+		return -EROFS;
+
 	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
 	sci->sc_cno = nilfs->ns_cno;
@@ -2724,7 +2727,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
 		flush_work(&sci->sc_iput_work);
 
-	} while (ret && retrycount-- > 0);
+	} while (ret && ret != -EROFS && retrycount-- > 0);
 }
 
 /**
......
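Together, the two hunks stop the log writer from writing dirty data once the filesystem has degenerated to read-only: segment construction now fails up front with -EROFS, and the write-out retry loop treats -EROFS as permanent instead of retrying. The sketch below shows that fail-fast/no-retry shape in plain C; fs_read_only, construct() and write_out() are hypothetical stand-ins for the nilfs2 internals.

/* Sketch of the fail-fast/no-retry pattern from the hunks above: a permanent
 * condition (read-only) is reported as -EROFS up front and excluded from the
 * retry loop, so transient failures are retried but -EROFS is not. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool fs_read_only = true;	/* simulate degeneration to read-only */

static int construct(void)
{
	if (fs_read_only)
		return -EROFS;	/* bail out before queuing any writes */
	/* ... build and submit the segment here ... */
	return 0;
}

static void write_out(void)
{
	int retrycount = 3;
	int ret;

	do {
		ret = construct();
		/* retry transient failures only; -EROFS is permanent */
	} while (ret && ret != -EROFS && retrycount-- > 0);

	printf("write_out finished with %d\n", ret);
}

int main(void)
{
	write_out();
	return 0;
}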
@@ -960,17 +960,17 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 		merge_next = true;
 	}
 
+	/* Verify some invariant that must be enforced by the caller. */
+	VM_WARN_ON(prev && addr <= prev->vm_start);
+	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
+	VM_WARN_ON(addr >= end);
+
 	if (!merge_prev && !merge_next)
 		return NULL; /* Not mergeable. */
 
 	res = vma = prev;
 	remove = remove2 = adjust = NULL;
 
-	/* Verify some invariant that must be enforced by the caller. */
-	VM_WARN_ON(prev && addr <= prev->vm_start);
-	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
-	VM_WARN_ON(addr >= end);
-
 	/* Can we merge both the predecessor and the successor? */
 	if (merge_prev && merge_next &&
 	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
......
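The hunk above only reorders code: the VM_WARN_ON() invariant checks move above the early "not mergeable" return, so the caller-supplied range is validated on every call to vma_merge(), not just when a merge actually goes ahead. A small sketch of the same ordering, using assert() on a hypothetical range-merge helper rather than the kernel's VM_WARN_ON(), follows.

/* Sketch of "check caller invariants before any early return", using a
 * hypothetical range-merge helper; the assert() conditions are the negations
 * of the VM_WARN_ON() conditions in the hunk above. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long end;
};

static struct range *try_merge(struct range *prev, struct range *curr,
			       unsigned long addr, unsigned long end,
			       bool merge_prev, bool merge_next)
{
	/* Validate caller invariants first, so they are exercised even when
	 * the function bails out as "not mergeable" below. */
	assert(!prev || addr > prev->start);
	assert(!curr || (addr == curr->start && end <= curr->end));
	assert(addr < end);

	if (!merge_prev && !merge_next)
		return NULL;	/* not mergeable */

	/* ... perform the actual merge here ... */
	return prev;
}

int main(void)
{
	struct range prev = { .start = 0, .end = 4096 };

	/* Invariants are checked even though nothing is mergeable. */
	if (!try_merge(&prev, NULL, 8192, 12288, false, false))
		printf("not mergeable, but invariants were still checked\n");
	return 0;
}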
@@ -1967,6 +1967,16 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 			}
 		}
 
+		/*
+		 * Folio is unmapped now so it cannot be newly pinned anymore.
+		 * No point in trying to reclaim folio if it is pinned.
+		 * Furthermore we don't want to reclaim underlying fs metadata
+		 * if the folio is pinned and thus potentially modified by the
+		 * pinning process as that may upset the filesystem.
+		 */
+		if (folio_maybe_dma_pinned(folio))
+			goto activate_locked;
+
 		mapping = folio_mapping(folio);
 		if (folio_test_dirty(folio)) {
 			/*
......
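The added check makes shrink_folio_list() skip folios that may still be DMA-pinned: per the new comment, once the folio is unmapped no new pins can appear, and reclaiming private data from a page that a pinner may still be modifying could upset the filesystem, so the folio is kept (re-activated) instead. A minimal sketch of that gate follows; struct page_stub and maybe_pinned() are hypothetical stand-ins for the folio API, and the pin check here is a plain counter rather than the kernel's refcount heuristic.

/* Sketch of the reclaim gate added above: after unmapping, a page that may
 * still be pinned for DMA is kept (put back on the active list) instead of
 * having its private data reclaimed or being written back. */
#include <stdbool.h>
#include <stdio.h>

struct page_stub {
	int pincount;	/* >0 while a DMA user holds a pin */
	bool dirty;
};

static bool maybe_pinned(const struct page_stub *p)
{
	return p->pincount > 0;
}

/* Returns true if the page was reclaimed, false if it was kept. */
static bool try_reclaim(struct page_stub *p)
{
	/* The page is already unmapped here, so no new pins can appear. */
	if (maybe_pinned(p)) {
		/* A pinner may still modify the page (and any fs-private
		 * data backing it), so keep it and skip reclaim entirely. */
		return false;
	}

	if (p->dirty) {
		/* ... write back, then free ... */
	}
	return true;
}

int main(void)
{
	struct page_stub pinned = { .pincount = 1, .dirty = true };
	struct page_stub clean  = { .pincount = 0, .dirty = false };

	printf("pinned page reclaimed: %d\n", try_reclaim(&pinned));
	printf("clean page reclaimed:  %d\n", try_reclaim(&clean));
	return 0;
}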