Commit 8faccfef authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-07-03-22-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "6 hotfies, all cc:stable. Some fixes for longstanding nilfs2 issues
  and three unrelated MM fixes"

* tag 'mm-hotfixes-stable-2024-07-03-22-23' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix incorrect inode allocation from reserved inodes
  nilfs2: add missing check for inode numbers on directory entries
  nilfs2: fix inode number range checks
  mm: avoid overflows in dirty throttling logic
  Revert "mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again"
  mm: optimize the redundant loop of mm_update_owner_next()
parents 795c58e4 93aef9ed
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
  * @target: offset number of an entry in the group (start point)
  * @bsize: size in bits
  * @lock: spin lock protecting @bitmap
+ * @wrap: whether to wrap around
  */
 static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
 					    unsigned long target,
 					    unsigned int bsize,
-					    spinlock_t *lock)
+					    spinlock_t *lock, bool wrap)
 {
 	int pos, end = bsize;
@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
 		end = target;
 	}
+	if (!wrap)
+		return -ENOSPC;
 
 	/* wrap around */
 	for (pos = 0; pos < end; pos++) {
@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
  * @inode: inode of metadata file using this allocator
  * @req: nilfs_palloc_req structure exchanged for the allocation
+ * @wrap: whether to wrap around
  */
 int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
-				     struct nilfs_palloc_req *req)
+				     struct nilfs_palloc_req *req, bool wrap)
 {
 	struct buffer_head *desc_bh, *bitmap_bh;
 	struct nilfs_palloc_group_desc *desc;
@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 	entries_per_group = nilfs_palloc_entries_per_group(inode);
 
 	for (i = 0; i < ngroups; i += n) {
-		if (group >= ngroups) {
+		if (group >= ngroups && wrap) {
 			/* wrap around */
 			group = 0;
 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
@@ -550,7 +554,14 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 			bitmap_kaddr = kmap_local_page(bitmap_bh->b_page);
 			bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
 			pos = nilfs_palloc_find_available_slot(
-				bitmap, group_offset, entries_per_group, lock);
+				bitmap, group_offset, entries_per_group, lock,
+				wrap);
+			/*
+			 * Since the search for a free slot in the second and
+			 * subsequent bitmap blocks always starts from the
+			 * beginning, the wrap flag only has an effect on the
+			 * first search.
+			 */
 			kunmap_local(bitmap_kaddr);
 			if (pos >= 0)
 				goto found;
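
The wrap flag above simply short-circuits the second, wrap-around pass of the bitmap scan. A minimal user-space model of that two-pass search — not the kernel code: the spinlock and the byte-at-a-time helpers are dropped, bits are scanned one at a time, and -1 stands in for -ENOSPC:

#include <stdbool.h>
#include <stdio.h>

/* Toy version of nilfs_palloc_find_available_slot(): find and claim a
 * zero bit starting at @target, optionally wrapping back to bit 0. */
static int find_available_slot(unsigned char *bitmap, unsigned int target,
			       unsigned int bsize, bool wrap)
{
	unsigned int pos;

	for (pos = target; pos < bsize; pos++)		/* forward pass */
		if (!(bitmap[pos / 8] & (1u << (pos % 8)))) {
			bitmap[pos / 8] |= 1u << (pos % 8);
			return pos;
		}
	if (!wrap)
		return -1;				/* -ENOSPC */
	for (pos = 0; pos < target; pos++)		/* wrap-around pass */
		if (!(bitmap[pos / 8] & (1u << (pos % 8)))) {
			bitmap[pos / 8] |= 1u << (pos % 8);
			return pos;
		}
	return -1;
}

int main(void)
{
	/* Bits 4-15 in use; the only free slots sit before the start point. */
	unsigned char map[2] = { 0xf0, 0xff };

	printf("wrap=false: %d\n", find_available_slot(map, 4, 16, false));
	printf("wrap=true:  %d\n", find_available_slot(map, 4, 16, true));
	return 0;
}

Only the wrapping call succeeds (it claims bit 0); with wrap=false the caller sees ENOSPC instead of silently landing in front of the start point, which is what the ifile change further down relies on.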
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
 	struct buffer_head *pr_entry_bh;
 };
 
-int nilfs_palloc_prepare_alloc_entry(struct inode *,
-				     struct nilfs_palloc_req *);
+int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+				     struct nilfs_palloc_req *req, bool wrap);
 void nilfs_palloc_commit_alloc_entry(struct inode *,
 				     struct nilfs_palloc_req *);
 void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 {
 	int ret;
 
-	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
+	ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
 	if (ret < 0)
 		return ret;
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -135,6 +135,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
 			goto Enamelen;
 		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
 			goto Espan;
+		if (unlikely(p->inode &&
+			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
+			goto Einumber;
 	}
 	if (offs != limit)
 		goto Eend;
@@ -160,6 +163,9 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
 	goto bad_entry;
 Espan:
 	error = "directory entry across blocks";
+	goto bad_entry;
+Einumber:
+	error = "disallowed inode number";
 bad_entry:
 	nilfs_error(sb,
 		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%zd, name_len=%d",
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -56,13 +56,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
 	struct nilfs_palloc_req req;
 	int ret;
 
-	req.pr_entry_nr = 0;  /*
-			       * 0 says find free inode from beginning
-			       * of a group. dull code!!
-			       */
+	req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
 	req.pr_entry_bh = NULL;
 
-	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
+	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
 	if (!ret) {
 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
 						   &req.pr_entry_bh);
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -116,9 +116,15 @@ enum {
 #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
 
 #define NILFS_MDT_INODE(sb, ino) \
-	((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
+	((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
 #define NILFS_VALID_INODE(sb, ino) \
-	((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
+	((ino) >= NILFS_FIRST_INO(sb) ||				\
+	 ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
+
+#define NILFS_PRIVATE_INODE(ino) ({					\
+	ino_t __ino = (ino);						\
+	((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO &&	\
+	 (__ino) != NILFS_SKETCH_INO); })
 
 /**
  * struct nilfs_transaction_info: context information for synchronization
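
A quick user-space truth table for the new NILFS_PRIVATE_INODE() predicate. The constants are copied as I understand them from include/uapi/linux/nilfs2_ondisk.h (root 2, sketch 10, first user inode 11); the macro body matches the hunk above with unsigned long standing in for ino_t, and it uses the same GCC statement-expression extension as the kernel:

#include <stdio.h>

enum {
	NILFS_ROOT_INO	 = 2,	/* root directory */
	NILFS_SKETCH_INO = 10,	/* sketch file */
	NILFS_USER_INO	 = 11,	/* first non-reserved inode number */
};

#define NILFS_PRIVATE_INODE(ino) ({				\
	unsigned long __ino = (ino);				\
	(__ino < NILFS_USER_INO && __ino != NILFS_ROOT_INO &&	\
	 __ino != NILFS_SKETCH_INO); })

int main(void)
{
	unsigned long samples[] = { 1, 2, 4, 10, 11, 4096 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("ino %4lu -> %s\n", samples[i],
		       NILFS_PRIVATE_INODE(samples[i]) ?
		       "private (rejected in directories)" : "allowed");
	return 0;
}

Everything below 11 is rejected except the root directory and the sketch file, the only reserved inodes that may legitimately appear in a directory entry.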
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 	}
 
 	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
+	if (nilfs->ns_first_ino < NILFS_USER_INO) {
+		nilfs_err(nilfs->ns_sb,
+			  "too small lower limit for non-reserved inode numbers: %u",
+			  nilfs->ns_first_ino);
+		return -EINVAL;
+	}
 
 	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
 	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -182,7 +182,7 @@ struct the_nilfs {
 	unsigned long		ns_nrsvsegs;
 	unsigned long		ns_first_data_block;
 	int			ns_inode_size;
-	int			ns_first_ino;
+	unsigned int		ns_first_ino;
 	u32			ns_crc_seed;
 
 	/* /sys/fs/<nilfs>/<device> */
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -484,6 +484,8 @@ void mm_update_next_owner(struct mm_struct *mm)
 	 * Search through everything else, we should not get here often.
 	 */
 	for_each_process(g) {
+		if (atomic_read(&mm->mm_users) <= 1)
+			break;
 		if (g->flags & PF_KTHREAD)
 			continue;
 		for_each_thread(g, c) {
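
Why the two added lines help: once mm_users has dropped to one, the exiting task was the last user of the mm, so the remainder of the fallback scan cannot find a new owner. A user-space caricature of that loop — the sizes and types are invented, and note that the kernel rereads the atomic on every iteration precisely because another task may drop its reference mid-scan:

#include <stdio.h>

struct demo_task { int uses_mm; };

static void pick_owner(const struct demo_task *tasks, long n, int mm_users,
		       int early_break)
{
	long scanned = 0;

	for (long i = 0; i < n; i++) {
		if (early_break && mm_users <= 1)
			break;		/* no other task can hold the mm */
		scanned++;
		if (tasks[i].uses_mm)
			return;		/* found a new owner */
	}
	printf("scanned %ld of %ld tasks, no owner found\n", scanned, n);
}

int main(void)
{
	static struct demo_task tasks[1000000];	/* none of them use the mm */

	pick_owner(tasks, 1000000, 1, 0);	/* old: full, futile walk */
	pick_owner(tasks, 1000000, 1, 1);	/* new: stops immediately */
	return 0;
}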
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 	else
 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-	if (bg_thresh >= thresh)
-		bg_thresh = thresh / 2;
 	tsk = current;
 	if (rt_task(tsk)) {
 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 	}
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	if (thresh > UINT_MAX)
+		thresh = UINT_MAX;
+	/* This makes sure bg_thresh is within 32-bits as well */
+	if (bg_thresh >= thresh)
+		bg_thresh = thresh / 2;
 	dtc->thresh = thresh;
 	dtc->bg_thresh = bg_thresh;
@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 	if (rt_task(tsk))
 		dirty += dirty / 4;
 
-	return dirty;
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	return min_t(unsigned long, dirty, UINT_MAX);
 }
 
 /**
@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
+	unsigned long old_bytes = dirty_background_bytes;
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret == 0 && write)
+	if (ret == 0 && write) {
+		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
+				UINT_MAX) {
+			dirty_background_bytes = old_bytes;
+			return -ERANGE;
+		}
 		dirty_background_ratio = 0;
+	}
 	return ret;
 }
@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
+		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
+			vm_dirty_bytes = old_bytes;
+			return -ERANGE;
+		}
 		writeback_set_ratelimit();
 		vm_dirty_ratio = 0;
 	}
@@ -1660,7 +1682,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	 */
 	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
 	dtc->wb_bg_thresh = dtc->thresh ?
-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
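
The arithmetic behind the clamps and behind the restored div_u64() line, checked in user space. div_u64() divides a u64 by a 32-bit divisor, which is valid again once thresh is capped to UINT_MAX, while the (u64) cast keeps the wb_thresh * bg_thresh product in 64 bits; the two sysctl handlers apply the same cap at the source by rejecting byte values whose page count overflows 32 bits with ERANGE. A PAGE_SIZE of 4096 is an assumption (the usual x86-64 value), and the threshold numbers below are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 32-bit page count caps dirty limits near 16 TiB (2^44 bytes,
	 * minus one page). */
	uint64_t max_bytes = (uint64_t)UINT32_MAX * 4096;

	printf("UINT_MAX pages * 4 KiB = %llu bytes\n",
	       (unsigned long long)max_bytes);

	/* Plausible post-clamp thresholds, in pages. */
	uint32_t wb_thresh = 3000000, bg_thresh = 3500000, thresh = 4000000;

	/* A 32-bit multiply wraps around ... */
	printf("32-bit product, wrong: %u\n", wb_thresh * bg_thresh / thresh);
	/* ... the (u64) cast from the restored line keeps the full product. */
	printf("64-bit product, right: %llu\n", (unsigned long long)
	       ((uint64_t)wb_thresh * bg_thresh / thresh));
	return 0;
}

With the clamps in place both operands fit in 32 bits, so only the intermediate product needs widening — which is why the cheaper div_u64() could be restored in place of div64_u64().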