Commit e2eff52c authored by Linus Torvalds

Merge tag 'v6.4/vfs.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull misc vfs updates from Christian Brauner:
 "This contains a pile of various smaller fixes. Most of them aren't
  very interesting so this just highlights things worth mentioning:

   - Various filesystems contained the same little helper to convert
     from the mode of a dentry to the DT_* type of that dentry.

     They have now all been switched to rely on the generic
     fs_umode_to_dtype() helper. All custom helpers are removed (Jeff)

   - Fsnotify now reports ACCESS and MODIFY events for splice
     (Chung-Chiang Cheng)

   - After converting timerfd a long time ago to rely on
     wait_event_interruptible_*() apis, convert eventfd as well. This
     removes the complex open-coded wait code (Wen Yang)

   - Simplify sysctl registration for devpts, avoiding the declaration
     of two tables. Instead, just use a prefixed path with
     register_sysctl() (Luis)

   - The setattr_should_drop_sgid() helper is now exported so NFS can
     use it. By switching NFS to this helper an NFS setgid inheritance
     bug is fixed (me)"

* tag 'v6.4/vfs.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fs: hfsplus: remove WARN_ON() from hfsplus_cat_{read,write}_inode()
  pnode: pass mountpoint directly
  eventfd: use wait_event_interruptible_locked_irq() helper
  splice: report related fsnotify events
  fs: consolidate duplicate dt_type helpers
  nfs: use vfs setgid helper
  Update relatime comments to include equality
  fs/buffer: Remove redundant assignment to err
  fs_context: drop the unused lsm_flags member
  fs/namespace: fnic: Switch to use %ptTd
  Documentation: update idmappings.rst
  devpts: simplify two-level sysctl registration for pty_kern_table
  eventpoll: align comment with nested epoll limitation
parents 7bcff5a3 81b21c0f
......@@ -79,7 +79,6 @@ context. This is represented by the fs_context structure::
unsigned int sb_flags;
unsigned int sb_flags_mask;
unsigned int s_iflags;
unsigned int lsm_flags;
enum fs_context_purpose purpose:8;
...
};
......
......@@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct mnt_idmap *idmap,
return ATTR_KILL_SGID;
return 0;
}
EXPORT_SYMBOL(setattr_should_drop_sgid);
/**
* setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to
......
......@@ -2581,7 +2581,7 @@ int block_truncate_page(struct address_space *mapping,
struct inode *inode = mapping->host;
struct page *page;
struct buffer_head *bh;
int err;
int err = 0;
blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
......@@ -2594,9 +2594,8 @@ int block_truncate_page(struct address_space *mapping,
iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
if (!page)
goto out;
return -ENOMEM;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
......@@ -2610,7 +2609,6 @@ int block_truncate_page(struct address_space *mapping,
pos += blocksize;
}
err = 0;
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
......@@ -2634,12 +2632,11 @@ int block_truncate_page(struct address_space *mapping,
zero_user(page, offset, length);
mark_buffer_dirty(bh);
err = 0;
unlock:
unlock_page(page);
put_page(page);
out:
return err;
}
EXPORT_SYMBOL(block_truncate_page);
......
......@@ -1599,12 +1599,6 @@ static int configfs_dir_close(struct inode *inode, struct file *file)
return 0;
}
/* Relationship between s_mode and the DT_xxx types */
static inline unsigned char dt_type(struct configfs_dirent *sd)
{
return (sd->s_mode >> 12) & 15;
}
static int configfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
......@@ -1654,7 +1648,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
name = configfs_get_name(next);
len = strlen(name);
if (!dir_emit(ctx, name, len, ino, dt_type(next)))
if (!dir_emit(ctx, name, len, ino,
fs_umode_to_dtype(next->s_mode)))
return 0;
spin_lock(&configfs_dirent_lock);
......
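The configfs hunk above is one instance of the consolidation described in the pull message: each filesystem's private dt_type() open-coded "(mode >> 12) & 15", and all of them now call the shared fs_umode_to_dtype() helper instead. A minimal sketch of the resulting readdir idiom; the wrapper and its name are invented for illustration:

#include <linux/fs.h>
#include <linux/fs_types.h>
#include <linux/string.h>

/* Hypothetical wrapper: emit one directory entry with its DT_* type. */
static bool demo_emit(struct dir_context *ctx, const char *name,
		      u64 ino, umode_t mode)
{
	/* was: dir_emit(ctx, name, strlen(name), ino, (mode >> 12) & 15) */
	return dir_emit(ctx, name, strlen(name), ino,
			fs_umode_to_dtype(mode));
}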
......@@ -72,24 +72,6 @@ static struct ctl_table pty_table[] = {
{}
};
static struct ctl_table pty_kern_table[] = {
{
.procname = "pty",
.mode = 0555,
.child = pty_table,
},
{}
};
static struct ctl_table pty_root_table[] = {
{
.procname = "kernel",
.mode = 0555,
.child = pty_kern_table,
},
{}
};
struct pts_mount_opts {
int setuid;
int setgid;
......@@ -630,7 +612,7 @@ static int __init init_devpts_fs(void)
{
int err = register_filesystem(&devpts_fs_type);
if (!err) {
register_sysctl_table(pty_root_table);
register_sysctl("kernel/pty", pty_table);
}
return err;
}
......
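The devpts change above follows the simplification Luis describes in the pull message: instead of declaring "kernel" and "pty" wrapper tables whose only purpose is to provide .child links, the leaf table is registered directly under a slash-separated path. A minimal sketch of the same idiom for a hypothetical knob; the table, variable, and path names are invented:

#include <linux/sysctl.h>

static int example_knob;

static struct ctl_table example_table[] = {
	{
		.procname	= "knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};

static int __init example_sysctl_init(void)
{
	/* creates /proc/sys/kernel/example/knob with no parent tables */
	if (!register_sysctl("kernel/example", example_table))
		return -ENOMEM;
	return 0;
}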
......@@ -228,7 +228,6 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
struct file *file = iocb->ki_filp;
struct eventfd_ctx *ctx = file->private_data;
__u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
if (iov_iter_count(to) < sizeof(ucnt))
return -EINVAL;
......@@ -239,23 +238,11 @@ static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
spin_unlock_irq(&ctx->wqh.lock);
return -EAGAIN;
}
__add_wait_queue(&ctx->wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ctx->count)
break;
if (signal_pending(current)) {
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
spin_unlock_irq(&ctx->wqh.lock);
return -ERESTARTSYS;
}
if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
spin_unlock_irq(&ctx->wqh.lock);
schedule();
spin_lock_irq(&ctx->wqh.lock);
return -ERESTARTSYS;
}
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
eventfd_ctx_do_read(ctx, &ucnt);
current->in_eventfd = 1;
......@@ -275,7 +262,6 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
__u64 ucnt;
DECLARE_WAITQUEUE(wait, current);
if (count < sizeof(ucnt))
return -EINVAL;
......@@ -288,23 +274,10 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
if (ULLONG_MAX - ctx->count > ucnt)
res = sizeof(ucnt);
else if (!(file->f_flags & O_NONBLOCK)) {
__add_wait_queue(&ctx->wqh, &wait);
for (res = 0;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ULLONG_MAX - ctx->count > ucnt) {
res = sizeof(ucnt);
break;
}
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
spin_unlock_irq(&ctx->wqh.lock);
schedule();
spin_lock_irq(&ctx->wqh.lock);
}
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
res = wait_event_interruptible_locked_irq(ctx->wqh,
ULLONG_MAX - ctx->count > ucnt);
if (!res)
res = sizeof(ucnt);
}
if (likely(res > 0)) {
ctx->count += ucnt;
......
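The eventfd conversion above relies on wait_event_interruptible_locked_irq(), which expects the wait queue's wq.lock to already be held (taken with spin_lock_irq()), drops the lock around schedule(), and reacquires it before returning; it returns 0 once the condition holds or -ERESTARTSYS on a signal. A condensed sketch of that calling convention, using a made-up context structure:

#include <linux/wait.h>
#include <linux/spinlock.h>

struct demo_ctx {
	wait_queue_head_t wqh;	/* wqh.lock protects count */
	u64 count;
};

/* Hypothetical helper: sleep until count is nonzero, then consume it. */
static int demo_wait_and_take(struct demo_ctx *ctx, u64 *out)
{
	spin_lock_irq(&ctx->wqh.lock);
	/* lock is dropped while sleeping and held again on return */
	if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
		spin_unlock_irq(&ctx->wqh.lock);
		return -ERESTARTSYS;
	}
	*out = ctx->count;
	ctx->count = 0;
	spin_unlock_irq(&ctx->wqh.lock);
	return 0;
}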
......@@ -483,8 +483,8 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
* (efd1) notices that it may have some event ready, so it needs to wake up
* the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
* that ends up in another wake_up(), after having checked about the
* recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
* avoid stack blasting.
* recursion constraints. That are, no more than EP_MAX_NESTS, to avoid
* stack blasting.
*
* When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
* this special case of epoll.
......
......@@ -511,7 +511,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
if (type == HFSPLUS_FOLDER) {
struct hfsplus_cat_folder *folder = &entry.folder;
WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_folder));
hfsplus_get_perms(inode, &folder->permissions, 1);
......@@ -531,7 +535,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
} else if (type == HFSPLUS_FILE) {
struct hfsplus_cat_file *file = &entry.file;
WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
sizeof(struct hfsplus_cat_file));
......@@ -562,6 +570,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
pr_err("bad catalog entry used to create inode\n");
res = -EIO;
}
out:
return res;
}
......@@ -570,6 +579,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfsplus_cat_entry entry;
int res = 0;
if (HFSPLUS_IS_RSRC(inode))
main_inode = HFSPLUS_I(inode)->rsrc_inode;
......@@ -588,7 +598,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
if (S_ISDIR(main_inode->i_mode)) {
struct hfsplus_cat_folder *folder = &entry.folder;
WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
pr_err("bad catalog folder entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
......@@ -613,7 +627,11 @@ int hfsplus_cat_write_inode(struct inode *inode)
} else {
struct hfsplus_cat_file *file = &entry.file;
WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
pr_err("bad catalog file entry\n");
res = -EIO;
goto out;
}
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
hfsplus_inode_write_fork(inode, &file->data_fork);
......@@ -634,7 +652,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
hfs_find_exit(&fd);
return 0;
return res;
}
int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
......
......@@ -1804,8 +1804,8 @@ EXPORT_SYMBOL(bmap);
/*
* With relative atime, only update atime if the previous atime is
* earlier than either the ctime or mtime or if at least a day has
* passed since the last atime update.
* earlier than or equal to either the ctime or mtime,
* or if at least a day has passed since the last atime update.
*/
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
......@@ -1814,12 +1814,12 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
/*
* Is mtime younger than atime? If yes, update atime:
* Is mtime younger than or equal to atime? If yes, update atime:
*/
if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
return 1;
/*
* Is ctime younger than atime? If yes, update atime:
* Is ctime younger than or equal to atime? If yes, update atime:
*/
if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
return 1;
......
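The comment fixes above only describe what the code already does: timespec64_compare() returns a negative, zero, or positive value for less-than, equal, and greater-than, so the existing ">= 0" checks treat an mtime or ctime equal to atime as grounds to update atime. A tiny illustration; the helper name is invented:

#include <linux/time64.h>

/* True when ts is younger than or equal to (i.e. not older than) atime. */
static bool demo_not_older(const struct timespec64 *ts,
			   const struct timespec64 *atime)
{
	/* timespec64_compare(): <0, 0, >0 for lhs <, ==, > rhs */
	return timespec64_compare(ts, atime) >= 0;
}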
......@@ -259,8 +259,6 @@ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *po
/*
* fs/attr.c
*/
int setattr_should_drop_sgid(struct mnt_idmap *idmap,
const struct inode *inode);
struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns);
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
void mnt_idmap_put(struct mnt_idmap *idmap);
......@@ -1748,12 +1748,6 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
return error;
}
/* Relationship between mode and the DT_xxx types */
static inline unsigned char dt_type(struct kernfs_node *kn)
{
return (kn->mode >> 12) & 15;
}
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
kernfs_put(filp->private_data);
......@@ -1831,7 +1825,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
const char *name = pos->name;
unsigned int type = dt_type(pos);
unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
......
......@@ -174,12 +174,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
}
EXPORT_SYMBOL(dcache_dir_lseek);
/* Relationship between i_mode and the DT_xxx types */
static inline unsigned char dt_type(struct inode *inode)
{
return (inode->i_mode >> 12) & 15;
}
/*
* Directory is locked and all positive dentries in it are safe, since
* for ramfs-type trees they can't go away without unlink() or rmdir(),
......@@ -206,7 +200,8 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
d_inode(next)->i_ino,
fs_umode_to_dtype(d_inode(next)->i_mode)))
break;
ctx->pos++;
p = &next->d_child;
......
......@@ -2617,15 +2617,12 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *
(ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
char *buf = (char *)__get_free_page(GFP_KERNEL);
char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
struct tm tm;
time64_to_tm(sb->s_time_max, 0, &tm);
pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n",
sb->s_type->name,
is_mounted(mnt) ? "remounted" : "mounted",
mntpath,
tm.tm_year+1900, (unsigned long long)sb->s_time_max);
mntpath, &sb->s_time_max,
(unsigned long long)sb->s_time_max);
free_page((unsigned long)buf);
sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
......
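The mnt_warn_timestamp_expiry() change above drops the manual time64_to_tm() conversion in favour of the vsprintf %ptTd extension, which takes a pointer to a time64_t and prints just the date portion. A minimal sketch; the function and variable names are arbitrary:

#include <linux/printk.h>
#include <linux/time64.h>

static void demo_print_expiry(time64_t expires)
{
	/* %ptTd prints the date part of the pointed-to time64_t */
	pr_warn("timestamps supported until %ptTd (0x%llx)\n",
		&expires, (unsigned long long)expires);
}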
......@@ -717,9 +717,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
inode->i_mode & S_ISUID)
inode->i_mode &= ~S_ISUID;
if ((attr->ia_valid & ATTR_KILL_SGID) != 0 &&
(inode->i_mode & (S_ISGID | S_IXGRP)) ==
(S_ISGID | S_IXGRP))
if (setattr_should_drop_sgid(&nop_mnt_idmap, inode))
inode->i_mode &= ~S_ISGID;
if ((attr->ia_valid & ATTR_MODE) != 0) {
int mode = attr->ia_mode & S_IALLUGO;
......
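The NFS hunk above swaps the open-coded S_ISGID/S_IXGRP test for the newly exported helper; as the fs/attr.c hunk earlier in the diff shows, setattr_should_drop_sgid() returns ATTR_KILL_SGID (nonzero) when the setgid bit must be dropped and 0 otherwise. A hedged sketch of the caller pattern, with an invented function name:

#include <linux/fs.h>
#include <linux/mnt_idmap.h>

/* Drop S_ISGID on an attribute update when VFS policy requires it. */
static void demo_maybe_kill_sgid(struct mnt_idmap *idmap,
				 struct inode *inode)
{
	if (setattr_should_drop_sgid(idmap, inode))
		inode->i_mode &= ~S_ISGID;
}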
......@@ -1274,9 +1274,6 @@ int nfs_get_tree_common(struct fs_context *fc)
if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
fc->sb_flags |= SB_SYNCHRONOUS;
if (server->caps & NFS_CAP_SECURITY_LABEL)
fc->lsm_flags |= SECURITY_LSM_NATIVE_LABELS;
/* Get a superblock - note that we may end up sharing one that already exists */
fc->s_fs_info = server;
s = sget_fc(fc, compare_super, nfs_set_super);
......
......@@ -214,7 +214,6 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
/* all accesses are serialized by namespace_sem */
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
static inline bool peers(struct mount *m1, struct mount *m2)
......@@ -222,7 +221,7 @@ static inline bool peers(struct mount *m1, struct mount *m2)
return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}
static int propagate_one(struct mount *m)
static int propagate_one(struct mount *m, struct mountpoint *dest_mp)
{
struct mount *child;
int type;
......@@ -230,7 +229,7 @@ static int propagate_one(struct mount *m)
if (IS_MNT_NEW(m))
return 0;
/* skip if mountpoint isn't covered by it */
if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
return 0;
if (peers(m, last_dest)) {
type = CL_MAKE_SHARED;
......@@ -262,7 +261,7 @@ static int propagate_one(struct mount *m)
if (IS_ERR(child))
return PTR_ERR(child);
read_seqlock_excl(&mount_lock);
mnt_set_mountpoint(m, mp, child);
mnt_set_mountpoint(m, dest_mp, child);
if (m->mnt_master != dest_master)
SET_MNT_MARK(m->mnt_master);
read_sequnlock_excl(&mount_lock);
......@@ -299,13 +298,12 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
last_dest = dest_mnt;
first_source = source_mnt;
last_source = source_mnt;
mp = dest_mp;
list = tree_list;
dest_master = dest_mnt->mnt_master;
/* all peers of dest_mnt, except dest_mnt itself */
for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
ret = propagate_one(n);
ret = propagate_one(n, dest_mp);
if (ret)
goto out;
}
......@@ -316,7 +314,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
/* everything in that slave group */
n = m;
do {
ret = propagate_one(n);
ret = propagate_one(n, dest_mp);
if (ret)
goto out;
n = next_peer(n);
......
......@@ -30,6 +30,7 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
......@@ -1165,6 +1166,9 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
ret = do_splice_from(ipipe, out, &offset, len, flags);
file_end_write(out);
if (ret > 0)
fsnotify_modify(out);
if (!off_out)
out->f_pos = offset;
else
......@@ -1188,6 +1192,10 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
flags |= SPLICE_F_NONBLOCK;
ret = splice_file_to_pipe(in, opipe, &offset, len, flags);
if (ret > 0)
fsnotify_access(in);
if (!off_in)
in->f_pos = offset;
else
......
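With the splice hunks above, spliced I/O now generates the same fsnotify events as plain read(2)/write(2): an access event on the file data is spliced from and a modify event on the file it is spliced to. A small userspace sketch that would observe this via inotify; the watched path is a placeholder:

/* Userspace illustration only; build with: cc -o watch watch.c */
#include <stdio.h>
#include <sys/types.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd = inotify_init1(0);

	if (fd < 0)
		return 1;
	/* placeholder path; splicing to/from it now raises these events */
	if (inotify_add_watch(fd, "/tmp/target", IN_ACCESS | IN_MODIFY) < 0)
		return 1;
	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));
		char *p = buf;

		if (len <= 0)
			break;
		while (p < buf + len) {
			struct inotify_event *ev = (struct inotify_event *)p;

			printf("wd %d mask 0x%x\n", ev->wd, ev->mask);
			p += sizeof(*ev) + ev->len;
		}
	}
	return 0;
}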
......@@ -2675,6 +2675,8 @@ extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
extern int file_remove_privs(struct file *);
int setattr_should_drop_sgid(struct mnt_idmap *idmap,
const struct inode *inode);
/*
* This must be used for allocating filesystems specific inodes to set
......
......@@ -104,7 +104,6 @@ struct fs_context {
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
unsigned int s_iflags; /* OR'd with sb->s_iflags */
unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
enum fs_context_phase phase:8; /* The phase the context is in */
bool need_free:1; /* Need to call ops->free() */
......
......@@ -68,7 +68,7 @@ struct watch_notification;
/* If capable is being called by a setid function */
#define CAP_OPT_INSETID BIT(2)
/* LSM Agnostic defines for fs_context::lsm_flags */
/* LSM Agnostic defines for security_sb_set_mnt_opts() flags */
#define SECURITY_LSM_NATIVE_LABELS 1
struct ctl_table;
......