Commit 6abc20f8 authored by Christian Brauner

selftests/core: add regression test for CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC

This test is a minimized version of the reproducer given by syzbot
(cf. [1]).

After CLOSE_RANGE_CLOEXEC was introduced, syzbot reported a crash when
CLOSE_RANGE_CLOEXEC is specified in conjunction with
CLOSE_RANGE_UNSHARE. When CLOSE_RANGE_UNSHARE is specified, the caller
receives a private file descriptor table in case their file descriptor
table is currently shared.
For the case where the caller has requested that all file descriptors
be closed, e.g. via close_range(3, ~0U, 0), the kernel knows that the
caller does not need any of the file descriptors anymore and optimizes
the close operation by copying only file descriptors 0 through 2 into
the new table and no others.
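
A minimal userspace sketch of these semantics, assuming Linux 5.9+: the
sys_close_range() wrapper below mirrors the shape of the helper the test
uses, and the __NR_close_range fallback of 436 and the
CLOSE_RANGE_UNSHARE value are taken from the asm-generic and uapi
headers. Threads share one fd table, so a thread that passes
CLOSE_RANGE_UNSHARE works on a private copy and its closes leave the
rest of the process untouched.

#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_close_range
#define __NR_close_range 436	/* asm-generic syscall number, Linux 5.9+ */
#endif

#ifndef CLOSE_RANGE_UNSHARE
#define CLOSE_RANGE_UNSHARE (1U << 1)	/* from include/uapi/linux/close_range.h */
#endif

/* Raw wrapper for libcs that lack a close_range() function. */
static inline int sys_close_range(unsigned int fd, unsigned int max_fd,
				  unsigned int flags)
{
	return syscall(__NR_close_range, fd, max_fd, flags);
}

static void *worker(void *unused)
{
	(void)unused;
	/*
	 * Unshare first, then close: everything from fd 3 upwards dies,
	 * so the kernel only needs to copy fds 0-2 into the private table.
	 */
	sys_close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int fd = open("/dev/null", O_RDWR);

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);

	/* fd survived: the worker only closed fds in its private copy. */
	printf("fd %d still open: %d\n", fd, fcntl(fd, F_GETFD) >= 0);
	return close(fd);
}

Compiled with -pthread, this should report the fd as still open, since
the worker's unshared table absorbed all the closes.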

However, if the caller requested CLOSE_RANGE_CLOEXEC together with
CLOSE_RANGE_UNSHARE, they still intend to use the file descriptors
afterwards, so the kernel needs to copy all of them and cannot apply
this optimization.
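
By contrast, a sketch of the combination that defeats the optimization,
reusing the sys_close_range() wrapper above (the CLOSE_RANGE_CLOEXEC
value is again assumed from include/uapi/linux/close_range.h, and the
helper name is purely illustrative):

#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)	/* from include/uapi/linux/close_range.h */
#endif

/*
 * Nothing is closed here: every fd >= 3 must survive in the new private
 * table, merely marked close-on-exec, so the kernel has to copy the
 * entire fd table rather than just fds 0-2.
 */
static int cloexec_all_in_private_table(void)
{
	return sys_close_range(3, ~0U,
			       CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC);
}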

The original patch didn't account for this and thus could cause oopses
as evidenced by the syzbot report. Add tests for this regression.

We first create a huge gap in the fd table. When we now call
CLOSE_RANGE_UNSHARE with a shared fd table and with ~0U as the upper
bound, the kernel will only copy up to fd1 file descriptors into the new
fd table. If the kernel is buggy and doesn't handle CLOSE_RANGE_CLOEXEC
correctly, it will not have copied all file descriptors and we will oops!

This test passes on a fixed kernel and will trigger an oops on a buggy
kernel.

[1]: https://syzkaller.appspot.com/text?tag=KernelConfig&x=db720fe37a6a41d8

Cc: Giuseppe Scrivano <gscrivan@redhat.com>
Cc: linux-fsdevel@vger.kernel.org
Reported-by: syzbot+96cfd2b22b3213646a93@syzkaller.appspotmail.com
Link: https://lore.kernel.org/r/20201218145415.801063-4-christian.brauner@ubuntu.com
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
parent fe325c3f
@@ -384,4 +384,187 @@ TEST(close_range_cloexec_unshare)
	}
}
/*
 * Regression test for syzbot+96cfd2b22b3213646a93@syzkaller.appspotmail.com
 */
TEST(close_range_cloexec_syzbot)
{
	int fd1, fd2, fd3, flags, ret, status;
	pid_t pid;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	/* Create a huge gap in the fd table. */
	fd1 = open("/dev/null", O_RDWR);
	EXPECT_GT(fd1, 0);

	fd2 = dup2(fd1, 1000);
	EXPECT_GT(fd2, 0);

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		ret = sys_close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
		if (ret)
			exit(EXIT_FAILURE);

		/*
		 * We still share the file descriptor table with our
		 * parent and all our open fds should still be open but
		 * now marked close-on-exec.
		 */
		flags = fcntl(fd1, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

		flags = fcntl(fd2, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

		fd3 = dup2(fd1, 42);
		EXPECT_GT(fd3, 0);

		/*
		 * Duplicating the file descriptor must not set the
		 * FD_CLOEXEC flag on the new fd.
		 */
		flags = fcntl(fd3, F_GETFD);
		EXPECT_GT(flags, -1);
		EXPECT_EQ(flags & FD_CLOEXEC, 0);

		exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/*
	 * We shared our file descriptor table with the child along with
	 * its close-on-exec request, so the original fds must now be
	 * marked close-on-exec.
	 */
	flags = fcntl(fd1, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

	flags = fcntl(fd2, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

	fd3 = dup2(fd1, 42);
	EXPECT_GT(fd3, 0);

	flags = fcntl(fd3, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	EXPECT_EQ(close(fd1), 0);
	EXPECT_EQ(close(fd2), 0);
	EXPECT_EQ(close(fd3), 0);
}
/*
 * Regression test for syzbot+96cfd2b22b3213646a93@syzkaller.appspotmail.com
 */
TEST(close_range_cloexec_unshare_syzbot)
{
	int i, fd1, fd2, fd3, flags, ret, status;
	pid_t pid;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	/*
	 * Create a huge gap in the fd table. When we now call
	 * CLOSE_RANGE_UNSHARE with a shared fd table and with ~0U as upper
	 * bound the kernel will only copy up to fd1 file descriptors into
	 * the new fd table. If the kernel is buggy and doesn't handle
	 * CLOSE_RANGE_CLOEXEC correctly it will not have copied all file
	 * descriptors and we will oops!
	 *
	 * On a buggy kernel this should immediately oops. But let's loop
	 * just to be sure.
	 */
	fd1 = open("/dev/null", O_RDWR);
	EXPECT_GT(fd1, 0);

	fd2 = dup2(fd1, 1000);
	EXPECT_GT(fd2, 0);

	for (i = 0; i < 100; i++) {
		pid = sys_clone3(&args, sizeof(args));
		ASSERT_GE(pid, 0);

		if (pid == 0) {
			ret = sys_close_range(3, ~0U, CLOSE_RANGE_UNSHARE |
						      CLOSE_RANGE_CLOEXEC);
			if (ret)
				exit(EXIT_FAILURE);

			/*
			 * We now have a private file descriptor table and
			 * all our open fds should still be open but made
			 * close-on-exec.
			 */
			flags = fcntl(fd1, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

			flags = fcntl(fd2, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, FD_CLOEXEC);

			fd3 = dup2(fd1, 42);
			EXPECT_GT(fd3, 0);

			/*
			 * Duplicating the file descriptor must not set
			 * the FD_CLOEXEC flag on the new fd.
			 */
			flags = fcntl(fd3, F_GETFD);
			EXPECT_GT(flags, -1);
			EXPECT_EQ(flags & FD_CLOEXEC, 0);

			EXPECT_EQ(close(fd1), 0);
			EXPECT_EQ(close(fd2), 0);
			EXPECT_EQ(close(fd3), 0);

			exit(EXIT_SUCCESS);
		}

		EXPECT_EQ(waitpid(pid, &status, 0), pid);
		EXPECT_EQ(true, WIFEXITED(status));
		EXPECT_EQ(0, WEXITSTATUS(status));
	}

	/*
	 * The child created a private file descriptor table before
	 * requesting close-on-exec, so the original fds must not be
	 * close-on-exec.
	 */
	flags = fcntl(fd1, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	flags = fcntl(fd2, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	fd3 = dup2(fd1, 42);
	EXPECT_GT(fd3, 0);

	flags = fcntl(fd3, F_GETFD);
	EXPECT_GT(flags, -1);
	EXPECT_EQ(flags & FD_CLOEXEC, 0);

	EXPECT_EQ(close(fd1), 0);
	EXPECT_EQ(close(fd2), 0);
	EXPECT_EQ(close(fd3), 0);
}
TEST_HARNESS_MAIN