Commit 72fda6c8 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'execve-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull execve updates from Kees Cook:

 - Use value of kernel.randomize_va_space once per exec (Alexey
   Dobriyan)

 - Honor PT_LOAD alignment for static PIE

 - Make bprm->argmin only visible under CONFIG_MMU

 - Add KUnit testing of bprm_stack_limits()

* tag 'execve-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  exec: Avoid pathological argc, envc, and bprm->p values
  execve: Keep bprm->argmin behind CONFIG_MMU
  ELF: fix kernel.randomize_va_space double read
  exec: Add KUnit test for bprm_stack_limits()
  binfmt_elf: Honor PT_LOAD alignment for static PIE
  binfmt_elf: Calculate total_size earlier
  selftests/exec: Build both static and non-static load_address tests
parents f83e38fc 21f93108
...@@ -8285,7 +8285,9 @@ S: Supported ...@@ -8285,7 +8285,9 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
F: Documentation/userspace-api/ELF.rst F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c F: fs/*binfmt_*.c
F: fs/Kconfig.binfmt
F: fs/exec.c F: fs/exec.c
F: fs/exec_test.c
F: include/linux/binfmts.h F: include/linux/binfmts.h
F: include/linux/elf.h F: include/linux/elf.h
F: include/uapi/linux/binfmts.h F: include/uapi/linux/binfmts.h
......
...@@ -176,4 +176,12 @@ config COREDUMP ...@@ -176,4 +176,12 @@ config COREDUMP
certainly want to say Y here. Not necessary on systems that never certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code. need debugging or only ever run flawless code.
config EXEC_KUNIT_TEST
bool "Build execve tests" if !KUNIT_ALL_TESTS
depends on KUNIT=y
default KUNIT_ALL_TESTS
help
	  This builds the exec KUnit tests, which test boundary conditions
	  of various aspects of the exec internals.
endmenu endmenu
...@@ -1003,7 +1003,8 @@ static int load_elf_binary(struct linux_binprm *bprm) ...@@ -1003,7 +1003,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (elf_read_implies_exec(*elf_ex, executable_stack)) if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC; current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) const int snapshot_randomize_va_space = READ_ONCE(randomize_va_space);
if (!(current->personality & ADDR_NO_RANDOMIZE) && snapshot_randomize_va_space)
current->flags |= PF_RANDOMIZE; current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm); setup_new_exec(bprm);
...@@ -1061,10 +1062,40 @@ static int load_elf_binary(struct linux_binprm *bprm) ...@@ -1061,10 +1062,40 @@ static int load_elf_binary(struct linux_binprm *bprm)
* Header for ET_DYN binaries to calculate the * Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD * randomization (load_bias) for all the LOAD
* Program Headers. * Program Headers.
*/
/*
* Calculate the entire size of the ELF mapping
* (total_size), used for the initial mapping,
* due to load_addr_set which is set to true later
* once the initial mapping is performed.
*
* Note that this is only sensible when the LOAD
* segments are contiguous (or overlapping). If
* used for LOADs that are far apart, this would
* cause the holes between LOADs to be mapped,
* running the risk of having the mapping fail,
* as it would be larger than the ELF file itself.
* *
* As a result, only ET_DYN does this, since
* some ET_EXEC (e.g. ia64) may have large virtual
* memory holes between LOADs.
*
*/
total_size = total_mapping_size(elf_phdata,
elf_ex->e_phnum);
if (!total_size) {
retval = -EINVAL;
goto out_free_dentry;
}
/* Calculate any requested alignment. */
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
/*
* There are effectively two types of ET_DYN * There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with INTERP) * binaries: programs (i.e. PIE: ET_DYN with PT_INTERP)
* and loaders (ET_DYN without INTERP, since they * and loaders (ET_DYN without PT_INTERP, since they
* _are_ the ELF interpreter). The loaders must * _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program * be loaded away from programs since the program
* may otherwise collide with the loader (especially * may otherwise collide with the loader (especially
...@@ -1084,15 +1115,44 @@ static int load_elf_binary(struct linux_binprm *bprm) ...@@ -1084,15 +1115,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
* without MAP_FIXED nor MAP_FIXED_NOREPLACE). * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/ */
if (interpreter) { if (interpreter) {
/* On ET_DYN with PT_INTERP, we do the ASLR. */
load_bias = ELF_ET_DYN_BASE; load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE) if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd(); load_bias += arch_mmap_rnd();
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); /* Adjust alignment as requested. */
if (alignment) if (alignment)
load_bias &= ~(alignment - 1); load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE; elf_flags |= MAP_FIXED_NOREPLACE;
} else } else {
load_bias = 0; /*
* For ET_DYN without PT_INTERP, we rely on
* the architectures's (potentially ASLR) mmap
* base address (via a load_bias of 0).
*
* When a large alignment is requested, we
* must do the allocation at address "0" right
* now to discover where things will load so
* that we can adjust the resulting alignment.
* In this case (load_bias != 0), we can use
* MAP_FIXED_NOREPLACE to make sure the mapping
* doesn't collide with anything.
*/
if (alignment > ELF_MIN_ALIGN) {
load_bias = elf_load(bprm->file, 0, elf_ppnt,
elf_prot, elf_flags, total_size);
if (BAD_ADDR(load_bias)) {
retval = IS_ERR_VALUE(load_bias) ?
PTR_ERR((void*)load_bias) : -EINVAL;
goto out_free_dentry;
}
vm_munmap(load_bias, total_size);
/* Adjust alignment as requested. */
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
} else
load_bias = 0;
}
/* /*
* Since load_bias is used for all subsequent loading * Since load_bias is used for all subsequent loading
...@@ -1102,31 +1162,6 @@ static int load_elf_binary(struct linux_binprm *bprm) ...@@ -1102,31 +1162,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
* is then page aligned. * is then page aligned.
*/ */
load_bias = ELF_PAGESTART(load_bias - vaddr); load_bias = ELF_PAGESTART(load_bias - vaddr);
/*
* Calculate the entire size of the ELF mapping
* (total_size), used for the initial mapping,
* due to load_addr_set which is set to true later
* once the initial mapping is performed.
*
* Note that this is only sensible when the LOAD
* segments are contiguous (or overlapping). If
* used for LOADs that are far apart, this would
* cause the holes between LOADs to be mapped,
* running the risk of having the mapping fail,
* as it would be larger than the ELF file itself.
*
* As a result, only ET_DYN does this, since
* some ET_EXEC (e.g. ia64) may have large virtual
* memory holes between LOADs.
*
*/
total_size = total_mapping_size(elf_phdata,
elf_ex->e_phnum);
if (!total_size) {
retval = -EINVAL;
goto out_free_dentry;
}
} }
error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt, error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt,
...@@ -1250,7 +1285,7 @@ static int load_elf_binary(struct linux_binprm *bprm) ...@@ -1250,7 +1285,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
mm->end_data = end_data; mm->end_data = end_data;
mm->start_stack = bprm->p; mm->start_stack = bprm->p;
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) {
/* /*
* For architectures with ELF randomization, when executing * For architectures with ELF randomization, when executing
* a loader directly (i.e. no interpreter listed in ELF * a loader directly (i.e. no interpreter listed in ELF
......
...@@ -486,6 +486,35 @@ static int count_strings_kernel(const char *const *argv) ...@@ -486,6 +486,35 @@ static int count_strings_kernel(const char *const *argv)
return i; return i;
} }
/*
 * Record the rlimit marker for copy_strings(): the lowest stack address
 * (bprm->argmin) that argument/environment copying is allowed to reach.
 *
 * Returns 0 on success, or -E2BIG if bprm->p is smaller than @limit
 * (a pathological stack pointer whose subtraction would underflow).
 * On !CONFIG_MMU there is no argmin field, so this is a no-op success.
 */
static inline int bprm_set_stack_limit(struct linux_binprm *bprm,
				       unsigned long limit)
{
#ifdef CONFIG_MMU
	/* Avoid a pathological bprm->p. */
	if (bprm->p < limit)
		return -E2BIG;
	bprm->argmin = bprm->p - limit;
#endif
	return 0;
}
/*
 * Has the string-copy position (bprm->p grows downward) descended below
 * the marker set by bprm_set_stack_limit()? Always false on !CONFIG_MMU,
 * where no argmin exists.
 */
static inline bool bprm_hit_stack_limit(struct linux_binprm *bprm)
{
#ifdef CONFIG_MMU
	return bprm->p < bprm->argmin;
#else
	return false;
#endif
}
/*
* Calculate bprm->argmin from:
* - _STK_LIM
* - ARG_MAX
* - bprm->rlim_stack.rlim_cur
* - bprm->argc
* - bprm->envc
* - bprm->p
*/
static int bprm_stack_limits(struct linux_binprm *bprm) static int bprm_stack_limits(struct linux_binprm *bprm)
{ {
unsigned long limit, ptr_size; unsigned long limit, ptr_size;
...@@ -505,6 +534,9 @@ static int bprm_stack_limits(struct linux_binprm *bprm) ...@@ -505,6 +534,9 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* of argument strings even with small stacks * of argument strings even with small stacks
*/ */
limit = max_t(unsigned long, limit, ARG_MAX); limit = max_t(unsigned long, limit, ARG_MAX);
/* Reject totally pathological counts. */
if (bprm->argc < 0 || bprm->envc < 0)
return -E2BIG;
/* /*
* We must account for the size of all the argv and envp pointers to * We must account for the size of all the argv and envp pointers to
* the argv and envp strings, since they will also take up space in * the argv and envp strings, since they will also take up space in
...@@ -518,13 +550,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm) ...@@ -518,13 +550,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm)
* argc can never be 0, to keep them from walking envp by accident. * argc can never be 0, to keep them from walking envp by accident.
* See do_execveat_common(). * See do_execveat_common().
*/ */
ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *); if (check_add_overflow(max(bprm->argc, 1), bprm->envc, &ptr_size) ||
check_mul_overflow(ptr_size, sizeof(void *), &ptr_size))
return -E2BIG;
if (limit <= ptr_size) if (limit <= ptr_size)
return -E2BIG; return -E2BIG;
limit -= ptr_size; limit -= ptr_size;
bprm->argmin = bprm->p - limit; return bprm_set_stack_limit(bprm, limit);
return 0;
} }
/* /*
...@@ -562,10 +595,8 @@ static int copy_strings(int argc, struct user_arg_ptr argv, ...@@ -562,10 +595,8 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
pos = bprm->p; pos = bprm->p;
str += len; str += len;
bprm->p -= len; bprm->p -= len;
#ifdef CONFIG_MMU if (bprm_hit_stack_limit(bprm))
if (bprm->p < bprm->argmin)
goto out; goto out;
#endif
while (len > 0) { while (len > 0) {
int offset, bytes_to_copy; int offset, bytes_to_copy;
...@@ -640,7 +671,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm) ...@@ -640,7 +671,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
/* We're going to work our way backwards. */ /* We're going to work our way backwards. */
arg += len; arg += len;
bprm->p -= len; bprm->p -= len;
if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin) if (bprm_hit_stack_limit(bprm))
return -E2BIG; return -E2BIG;
while (len > 0) { while (len > 0) {
...@@ -2203,3 +2234,7 @@ static int __init init_fs_exec_sysctls(void) ...@@ -2203,3 +2234,7 @@ static int __init init_fs_exec_sysctls(void)
fs_initcall(init_fs_exec_sysctls); fs_initcall(init_fs_exec_sysctls);
#endif /* CONFIG_SYSCTL */ #endif /* CONFIG_SYSCTL */
#ifdef CONFIG_EXEC_KUNIT_TEST
#include "exec_test.c"
#endif
// SPDX-License-Identifier: GPL-2.0-only
#include <kunit/test.h>
/* One bprm_stack_limits() test case: the input bprm and expected outputs. */
struct bprm_stack_limits_result {
	struct linux_binprm bprm;	/* Input; copied before each run. */
	int expected_rc;		/* Expected return value (0 or -E2BIG). */
	unsigned long expected_argmin;	/* Expected bprm.argmin (CONFIG_MMU only). */
};
/*
 * Table of bprm_stack_limits() inputs and expected results. Entries with
 * .expected_rc exercise rejection paths; entries with .expected_argmin
 * exercise the computed rlimit marker (checked only under CONFIG_MMU).
 */
static const struct bprm_stack_limits_result bprm_stack_limits_results[] = {
	/* Negative argc/envc counts produce -E2BIG */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = INT_MIN, .envc = INT_MIN }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = 5, .envc = -1 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = -1, .envc = 10 }, .expected_rc = -E2BIG },
	/* The max value of argc or envc is MAX_ARG_STRINGS. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = INT_MAX, .envc = INT_MAX }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = MAX_ARG_STRINGS, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = 0, .envc = MAX_ARG_STRINGS }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = MAX_ARG_STRINGS, .envc = 0 }, .expected_rc = -E2BIG },
	/*
	 * On 32-bit system these argc and envc counts, while likely impossible
	 * to represent within the associated TASK_SIZE, could overflow the
	 * limit calculation, and bypass the ptr_size <= limit check.
	 */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = 0x20000001, .envc = 0x20000001 }, .expected_rc = -E2BIG },
#ifdef CONFIG_MMU
	/* Make sure a pathological bprm->p doesn't cause an overflow. */
	{ { .p = sizeof(void *), .rlim_stack.rlim_cur = ULONG_MAX,
	    .argc = 10, .envc = 10 }, .expected_rc = -E2BIG },
#endif
	/*
	 * 0 rlim_stack will get raised to ARG_MAX. With 1 string pointer,
	 * we should see p - ARG_MAX + sizeof(void *).
	 */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = 1, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
	/* Validate that argc is always raised to a minimum of 1. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = 0, .envc = 0 }, .expected_argmin = ULONG_MAX - ARG_MAX + sizeof(void *)},
	/*
	 * 0 rlim_stack will get raised to ARG_MAX. With pointers filling ARG_MAX,
	 * we should see -E2BIG. (Note argc is always raised to at least 1.)
	 */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
	/* And with one less, we see space for exactly 1 pointer. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 0,
	    .argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	/* If we raise rlim_stack / 4 to exactly ARG_MAX, nothing changes. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = ARG_MAX / sizeof(void *), .envc = 0 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = ARG_MAX / sizeof(void *) + 1, .envc = 0 }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = 0, .envc = ARG_MAX / sizeof(void *) }, .expected_rc = -E2BIG },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = (ARG_MAX / sizeof(void *)) - 1, .envc = 0 },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = ARG_MAX * 4,
	    .argc = 0, .envc = (ARG_MAX / sizeof(void *)) - 2, },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	/* But raising it another pointer * 4 will provide space for 1 more pointer. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
	    .argc = ARG_MAX / sizeof(void *), .envc = 0 },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = (ARG_MAX + sizeof(void *)) * 4,
	    .argc = 0, .envc = ARG_MAX / sizeof(void *) - 1 },
	  .expected_argmin = ULONG_MAX - sizeof(void *) },
	/* Raising rlim_stack / 4 to _STK_LIM / 4 * 3 will see more space. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3),
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
	/* But raising it any further will see no increase. */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
	/*
	 * Was "4 * (_STK_LIM / 4 * + sizeof(void *))": the stray unary '+'
	 * dropped the intended '3' multiplier, so this entry never tested a
	 * value just past the _STK_LIM / 4 * 3 cap. Fixed to match the
	 * comment above and the sibling entry.
	 */
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
	{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
	    .argc = 0, .envc = 0 },
	  .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
};
/*
 * Run bprm_stack_limits() against every entry in
 * bprm_stack_limits_results[], comparing the return code and (under
 * CONFIG_MMU) the computed bprm.argmin against the expected values.
 */
static void exec_test_bprm_stack_limits(struct kunit *test)
{
	int case_nr;

	/* Double-check the constants. */
	KUNIT_EXPECT_EQ(test, _STK_LIM, SZ_8M);
	KUNIT_EXPECT_EQ(test, ARG_MAX, 32 * SZ_4K);
	KUNIT_EXPECT_EQ(test, MAX_ARG_STRINGS, 0x7FFFFFFF);

	for (case_nr = 0; case_nr < ARRAY_SIZE(bprm_stack_limits_results); case_nr++) {
		const struct bprm_stack_limits_result *expect =
				&bprm_stack_limits_results[case_nr];
		/* Work on a copy so the const table stays pristine. */
		struct linux_binprm bprm = expect->bprm;
		int rc = bprm_stack_limits(&bprm);

		KUNIT_EXPECT_EQ_MSG(test, rc, expect->expected_rc,
				    "on loop %d", case_nr);
#ifdef CONFIG_MMU
		KUNIT_EXPECT_EQ_MSG(test, bprm.argmin, expect->expected_argmin,
				    "on loop %d", case_nr);
#endif
	}
}
/* Cases run by the "exec" KUnit suite; NULL-terminated. */
static struct kunit_case exec_test_cases[] = {
	KUNIT_CASE(exec_test_bprm_stack_limits),
	{},
};

/* Suite definition, registered with the KUnit framework below. */
static struct kunit_suite exec_test_suite = {
	.name = "exec",
	.test_cases = exec_test_cases,
};

kunit_test_suite(exec_test_suite);
...@@ -19,13 +19,13 @@ struct linux_binprm { ...@@ -19,13 +19,13 @@ struct linux_binprm {
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
struct vm_area_struct *vma; struct vm_area_struct *vma;
unsigned long vma_pages; unsigned long vma_pages;
unsigned long argmin; /* rlimit marker for copy_strings() */
#else #else
# define MAX_ARG_PAGES 32 # define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES]; struct page *page[MAX_ARG_PAGES];
#endif #endif
struct mm_struct *mm; struct mm_struct *mm;
unsigned long p; /* current top of mem */ unsigned long p; /* current top of mem */
unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int unsigned int
/* Should an execfd be passed to userspace? */ /* Should an execfd be passed to userspace? */
have_execfd:1, have_execfd:1,
......
...@@ -3,8 +3,13 @@ CFLAGS = -Wall ...@@ -3,8 +3,13 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE CFLAGS += -D_GNU_SOURCE
ALIGNS := 0x1000 0x200000 0x1000000
ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS))
ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS))
ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES)
TEST_PROGS := binfmt_script.py TEST_PROGS := binfmt_script.py
TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS)
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test # Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile TEST_FILES := Makefile
...@@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat ...@@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat
$(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
cp $< $@ cp $< $@
chmod -x $@ chmod -x $@
$(OUTPUT)/load_address_4096: load_address.c $(OUTPUT)/load_address.0x%: load_address.c
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@ $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
$(OUTPUT)/load_address_2097152: load_address.c -fPIE -pie $< -o $@
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@ $(OUTPUT)/load_address.static.0x%: load_address.c
$(OUTPUT)/load_address_16777216: load_address.c $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \
$(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@ -fPIE -static-pie $< -o $@
...@@ -5,11 +5,13 @@ ...@@ -5,11 +5,13 @@
#include <link.h> #include <link.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdbool.h>
#include "../kselftest.h" #include "../kselftest.h"
struct Statistics { struct Statistics {
unsigned long long load_address; unsigned long long load_address;
unsigned long long alignment; unsigned long long alignment;
bool interp;
}; };
int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
...@@ -26,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) ...@@ -26,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
stats->alignment = 0; stats->alignment = 0;
for (i = 0; i < info->dlpi_phnum; i++) { for (i = 0; i < info->dlpi_phnum; i++) {
unsigned long long align;
if (info->dlpi_phdr[i].p_type == PT_INTERP) {
stats->interp = true;
continue;
}
if (info->dlpi_phdr[i].p_type != PT_LOAD) if (info->dlpi_phdr[i].p_type != PT_LOAD)
continue; continue;
if (info->dlpi_phdr[i].p_align > stats->alignment) align = info->dlpi_phdr[i].p_align;
stats->alignment = info->dlpi_phdr[i].p_align;
if (align > stats->alignment)
stats->alignment = align;
} }
return 1; // Terminate dl_iterate_phdr. return 1; // Terminate dl_iterate_phdr.
...@@ -38,27 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) ...@@ -38,27 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data)
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
struct Statistics extracted; struct Statistics extracted = { };
unsigned long long misalign; unsigned long long misalign, pow2;
bool interp_needed;
char buf[1024];
FILE *maps;
int ret; int ret;
ksft_print_header(); ksft_print_header();
ksft_set_plan(1); ksft_set_plan(4);
/* Dump maps file for debugging reference. */
maps = fopen("/proc/self/maps", "r");
if (!maps)
ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno));
while (fgets(buf, sizeof(buf), maps)) {
ksft_print_msg("%s", buf);
}
fclose(maps);
/* Walk the program headers. */
ret = dl_iterate_phdr(ExtractStatistics, &extracted); ret = dl_iterate_phdr(ExtractStatistics, &extracted);
if (ret != 1) if (ret != 1)
ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n"); ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n");
if (extracted.alignment == 0) /* Report our findings. */
ksft_exit_fail_msg("FAILED: No alignment found\n"); ksft_print_msg("load_address=%#llx alignment=%#llx\n",
else if (extracted.alignment & (extracted.alignment - 1)) extracted.load_address, extracted.alignment);
ksft_exit_fail_msg("FAILED: Alignment is not a power of 2\n");
/* If we're named with ".static." we expect no INTERP. */
interp_needed = strstr(argv[0], ".static.") == NULL;
/* Were we built as expected? */
ksft_test_result(interp_needed == extracted.interp,
"%s INTERP program header %s\n",
interp_needed ? "Wanted" : "Unwanted",
extracted.interp ? "seen" : "missing");
/* Did we find an alignment? */
ksft_test_result(extracted.alignment != 0,
"Alignment%s found\n", extracted.alignment ? "" : " NOT");
/* Is the alignment sane? */
pow2 = extracted.alignment & (extracted.alignment - 1);
ksft_test_result(pow2 == 0,
"Alignment is%s a power of 2: %#llx\n",
pow2 == 0 ? "" : " NOT", extracted.alignment);
/* Is the load address aligned? */
misalign = extracted.load_address & (extracted.alignment - 1); misalign = extracted.load_address & (extracted.alignment - 1);
if (misalign) ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n",
ksft_exit_fail_msg("FAILED: alignment = %llu, load_address = %llu\n", misalign ? "MIS" : "", misalign);
extracted.alignment, extracted.load_address);
ksft_test_result_pass("Completed\n");
ksft_finished(); ksft_finished();
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment