Commit 02d9fe1c authored by Alexei Starovoitov, committed by Andrii Nakryiko

Merge branch 'add-bpf-lsm-return-value-range-check-bpf-part'

Xu Kuohai says:

====================
Add BPF LSM return value range check, BPF part

From: Xu Kuohai <xukuohai@huawei.com>

An LSM BPF prog can cause a kernel panic by returning an unexpected value,
such as a positive value from the file_alloc_security hook.
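
As an illustration (an editorial sketch, not a prog from this series;
assumes the usual vmlinux.h/libbpf selftest setup), a prog like the
following used to load even though file_alloc_security only expects
0 or a negative errno:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    SEC("lsm/file_alloc_security")
    int BPF_PROG(bad_file_alloc, struct file *file)
    {
            /* unexpected positive return value; with this series the
             * verifier rejects it (R0 should have been in [-4095, 0])
             */
            return 1;
    }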

To fix it, series [1] refactored the LSM hook return values and added a
BPF return value check on top of that. Since the LSM hook refactoring and
the BPF prog return value checking are not closely related, this series
splits the BPF-related patches out of [1].

v2:
- Update Shung-Hsi's patch with [3]

v1: https://lore.kernel.org/bpf/20240719081749.769748-1-xukuohai@huaweicloud.com/

Changes to [1]:

1. Extend the LSM disabled hook list to include the hooks refactored in [1],
   to avoid a dependency on the hook return value refactoring patches.

2. Replace the special-case patch for bitwise AND on [-1, 0] with Shung-Hsi's
   general bitwise AND improvement patch [2] (see the sketch after this list).

3. Remove unused patches.
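
As a worked example of why the general patch covers the old special case:
for "r0 &= r1" with r1 in [-1, 0], r1 == -1 leaves r0 unchanged and
r1 == 0 yields 0, so the result stays in [min(smin(r0), 0), max(smax(r0), 0)].
A sketch of the LSM pattern this enables (editorial, BPF pseudo-asm):

    /* r0 in [-4095, 0], r1 in [-1, 0], e.g. a condition turned into a mask */
    r0 &= r1;
    /* still provably in [-4095, 0]; with [2] the verifier derives this
     * from the signed ranges alone, without a special case for [-1, 0]
     */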

[1] https://lore.kernel.org/bpf/20240711111908.3817636-1-xukuohai@huaweicloud.com
    https://lore.kernel.org/bpf/20240711113828.3818398-1-xukuohai@huaweicloud.com

[2] https://lore.kernel.org/bpf/ykuhustu7vt2ilwhl32kj655xfdgdlm2xkl5rff6tw2ycksovp@ss2n4gpjysnw

[3] https://lore.kernel.org/bpf/20240719081702.137173-1-shung-hsi.yu@suse.com/

Shung-Hsi Yu (1):
  bpf, verifier: improve signed ranges inference for BPF_AND
====================

Link: https://lore.kernel.org/r/20240719110059.797546-1-xukuohai@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents e2854bc3 04d8243b
......@@ -294,6 +294,7 @@ struct bpf_map {
* same prog type, JITed flag and xdp_has_frags flag.
*/
struct {
const struct btf_type *attach_func_proto;
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
......@@ -927,6 +928,7 @@ struct bpf_insn_access_aux {
};
};
struct bpf_verifier_log *log; /* for verbose logs */
bool is_retval; /* is accessing function return value ? */
};
static inline void
......
......@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
......@@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
struct bpf_retval_range *range);
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
......@@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
{
}
static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
struct bpf_retval_range *range)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
......@@ -11,7 +11,6 @@
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
#include <linux/bpf_verifier.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf_ids.h>
......@@ -36,6 +35,24 @@ BTF_SET_START(bpf_lsm_hooks)
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)
BTF_SET_START(bpf_lsm_disabled_hooks)
BTF_ID(func, bpf_lsm_vm_enough_memory)
BTF_ID(func, bpf_lsm_inode_need_killpriv)
BTF_ID(func, bpf_lsm_inode_getsecurity)
BTF_ID(func, bpf_lsm_inode_listsecurity)
BTF_ID(func, bpf_lsm_inode_copy_up_xattr)
BTF_ID(func, bpf_lsm_getselfattr)
BTF_ID(func, bpf_lsm_getprocattr)
BTF_ID(func, bpf_lsm_setprocattr)
#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_getsecurity)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_match)
#endif
BTF_ID(func, bpf_lsm_ismaclabel)
BTF_SET_END(bpf_lsm_disabled_hooks)
/* List of LSM hooks that should operate on 'current' cgroup regardless
* of function signature.
*/
......@@ -97,15 +114,24 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
u32 btf_id = prog->aux->attach_btf_id;
const char *func_name = prog->aux->attach_func_name;
if (!prog->gpl_compatible) {
bpf_log(vlog,
"LSM programs must have a GPL compatible license\n");
return -EINVAL;
}
if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) {
bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n",
btf_id, func_name);
return -EINVAL;
}
if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) {
bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
btf_id, func_name);
return -EINVAL;
}
......@@ -390,3 +416,36 @@ const struct bpf_verifier_ops lsm_verifier_ops = {
.get_func_proto = bpf_lsm_func_proto,
.is_valid_access = btf_ctx_access,
};
/* hooks return 0 or 1 */
BTF_SET_START(bool_lsm_hooks)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_known)
#endif
BTF_ID(func, bpf_lsm_inode_xattr_skipcap)
BTF_SET_END(bool_lsm_hooks)
int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
struct bpf_retval_range *retval_range)
{
/* no return value range for void hooks */
if (!prog->aux->attach_func_proto->type)
return -EINVAL;
if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) {
retval_range->minval = 0;
retval_range->maxval = 1;
} else {
/* All other available LSM hooks, except task_prctl, return 0
* on success and negative error code on failure.
* To keep things simple, we only allow bpf progs to return 0
* or negative errno for task_prctl too.
*/
retval_range->minval = -MAX_ERRNO;
retval_range->maxval = 0;
}
return 0;
}
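/* Editorial sketch of how a caller consumes this helper; enforce_range()
 * is a hypothetical placeholder, the real consumer is check_return_code()
 * in the verifier hunks below:
 *
 *	struct bpf_retval_range range;
 *
 *	if (!bpf_lsm_get_retval_range(prog, &range))
 *		// [0, 1] for the bool_lsm_hooks above, [-MAX_ERRNO, 0]
 *		// for all other hooks; -EINVAL means a void hook
 *		enforce_range(range.minval, range.maxval);
 */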
......@@ -6416,8 +6416,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (arg == nr_args) {
switch (prog->expected_attach_type) {
case BPF_LSM_MAC:
/* mark we are accessing the return value */
info->is_retval = true;
fallthrough;
case BPF_LSM_CGROUP:
case BPF_TRACE_FEXIT:
/* When LSM programs are attached to void LSM hooks
* they use FEXIT trampolines and when attached to
......
......@@ -2302,6 +2302,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
{
enum bpf_prog_type prog_type = resolve_prog_type(fp);
bool ret;
struct bpf_prog_aux *aux = fp->aux;
if (fp->kprobe_override)
return false;
......@@ -2311,7 +2312,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
* in the case of devmap and cpumap). Until device checks
* are implemented, prohibit adding dev-bound programs to program maps.
*/
if (bpf_prog_is_dev_bound(aux))
return false;
spin_lock(&map->owner.lock);
......@@ -2321,12 +2322,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
*/
map->owner.type = prog_type;
map->owner.jited = fp->jited;
map->owner.xdp_has_frags = aux->xdp_has_frags;
map->owner.attach_func_proto = aux->attach_func_proto;
ret = true;
} else {
ret = map->owner.type == prog_type &&
map->owner.jited == fp->jited &&
map->owner.xdp_has_frags == aux->xdp_has_frags;
if (ret &&
map->owner.attach_func_proto != aux->attach_func_proto) {
switch (prog_type) {
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_LSM:
case BPF_PROG_TYPE_EXT:
case BPF_PROG_TYPE_STRUCT_OPS:
ret = false;
break;
default:
break;
}
}
}
spin_unlock(&map->owner.lock);
......
......@@ -2334,6 +2334,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
__mark_reg_unknown(env, regs + regno);
}
static int __mark_reg_s32_range(struct bpf_verifier_env *env,
struct bpf_reg_state *regs,
u32 regno,
s32 s32_min,
s32 s32_max)
{
struct bpf_reg_state *reg = regs + regno;
reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min);
reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max);
reg->smin_value = max_t(s64, reg->smin_value, s32_min);
reg->smax_value = min_t(s64, reg->smax_value, s32_max);
reg_bounds_sync(reg);
return reg_bounds_sanity_check(env, reg, "s32_range");
}
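/* Editorial worked example of the intersection above:
 *   reg already known:  s32 in [-10, 5]
 *   hook retval range:  [-4095, 0]
 *   => s32_min = max(-10, -4095) = -10
 *      s32_max = min(5, 0)       = 0
 * reg_bounds_sync() then propagates [-10, 0] to the u32/u64/var_off
 * bounds, and reg_bounds_sanity_check() catches an empty intersection.
 */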
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
{
......@@ -5607,11 +5626,12 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type,
struct btf **btf, u32 *btf_id, bool *is_retval)
{
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
.log = &env->log,
.is_retval = false,
};
if (env->ops->is_valid_access &&
......@@ -5624,6 +5644,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
* type of narrower access.
*/
*reg_type = info.reg_type;
*is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) {
*btf = info.btf;
......@@ -6792,6 +6813,17 @@ static int check_stack_access_within_bounds(
return grow_stack_state(env, state, -min_off /* size */);
}
static bool get_func_retval_range(struct bpf_prog *prog,
struct bpf_retval_range *range)
{
if (prog->type == BPF_PROG_TYPE_LSM &&
prog->expected_attach_type == BPF_LSM_MAC &&
!bpf_lsm_get_retval_range(prog, range)) {
return true;
}
return false;
}
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
......@@ -6896,6 +6928,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
mark_reg_unknown(env, regs, value_regno);
} else if (reg->type == PTR_TO_CTX) {
bool is_retval = false;
struct bpf_retval_range range;
enum bpf_reg_type reg_type = SCALAR_VALUE;
struct btf *btf = NULL;
u32 btf_id = 0;
......@@ -6911,7 +6945,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
&btf_id, &is_retval);
if (err)
verbose_linfo(env, insn_idx, "; ");
if (!err && t == BPF_READ && value_regno >= 0) {
......@@ -6920,7 +6954,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
* case, we know the offset is zero.
*/
if (reg_type == SCALAR_VALUE) {
if (is_retval && get_func_retval_range(env->prog, &range)) {
err = __mark_reg_s32_range(env, regs, value_regno,
range.minval, range.maxval);
if (err)
return err;
} else {
mark_reg_unknown(env, regs, value_regno);
}
} else {
mark_reg_known_zero(env, regs,
value_regno);
......@@ -9943,9 +9984,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}
static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
bool return_32bit)
{
if (return_32bit)
return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
else
return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
}
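/* Editorial note: LSM hooks return a 32-bit int, so only the s32 bounds
 * must hold for them (return_32bit == true): e.g. a reg whose s32 range
 * is [-4095, 0] passes even if its 64-bit smin/smax are wider. Callbacks
 * keep return_32bit == false, since bpf_callback_t returns a 64-bit value.
 */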
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
......@@ -9982,8 +10027,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
if (err)
return err;
/* enforce R0 return value range, and bpf_callback_t returns 64bit */
if (!retval_range_within(callee->callback_ret_range, r0, false)) {
verbose_invalid_scalar(env, r0, callee->callback_ret_range,
"At callback return", "R0");
return -EINVAL;
......@@ -15657,6 +15702,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
const bool is_subprog = frame->subprogno;
bool return_32bit = false;
/* LSM and struct_ops func-ptr's return type could be "void" */
if (!is_subprog || frame->in_exception_callback_fn) {
......@@ -15762,12 +15808,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_LSM:
if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
/* no range found, any return value is allowed */
if (!get_func_retval_range(env->prog, &range))
return 0;
/* no restricted range, any return value is allowed */
if (range.minval == S32_MIN && range.maxval == S32_MAX)
return 0;
return_32bit = true;
} else if (!env->prog->aux->attach_func_proto->type) {
/* Make sure programs that attach to void
* hooks don't try to modify return value.
*/
......@@ -15797,7 +15845,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
if (err)
return err;
if (!retval_range_within(range, reg, return_32bit)) {
verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
if (!is_subprog &&
prog->expected_attach_type == BPF_LSM_CGROUP &&
......
......@@ -12,6 +12,7 @@
#include <stdlib.h>
#include "lsm.skel.h"
#include "lsm_tailcall.skel.h"
char *CMD_ARGS[] = {"true", NULL};
......@@ -95,7 +96,7 @@ static int test_lsm(struct lsm *skel)
return 0;
}
static void test_lsm_basic(void)
{
struct lsm *skel = NULL;
int err;
......@@ -114,3 +115,46 @@ void test_test_lsm(void)
close_prog:
lsm__destroy(skel);
}
static void test_lsm_tailcall(void)
{
struct lsm_tailcall *skel = NULL;
int map_fd, prog_fd;
int err, key;
skel = lsm_tailcall__open_and_load();
if (!ASSERT_OK_PTR(skel, "lsm_tailcall__skel_load"))
goto close_prog;
map_fd = bpf_map__fd(skel->maps.jmp_table);
if (CHECK_FAIL(map_fd < 0))
goto close_prog;
prog_fd = bpf_program__fd(skel->progs.lsm_file_permission_prog);
if (CHECK_FAIL(prog_fd < 0))
goto close_prog;
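/* Editorial note: this first update is expected to fail (note the
 * CHECK_FAIL(!err) below): jmp_table's owner attach_func_proto is taken
 * from lsm_file_alloc_security_entry, and file_permission has a
 * different prototype.
 */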
key = 0;
err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
if (CHECK_FAIL(!err))
goto close_prog;
prog_fd = bpf_program__fd(skel->progs.lsm_file_alloc_security_prog);
if (CHECK_FAIL(prog_fd < 0))
goto close_prog;
err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
if (CHECK_FAIL(err))
goto close_prog;
close_prog:
lsm_tailcall__destroy(skel);
}
void test_test_lsm(void)
{
if (test__start_subtest("lsm_basic"))
test_lsm_basic();
if (test__start_subtest("lsm_tailcall"))
test_lsm_tailcall();
}
......@@ -88,6 +88,7 @@
#include "verifier_xdp.skel.h"
#include "verifier_xdp_direct_packet_access.skel.h"
#include "verifier_bits_iter.skel.h"
#include "verifier_lsm.skel.h"
#define MAX_ENTRIES 11
......@@ -206,6 +207,7 @@ void test_verifier_xadd(void) { RUN(verifier_xadd); }
void test_verifier_xdp(void) { RUN(verifier_xdp); }
void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
void test_verifier_lsm(void) { RUN(verifier_lsm); }
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
......
......@@ -5,6 +5,16 @@
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
#define __STR(x) #x
#define set_if_not_errno_or_zero(x, y) \
({ \
asm volatile ("if %0 s< -4095 goto +1\n" \
"if %0 s<= 0 goto +1\n" \
"%0 = " __STR(y) "\n" \
: "+r"(x)); \
})
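/* Editorial note: set_if_not_errno_or_zero(x, y) is roughly
 *
 *	if (x < -4095 || x > 0)
 *		x = y;
 *
 * written as inline asm so the bounds check stays visible to the
 * verifier on the exact register holding the return value.
 */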
static inline int IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Huawei Technologies Co., Ltd */
#include "vmlinux.h"
#include <errno.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
SEC("lsm/file_permission")
int lsm_file_permission_prog(void *ctx)
{
return 0;
}
SEC("lsm/file_alloc_security")
int lsm_file_alloc_security_prog(void *ctx)
{
return 0;
}
SEC("lsm/file_alloc_security")
int lsm_file_alloc_security_entry(void *ctx)
{
bpf_tail_call_static(ctx, &jmp_table, 0);
return 0;
}
......@@ -6,6 +6,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"
#include "err.h"
char _license[] SEC("license") = "GPL";
......@@ -79,5 +80,8 @@ int BPF_PROG(test_file_open, struct file *f)
ret = bpf_verify_pkcs7_signature(&digest_ptr, &sig_ptr, trusted_keyring);
bpf_key_put(trusted_keyring);
set_if_not_errno_or_zero(ret, -EFAULT);
return ret;
}
......@@ -11,6 +11,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_kfuncs.h"
#include "err.h"
#define MAX_DATA_SIZE (1024 * 1024)
#define MAX_SIG_SIZE 1024
......@@ -55,12 +56,12 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
ret = bpf_probe_read_kernel(&value, sizeof(value), &attr->value);
if (ret)
goto out;
ret = bpf_copy_from_user(data_val, sizeof(struct data),
(void *)(unsigned long)value);
if (ret)
goto out;
if (data_val->data_len > sizeof(data_val->data))
return -EINVAL;
......@@ -84,5 +85,8 @@ int BPF_PROG(bpf, int cmd, union bpf_attr *attr, unsigned int size)
bpf_key_put(trusted_keyring);
out:
set_if_not_errno_or_zero(ret, -EFAULT);
return ret;
}
......@@ -8,8 +8,8 @@
char _license[] SEC("license") = "GPL";
int my_pid;
int reject_capable;
int reject_cmd;
SEC("lsm/bpf_token_capable")
int BPF_PROG(token_capable, struct bpf_token *token, int cap)
......
......@@ -7,6 +7,7 @@
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
#include "err.h"
/* The compiler may be able to detect the access to uninitialized
memory in the routines performing out of bound memory accesses and
......@@ -331,7 +332,11 @@ SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
int ret;
ret = tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
set_if_not_errno_or_zero(ret, -1);
return ret;
}
SEC("?struct_ops/test_1")
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("lsm/file_alloc_security")
__description("lsm bpf prog with -4095~0 retval. test 1")
__success
__naked int errno_zero_retval_test1(void *ctx)
{
asm volatile (
"r0 = 0;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_alloc_security")
__description("lsm bpf prog with -4095~0 retval. test 2")
__success
__naked int errno_zero_retval_test2(void *ctx)
{
asm volatile (
"r0 = -4095;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_mprotect")
__description("lsm bpf prog with -4095~0 retval. test 4")
__failure __msg("R0 has smin=-4096 smax=-4096 should have been in [-4095, 0]")
__naked int errno_zero_retval_test4(void *ctx)
{
asm volatile (
"r0 = -4096;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_mprotect")
__description("lsm bpf prog with -4095~0 retval. test 5")
__failure __msg("R0 has smin=4096 smax=4096 should have been in [-4095, 0]")
__naked int errno_zero_retval_test5(void *ctx)
{
asm volatile (
"r0 = 4096;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_mprotect")
__description("lsm bpf prog with -4095~0 retval. test 6")
__failure __msg("R0 has smin=1 smax=1 should have been in [-4095, 0]")
__naked int errno_zero_retval_test6(void *ctx)
{
asm volatile (
"r0 = 1;"
"exit;"
::: __clobber_all);
}
SEC("lsm/audit_rule_known")
__description("lsm bpf prog with bool retval. test 1")
__success
__naked int bool_retval_test1(void *ctx)
{
asm volatile (
"r0 = 1;"
"exit;"
::: __clobber_all);
}
SEC("lsm/audit_rule_known")
__description("lsm bpf prog with bool retval. test 2")
__success
__naked int bool_retval_test2(void *ctx)
{
asm volatile (
"r0 = 0;"
"exit;"
::: __clobber_all);
}
SEC("lsm/audit_rule_known")
__description("lsm bpf prog with bool retval. test 3")
__failure __msg("R0 has smin=-1 smax=-1 should have been in [0, 1]")
__naked int bool_retval_test3(void *ctx)
{
asm volatile (
"r0 = -1;"
"exit;"
::: __clobber_all);
}
SEC("lsm/audit_rule_known")
__description("lsm bpf prog with bool retval. test 4")
__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]")
__naked int bool_retval_test4(void *ctx)
{
asm volatile (
"r0 = 2;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_free_security")
__success
__description("lsm bpf prog with void retval. test 1")
__naked int void_retval_test1(void *ctx)
{
asm volatile (
"r0 = -4096;"
"exit;"
::: __clobber_all);
}
SEC("lsm/file_free_security")
__success
__description("lsm bpf prog with void retval. test 2")
__naked int void_retval_test2(void *ctx)
{
asm volatile (
"r0 = 4096;"
"exit;"
::: __clobber_all);
}
SEC("lsm/getprocattr")
__description("lsm disabled hook: getprocattr")
__failure __msg("points to disabled hook")
__naked int disabled_hook_test1(void *ctx)
{
asm volatile (
"r0 = 0;"
"exit;"
::: __clobber_all);
}
SEC("lsm/setprocattr")
__description("lsm disabled hook: setprocattr")
__failure __msg("points to disabled hook")
__naked int disabled_hook_test2(void *ctx)
{
asm volatile (
"r0 = 0;"
"exit;"
::: __clobber_all);
}
SEC("lsm/ismaclabel")
__description("lsm disabled hook: ismaclabel")
__failure __msg("points to disabled hook")
__naked int disabled_hook_test3(void *ctx)
{
asm volatile (
"r0 = 0;"
"exit;"
::: __clobber_all);
}
char _license[] SEC("license") = "GPL";