Commit 52578f7f authored by Martin KaFai Lau

Merge branch 'BPF crypto API framework'

Vadim Fedorenko says:

====================
This series introduces crypto kfuncs to make BPF programs able to
utilize the kernel crypto subsystem. Crypto operations are made pluggable to
avoid extensive growth of the kernel when it's not needed. Only skcipher is
added within this series, but it can be easily extended to other types
of operations. No hardware offload supported as it needs sleepable
context which is not available for TX or XDP programs. At the same time
crypto context initialization kfunc can only run in sleepable context,
that's why it should be run separately and store the result in the map.

Selftests show the common way to implement crypto actions in BPF
programs. Benchmark is also added to have a baseline.
====================
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parents 95c07d58 8000e627
......@@ -3822,6 +3822,14 @@ F: kernel/bpf/tnum.c
F: kernel/bpf/trampoline.c
F: kernel/bpf/verifier.c
BPF [CRYPTO]
M: Vadim Fedorenko <vadim.fedorenko@linux.dev>
L: bpf@vger.kernel.org
S: Maintained
F: crypto/bpf_crypto_skcipher.c
F: include/linux/bpf_crypto.h
F: kernel/bpf/crypto.c
BPF [DOCUMENTATION] (Related to Standardization)
R: David Vernet <void@manifault.com>
L: bpf@vger.kernel.org
......
......@@ -20,6 +20,9 @@ crypto_skcipher-y += lskcipher.o
crypto_skcipher-y += skcipher.o
obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
obj-$(CONFIG_CRYPTO_SKCIPHER2) += bpf_crypto_skcipher.o
endif
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta, Inc */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/bpf_crypto.h>
#include <crypto/skcipher.h>
/* Thin adapters binding the generic bpf_crypto_type callbacks (which pass
 * the transform as an opaque void *) to the lskcipher crypto API. Each
 * wrapper only forwards its arguments; no state is kept here.
 */

/* Allocate an lskcipher transform for @algo (may return an ERR_PTR). */
static void *bpf_crypto_lskcipher_alloc_tfm(const char *algo)
{
	return crypto_alloc_lskcipher(algo, 0, 0);
}

/* Free a transform previously returned by the alloc_tfm callback. */
static void bpf_crypto_lskcipher_free_tfm(void *tfm)
{
	crypto_free_lskcipher(tfm);
}

/* Report whether @algo is available, matching only the lskcipher type. */
static int bpf_crypto_lskcipher_has_algo(const char *algo)
{
	return crypto_has_skcipher(algo, CRYPTO_ALG_TYPE_LSKCIPHER, CRYPTO_ALG_TYPE_MASK);
}

/* Program the cipher key into the transform. */
static int bpf_crypto_lskcipher_setkey(void *tfm, const u8 *key, unsigned int keylen)
{
	return crypto_lskcipher_setkey(tfm, key, keylen);
}

/* Expose transform flags (e.g. CRYPTO_TFM_NEED_KEY) to the core layer. */
static u32 bpf_crypto_lskcipher_get_flags(void *tfm)
{
	return crypto_lskcipher_get_flags(tfm);
}

/* IV size required by the selected algorithm. */
static unsigned int bpf_crypto_lskcipher_ivsize(void *tfm)
{
	return crypto_lskcipher_ivsize(tfm);
}

/* Extra per-request state size required by the selected algorithm. */
static unsigned int bpf_crypto_lskcipher_statesize(void *tfm)
{
	return crypto_lskcipher_statesize(tfm);
}

/* Encrypt @len bytes from @src into @dst using IV/state buffer @siv. */
static int bpf_crypto_lskcipher_encrypt(void *tfm, const u8 *src, u8 *dst,
					unsigned int len, u8 *siv)
{
	return crypto_lskcipher_encrypt(tfm, src, dst, len, siv);
}

/* Decrypt @len bytes from @src into @dst using IV/state buffer @siv. */
static int bpf_crypto_lskcipher_decrypt(void *tfm, const u8 *src, u8 *dst,
					unsigned int len, u8 *siv)
{
	return crypto_lskcipher_decrypt(tfm, src, dst, len, siv);
}
/* Ops table registered with the BPF crypto core. Note @name is "skcipher"
 * (the user-facing type string in bpf_crypto_params), even though the
 * implementation is backed by the lskcipher API. @setauthsize is left
 * unset: plain skciphers carry no authentication tag.
 */
static const struct bpf_crypto_type bpf_crypto_lskcipher_type = {
	.alloc_tfm	= bpf_crypto_lskcipher_alloc_tfm,
	.free_tfm	= bpf_crypto_lskcipher_free_tfm,
	.has_algo	= bpf_crypto_lskcipher_has_algo,
	.setkey		= bpf_crypto_lskcipher_setkey,
	.encrypt	= bpf_crypto_lskcipher_encrypt,
	.decrypt	= bpf_crypto_lskcipher_decrypt,
	.ivsize		= bpf_crypto_lskcipher_ivsize,
	.statesize	= bpf_crypto_lskcipher_statesize,
	.get_flags	= bpf_crypto_lskcipher_get_flags,
	.owner		= THIS_MODULE,
	.name		= "skcipher",
};
/* Register the skcipher backend with the BPF crypto core on module load. */
static int __init bpf_crypto_skcipher_init(void)
{
	return bpf_crypto_register_type(&bpf_crypto_lskcipher_type);
}

/* Unregister on module unload; failure here (-ENOENT) would indicate the
 * type was never registered or was removed twice, hence the WARN.
 */
static void __exit bpf_crypto_skcipher_exit(void)
{
	int err = bpf_crypto_unregister_type(&bpf_crypto_lskcipher_type);
	WARN_ON_ONCE(err);
}

module_init(bpf_crypto_skcipher_init);
module_exit(bpf_crypto_skcipher_exit);
MODULE_LICENSE("GPL");
......@@ -1275,6 +1275,7 @@ int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#ifndef _BPF_CRYPTO_H
#define _BPF_CRYPTO_H
/**
 * struct bpf_crypto_type - callbacks implemented by a BPF crypto backend
 * @alloc_tfm:   allocate a backend transform for the named algorithm
 * @free_tfm:    free a transform returned by @alloc_tfm
 * @has_algo:    report whether the named algorithm is available
 * @setkey:      program the cipher key into the transform
 * @setauthsize: set the authentication tag length; NULL for backends
 *               without authentication (e.g. plain skcipher)
 * @encrypt:     encrypt @len bytes from @src to @dst using @iv
 * @decrypt:     decrypt @len bytes from @src to @dst using @iv
 * @ivsize:      IV size required by the transform
 * @statesize:   extra state size required by the transform
 * @get_flags:   expose transform flags (e.g. CRYPTO_TFM_NEED_KEY)
 * @owner:       module providing this backend; pinned while contexts exist
 * @name:        user-facing type string matched against bpf_crypto_params
 *               (14 bytes, must match the params' type field size)
 */
struct bpf_crypto_type {
	void *(*alloc_tfm)(const char *algo);
	void (*free_tfm)(void *tfm);
	int (*has_algo)(const char *algo);
	int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
	int (*setauthsize)(void *tfm, unsigned int authsize);
	int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
	int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
	unsigned int (*ivsize)(void *tfm);
	unsigned int (*statesize)(void *tfm);
	u32 (*get_flags)(void *tfm);
	struct module *owner;
	char name[14];
};

int bpf_crypto_register_type(const struct bpf_crypto_type *type);
int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
#endif /* _BPF_CRYPTO_H */
......@@ -44,6 +44,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
ifeq ($(CONFIG_CRYPTO),y)
obj-$(CONFIG_BPF_SYSCALL) += crypto.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_crypto.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <crypto/skcipher.h>
struct bpf_crypto_type_list {
const struct bpf_crypto_type *type;
struct list_head list;
};
/* BPF crypto initialization parameters struct */
/**
* struct bpf_crypto_params - BPF crypto initialization parameters structure
* @type: The string of crypto operation type.
* @reserved: Reserved member, will be reused for more options in future
* Values:
* 0
* @algo: The string of algorithm to initialize.
* @key: The cipher key used to init crypto algorithm.
* @key_len: The length of cipher key.
* @authsize: The length of authentication tag used by algorithm.
*/
struct bpf_crypto_params {
char type[14];
u8 reserved[2];
char algo[128];
u8 key[256];
u32 key_len;
u32 authsize;
};
static LIST_HEAD(bpf_crypto_types);
static DECLARE_RWSEM(bpf_crypto_types_sem);
/**
* struct bpf_crypto_ctx - refcounted BPF crypto context structure
* @type: The pointer to bpf crypto type
* @tfm: The pointer to instance of crypto API struct.
* @siv_len: Size of IV and state storage for cipher
* @rcu: The RCU head used to free the crypto context with RCU safety.
* @usage: Object reference counter. When the refcount goes to 0, the
* memory is released back to the BPF allocator, which provides
* RCU safety.
*/
struct bpf_crypto_ctx {
const struct bpf_crypto_type *type;
void *tfm;
u32 siv_len;
struct rcu_head rcu;
refcount_t usage;
};
/* Register a crypto backend so BPF programs can request it by name.
 * Returns 0 on success, -EEXIST if a type with the same name is already
 * registered, -ENOMEM on allocation failure. The list is protected by
 * bpf_crypto_types_sem; GFP_KERNEL under the rwsem is fine (sleepable).
 */
int bpf_crypto_register_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -EEXIST;
	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (!strcmp(node->type->name, type->name))
			goto unlock;
	}
	node = kmalloc(sizeof(*node), GFP_KERNEL);
	err = -ENOMEM;
	if (!node)
		goto unlock;
	node->type = type;
	list_add(&node->list, &bpf_crypto_types);
	err = 0;
unlock:
	up_write(&bpf_crypto_types_sem);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_register_type);
/* Remove a previously registered backend, matched by name.
 * Returns 0 on success or -ENOENT if no such type is registered.
 */
int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -ENOENT;
	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, type->name))
			continue;
		list_del(&node->list);
		kfree(node);
		err = 0;
		break;
	}
	up_write(&bpf_crypto_types_sem);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
/* Look up a registered backend by name and pin its owning module.
 * On success the caller holds a module reference and must drop it with
 * module_put(). Returns ERR_PTR(-ENOENT) if the name is unknown, and
 * also if try_module_get() fails (module being unloaded).
 */
static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
{
	const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
	struct bpf_crypto_type_list *node;
	down_read(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, name))
			continue;
		if (try_module_get(node->type->owner))
			type = node->type;
		break;
	}
	up_read(&bpf_crypto_types_sem);
	return type;
}
__bpf_kfunc_start_defs();
/**
 * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
 *
 * Allocates a crypto context that can be used, acquired, and released by
 * a BPF program. The crypto context returned by this function must either
 * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
 * As crypto API functions use GFP_KERNEL allocations, this function can
 * only be used in sleepable BPF programs.
 *
 * bpf_crypto_ctx_create() allocates memory for crypto context.
 * It may return NULL if no memory is available.
 * @params: pointer to struct bpf_crypto_params which contains all the
 *          details needed to initialise crypto context.
 * @params__sz: size of struct bpf_crypto_params used by bpf program
 * @err: integer to store error code when NULL is returned.
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz,
		      int *err)
{
	const struct bpf_crypto_type *type;
	struct bpf_crypto_ctx *ctx;
	/* Reserved bytes must be zero and the caller's struct size must match
	 * exactly; this keeps room for future extension of the params layout.
	 */
	if (!params || params->reserved[0] || params->reserved[1] ||
	    params__sz != sizeof(struct bpf_crypto_params)) {
		*err = -EINVAL;
		return NULL;
	}
	/* Takes a reference on the backend's module on success. */
	type = bpf_crypto_get_type(params->type);
	if (IS_ERR(type)) {
		*err = PTR_ERR(type);
		return NULL;
	}
	if (!type->has_algo(params->algo)) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}
	/* authsize must be provided iff the backend implements setauthsize. */
	if (!!params->authsize ^ !!type->setauthsize) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}
	if (!params->key_len || params->key_len > sizeof(params->key)) {
		*err = -EINVAL;
		goto err_module_put;
	}
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		*err = -ENOMEM;
		goto err_module_put;
	}
	ctx->type = type;
	ctx->tfm = type->alloc_tfm(params->algo);
	if (IS_ERR(ctx->tfm)) {
		*err = PTR_ERR(ctx->tfm);
		goto err_free_ctx;
	}
	if (params->authsize) {
		*err = type->setauthsize(ctx->tfm, params->authsize);
		if (*err)
			goto err_free_tfm;
	}
	*err = type->setkey(ctx->tfm, params->key, params->key_len);
	if (*err)
		goto err_free_tfm;
	/* Reject transforms that still report a missing/invalid key. */
	if (type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY) {
		*err = -EINVAL;
		goto err_free_tfm;
	}
	/* Combined IV + state size callers must supply via the siv dynptr. */
	ctx->siv_len = type->ivsize(ctx->tfm) + type->statesize(ctx->tfm);
	refcount_set(&ctx->usage, 1);
	return ctx;
err_free_tfm:
	type->free_tfm(ctx->tfm);
err_free_ctx:
	kfree(ctx);
err_module_put:
	module_put(type->owner);
	return NULL;
}
/* RCU callback: tear down a context once all RCU readers are done.
 * Drops the backend module reference taken at creation time.
 */
static void crypto_free_cb(struct rcu_head *head)
{
	struct bpf_crypto_ctx *ctx;
	ctx = container_of(head, struct bpf_crypto_ctx, rcu);
	ctx->type->free_tfm(ctx->tfm);
	module_put(ctx->type->owner);
	kfree(ctx);
}
/**
 * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
 * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
 *	 pointer.
 *
 * Acquires a reference to a BPF crypto context. The context returned by this function
 * must either be embedded in a map as a kptr, or freed with
 * bpf_crypto_ctx_release().
 *
 * Returns NULL if the refcount already dropped to zero (context is being
 * freed), which the RCU-protected caller must handle.
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
{
	if (!refcount_inc_not_zero(&ctx->usage))
		return NULL;
	return ctx;
}
/**
 * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
 * @ctx: The crypto context being released.
 *
 * Releases a previously acquired reference to a BPF crypto context. When the final
 * reference of the BPF crypto context has been released, its memory
 * will be released.
 */
__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
{
	/* Defer the actual free past any concurrent RCU readers. */
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, crypto_free_cb);
}
/* Common worker for the encrypt/decrypt kfuncs: validates the three dynptrs
 * and dispatches to the backend. @siv carries IV plus state and its size
 * must equal exactly the size computed at context creation (may be 0).
 * NOTE(review): dst_len is only checked for being non-zero, not against
 * src_len — presumably the backend handles short output buffers; confirm.
 */
static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
			    const struct bpf_dynptr_kern *src,
			    const struct bpf_dynptr_kern *dst,
			    const struct bpf_dynptr_kern *siv,
			    bool decrypt)
{
	u32 src_len, dst_len, siv_len;
	const u8 *psrc;
	u8 *pdst, *piv;
	int err;
	/* Output must be writable. */
	if (__bpf_dynptr_is_rdonly(dst))
		return -EINVAL;
	siv_len = __bpf_dynptr_size(siv);
	src_len = __bpf_dynptr_size(src);
	dst_len = __bpf_dynptr_size(dst);
	if (!src_len || !dst_len)
		return -EINVAL;
	if (siv_len != ctx->siv_len)
		return -EINVAL;
	psrc = __bpf_dynptr_data(src, src_len);
	if (!psrc)
		return -EINVAL;
	pdst = __bpf_dynptr_data_rw(dst, dst_len);
	if (!pdst)
		return -EINVAL;
	/* A zero-sized siv (e.g. ECB mode) maps to a NULL IV pointer. */
	piv = siv_len ? __bpf_dynptr_data_rw(siv, siv_len) : NULL;
	if (siv_len && !piv)
		return -EINVAL;
	err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
		      : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);
	return err;
}
/**
 * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
 * @ctx: The crypto context being used. The ctx must be a trusted pointer.
 * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer.
 * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
 * @siv: bpf_dynptr to IV data and state data to be used by decryptor.
 *
 * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr_kern *src,
				   const struct bpf_dynptr_kern *dst,
				   const struct bpf_dynptr_kern *siv)
{
	return bpf_crypto_crypt(ctx, src, dst, siv, true);
}
/**
 * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
 * @ctx: The crypto context being used. The ctx must be a trusted pointer.
 * @src: bpf_dynptr to the plain data. Must be a trusted pointer.
 * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer.
 * @siv: bpf_dynptr to IV data and state data to be used by encryptor.
 *
 * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr_kern *src,
				   const struct bpf_dynptr_kern *dst,
				   const struct bpf_dynptr_kern *siv)
{
	return bpf_crypto_crypt(ctx, src, dst, siv, false);
}
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(crypt_init_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_KFUNCS_END(crypt_init_kfunc_btf_ids)
static const struct btf_kfunc_id_set crypt_init_kfunc_set = {
.owner = THIS_MODULE,
.set = &crypt_init_kfunc_btf_ids,
};
BTF_KFUNCS_START(crypt_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU)
BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU)
BTF_KFUNCS_END(crypt_kfunc_btf_ids)
static const struct btf_kfunc_id_set crypt_kfunc_set = {
.owner = THIS_MODULE,
.set = &crypt_kfunc_btf_ids,
};
BTF_ID_LIST(bpf_crypto_dtor_ids)
BTF_ID(struct, bpf_crypto_ctx)
BTF_ID(func, bpf_crypto_ctx_release)
/* Register the crypto kfunc sets: encrypt/decrypt for networking program
 * types (tc/act/XDP), ctx create/acquire/release for syscall programs
 * (creation needs a sleepable context), plus the destructor used when a
 * bpf_crypto_ctx kptr is dropped from a map.
 */
static int __init crypto_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = {
		{
			.btf_id	       = bpf_crypto_dtor_ids[0],
			.kfunc_btf_id  = bpf_crypto_dtor_ids[1]
		},
	};
	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					       &crypt_init_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors,
						  ARRAY_SIZE(bpf_crypto_dtors),
						  THIS_MODULE);
}
late_initcall(crypto_kfunc_init);
......@@ -1583,7 +1583,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)
static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
return ptr->size & DYNPTR_RDONLY_BIT;
}
......
......@@ -5310,6 +5310,7 @@ BTF_ID(struct, cgroup)
BTF_ID(struct, bpf_cpumask)
#endif
BTF_ID(struct, task_struct)
BTF_ID(struct, bpf_crypto_ctx)
BTF_SET_END(rcu_protected_types)
static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
......
......@@ -730,6 +730,7 @@ $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tas
$(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.skel.h
$(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h
$(OUTPUT)/bench_htab_mem.o: $(OUTPUT)/htab_mem_bench.skel.h
$(OUTPUT)/bench_bpf_crypto.o: $(OUTPUT)/crypto_bench.skel.h
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
......@@ -749,6 +750,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
$(OUTPUT)/bench_bpf_hashmap_lookup.o \
$(OUTPUT)/bench_local_storage_create.o \
$(OUTPUT)/bench_htab_mem.o \
$(OUTPUT)/bench_bpf_crypto.o \
#
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
......
......@@ -281,6 +281,7 @@ extern struct argp bench_hashmap_lookup_argp;
extern struct argp bench_local_storage_create_argp;
extern struct argp bench_htab_mem_argp;
extern struct argp bench_trigger_batch_argp;
extern struct argp bench_crypto_argp;
static const struct argp_child bench_parsers[] = {
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
......@@ -294,6 +295,7 @@ static const struct argp_child bench_parsers[] = {
{ &bench_local_storage_create_argp, 0, "local-storage-create benchmark", 0 },
{ &bench_htab_mem_argp, 0, "hash map memory benchmark", 0 },
{ &bench_trigger_batch_argp, 0, "BPF triggering benchmark", 0 },
{ &bench_crypto_argp, 0, "bpf crypto benchmark", 0 },
{},
};
......@@ -538,6 +540,8 @@ extern const struct bench bench_local_storage_tasks_trace;
extern const struct bench bench_bpf_hashmap_lookup;
extern const struct bench bench_local_storage_create;
extern const struct bench bench_htab_mem;
extern const struct bench bench_crypto_encrypt;
extern const struct bench bench_crypto_decrypt;
static const struct bench *benchs[] = {
&bench_count_global,
......@@ -590,6 +594,8 @@ static const struct bench *benchs[] = {
&bench_bpf_hashmap_lookup,
&bench_local_storage_create,
&bench_htab_mem,
&bench_crypto_encrypt,
&bench_crypto_decrypt,
};
static void find_benchmark(void)
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <argp.h>
#include "bench.h"
#include "crypto_bench.skel.h"
#define MAX_CIPHER_LEN 32
static char *input;
static struct crypto_ctx {
struct crypto_bench *skel;
int pfd;
} ctx;
static struct crypto_args {
u32 crypto_len;
char *crypto_cipher;
} args = {
.crypto_len = 16,
.crypto_cipher = "ecb(aes)",
};
enum {
ARG_CRYPTO_LEN = 5000,
ARG_CRYPTO_CIPHER = 5001,
};
static const struct argp_option opts[] = {
{ "crypto-len", ARG_CRYPTO_LEN, "CRYPTO_LEN", 0,
"Set the length of crypto buffer" },
{ "crypto-cipher", ARG_CRYPTO_CIPHER, "CRYPTO_CIPHER", 0,
"Set the cipher to use (default:ecb(aes))" },
{},
};
/* argp callback: parse the benchmark-specific command line options into
 * the file-scope @args. Buffer length is bounded by the size of the BPF
 * program's dst buffer; cipher name length is bounded by MAX_CIPHER_LEN.
 */
static error_t crypto_parse_arg(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case ARG_CRYPTO_LEN:
		args.crypto_len = strtoul(arg, NULL, 10);
		if (!args.crypto_len ||
		    args.crypto_len > sizeof(ctx.skel->bss->dst)) {
			fprintf(stderr, "Invalid crypto buffer len (limit %zu)\n",
				sizeof(ctx.skel->bss->dst));
			argp_usage(state);
		}
		break;
	case ARG_CRYPTO_CIPHER:
		args.crypto_cipher = strdup(arg);
		/* strdup() can fail; the strlen() calls below would then
		 * dereference NULL, which is undefined behavior.
		 */
		if (!args.crypto_cipher) {
			fprintf(stderr, "Failed to allocate memory for cipher name\n");
			argp_usage(state);
		}
		if (!strlen(args.crypto_cipher) ||
		    strlen(args.crypto_cipher) > MAX_CIPHER_LEN) {
			fprintf(stderr, "Invalid crypto cipher len (limit %d)\n",
				MAX_CIPHER_LEN);
			argp_usage(state);
		}
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}
const struct argp bench_crypto_argp = {
.options = opts,
.parser = crypto_parse_arg,
};
/* Bench framework validation hook: this benchmark is producer-only. */
static void crypto_validate(void)
{
	if (env.consumer_cnt != 0) {
		fprintf(stderr, "bpf crypto benchmark doesn't support consumer!\n");
		exit(1);
	}
}
/* Common setup for both benchmark variants: open the skeleton, configure
 * the cipher/key globals, generate a random printable input buffer, load
 * the skeleton, and run the crypto_setup BPF program once to create the
 * crypto context. Exits on any failure. The input buffer intentionally
 * lives for the whole benchmark run and is never freed.
 */
static void crypto_setup(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	int err, pfd;
	size_t i, sz;
	sz = args.crypto_len;
	if (!sz || sz > sizeof(ctx.skel->bss->dst)) {
		fprintf(stderr, "invalid encrypt buffer size (source %zu, target %zu)\n",
			sz, sizeof(ctx.skel->bss->dst));
		exit(1);
	}
	setup_libbpf();
	ctx.skel = crypto_bench__open();
	if (!ctx.skel) {
		fprintf(stderr, "failed to open skeleton\n");
		exit(1);
	}
	snprintf(ctx.skel->bss->cipher, 128, "%s", args.crypto_cipher);
	memcpy(ctx.skel->bss->key, "12345678testtest", 16);
	ctx.skel->bss->key_len = 16;
	ctx.skel->bss->authsize = 0;
	srandom(time(NULL));
	input = malloc(sz);
	/* malloc() result was previously unchecked: a NULL return would
	 * make the fill loop below write through a NULL pointer.
	 */
	if (!input) {
		fprintf(stderr, "failed to allocate input buffer\n");
		crypto_bench__destroy(ctx.skel);
		exit(1);
	}
	for (i = 0; i < sz - 1; i++)
		input[i] = '1' + random() % 9;
	input[sz - 1] = '\0';
	ctx.skel->rodata->len = args.crypto_len;
	err = crypto_bench__load(ctx.skel);
	if (err) {
		fprintf(stderr, "failed to load skeleton\n");
		crypto_bench__destroy(ctx.skel);
		exit(1);
	}
	pfd = bpf_program__fd(ctx.skel->progs.crypto_setup);
	if (pfd < 0) {
		fprintf(stderr, "failed to get fd for setup prog\n");
		crypto_bench__destroy(ctx.skel);
		exit(1);
	}
	err = bpf_prog_test_run_opts(pfd, &opts);
	if (err || ctx.skel->bss->status) {
		fprintf(stderr, "failed to run setup prog: err %d, status %d\n",
			err, ctx.skel->bss->status);
		crypto_bench__destroy(ctx.skel);
		exit(1);
	}
}
/* Variant setup: common setup plus selecting the encrypt test program. */
static void crypto_encrypt_setup(void)
{
	crypto_setup();
	ctx.pfd = bpf_program__fd(ctx.skel->progs.crypto_encrypt);
}

/* Variant setup: common setup plus selecting the decrypt test program. */
static void crypto_decrypt_setup(void)
{
	crypto_setup();
	ctx.pfd = bpf_program__fd(ctx.skel->progs.crypto_decrypt);
}
/* Periodic measurement hook: read-and-reset the BPF-side hit counter. */
static void crypto_measure(struct bench_res *res)
{
	res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
}

/* Producer thread: drive the selected BPF program in a tight loop via
 * BPF_PROG_RUN, 64 repetitions per syscall, feeding the random input.
 * Errors are deliberately ignored; throughput is tracked via @hits.
 */
static void *crypto_producer(void *unused)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.repeat = 64,
		.data_in = input,
		.data_size_in = args.crypto_len,
	);
	while (true)
		(void)bpf_prog_test_run_opts(ctx.pfd, &opts);
	return NULL;
}
const struct bench bench_crypto_encrypt = {
.name = "crypto-encrypt",
.argp = &bench_crypto_argp,
.validate = crypto_validate,
.setup = crypto_encrypt_setup,
.producer_thread = crypto_producer,
.measure = crypto_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
const struct bench bench_crypto_decrypt = {
.name = "crypto-decrypt",
.argp = &bench_crypto_argp,
.validate = crypto_validate,
.setup = crypto_decrypt_setup,
.producer_thread = crypto_producer,
.measure = crypto_measure,
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
......@@ -13,7 +13,12 @@ CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_USER_API=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_AES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/in6.h>
#include <linux/if_alg.h>
#include "test_progs.h"
#include "network_helpers.h"
#include "crypto_sanity.skel.h"
#include "crypto_basic.skel.h"
#define NS_TEST "crypto_sanity_ns"
#define IPV6_IFACE_ADDR "face::1"
static const unsigned char crypto_key[] = "testtest12345678";
static const char plain_text[] = "stringtoencrypt0";
static int opfd = -1, tfmfd = -1;
static const char algo[] = "ecb(aes)";
/* Set up an AF_ALG skcipher socket pair for ecb(aes) as the reference
 * implementation the BPF results are compared against. Stores fds in the
 * file-scope opfd/tfmfd; returns 0 on success or errno on failure.
 * Partially opened fds are cleaned up by deinit_afalg() in the caller's
 * failure path.
 */
static int init_afalg(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "skcipher",
		.salg_name = "ecb(aes)"
	};
	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd == -1)
		return errno;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == -1)
		return errno;
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, crypto_key, 16) == -1)
		return errno;
	opfd = accept(tfmfd, NULL, 0);
	if (opfd == -1)
		return errno;
	return 0;
}

/* Close whichever AF_ALG fds were opened; -1 means never opened. */
static void deinit_afalg(void)
{
	if (tfmfd != -1)
		close(tfmfd);
	if (opfd != -1)
		close(opfd);
}
/* Run one encrypt/decrypt operation of @size bytes through the AF_ALG
 * socket (kernel reference path): the operation is selected via an
 * ALG_SET_OP control message, plaintext/ciphertext is sent as the iovec,
 * and the result is read back into @dst.
 * NOTE(review): sendmsg()/read() return values are ignored — a failure
 * leaves @dst untouched and would surface only as a later buffer
 * mismatch; consider checking and asserting here.
 */
static void do_crypt_afalg(const void *src, void *dst, int size, bool encrypt)
{
	struct msghdr msg = {};
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(4)] = {0};
	struct iovec iov;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = encrypt ? ALG_OP_ENCRYPT : ALG_OP_DECRYPT;
	iov.iov_base = (char *)src;
	iov.iov_len = size;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	sendmsg(opfd, &msg, 0);
	read(opfd, dst, size);
}
/* Run the verifier-level tests embedded in the crypto_basic skeleton. */
void test_crypto_basic(void)
{
	RUN_TESTS(crypto_basic);
}
/* End-to-end sanity test: encrypt and decrypt a UDP payload on the lo
 * egress path with tc BPF programs, and compare each result against the
 * same operation performed through the kernel's AF_ALG interface.
 */
void test_crypto_sanity(void)
{
	LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_attach_enc);
	LIBBPF_OPTS(bpf_tc_opts, tc_attach_dec);
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	struct nstoken *nstoken = NULL;
	struct crypto_sanity *skel;
	char afalg_plain[16] = {0};
	char afalg_dst[16] = {0};
	struct sockaddr_in6 addr;
	int sockfd, err, pfd;
	socklen_t addrlen;
	u16 udp_test_port;
	skel = crypto_sanity__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel open"))
		return;
	/* Isolated netns with an IPv6 address on lo for the test traffic. */
	SYS(fail, "ip netns add %s", NS_TEST);
	SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
	SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
	nstoken = open_netns(NS_TEST);
	if (!ASSERT_OK_PTR(nstoken, "open_netns"))
		goto fail;
	err = init_afalg();
	if (!ASSERT_OK(err, "AF_ALG init fail"))
		goto fail;
	qdisc_hook.ifindex = if_nametoindex("lo");
	if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
		goto fail;
	/* Hand the same key/algo to the BPF side via skeleton globals. */
	skel->bss->key_len = 16;
	skel->bss->authsize = 0;
	udp_test_port = skel->data->udp_test_port;
	memcpy(skel->bss->key, crypto_key, sizeof(crypto_key));
	snprintf(skel->bss->algo, 128, "%s", algo);
	/* Create the crypto context via the sleepable syscall program. */
	pfd = bpf_program__fd(skel->progs.skb_crypto_setup);
	if (!ASSERT_GT(pfd, 0, "skb_crypto_setup fd"))
		goto fail;
	err = bpf_prog_test_run_opts(pfd, &opts);
	if (!ASSERT_OK(err, "skb_crypto_setup") ||
	    !ASSERT_OK(opts.retval, "skb_crypto_setup retval"))
		goto fail;
	if (!ASSERT_OK(skel->bss->status, "skb_crypto_setup status"))
		goto fail;
	err = bpf_tc_hook_create(&qdisc_hook);
	if (!ASSERT_OK(err, "create qdisc hook"))
		goto fail;
	addrlen = sizeof(addr);
	err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, udp_test_port,
			    (void *)&addr, &addrlen);
	if (!ASSERT_OK(err, "make_sockaddr"))
		goto fail;
	/* Phase 1: encrypt in BPF on egress, compare against AF_ALG. */
	tc_attach_enc.prog_fd = bpf_program__fd(skel->progs.encrypt_sanity);
	err = bpf_tc_attach(&qdisc_hook, &tc_attach_enc);
	if (!ASSERT_OK(err, "attach encrypt filter"))
		goto fail;
	sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (!ASSERT_NEQ(sockfd, -1, "encrypt socket"))
		goto fail;
	err = sendto(sockfd, plain_text, sizeof(plain_text), 0, (void *)&addr, addrlen);
	close(sockfd);
	if (!ASSERT_EQ(err, sizeof(plain_text), "encrypt send"))
		goto fail;
	do_crypt_afalg(plain_text, afalg_dst, sizeof(afalg_dst), true);
	if (!ASSERT_OK(skel->bss->status, "encrypt status"))
		goto fail;
	if (!ASSERT_STRNEQ(skel->bss->dst, afalg_dst, sizeof(afalg_dst), "encrypt AF_ALG"))
		goto fail;
	/* Phase 2: swap in the decrypt program and round-trip the ciphertext. */
	tc_attach_enc.flags = tc_attach_enc.prog_fd = tc_attach_enc.prog_id = 0;
	err = bpf_tc_detach(&qdisc_hook, &tc_attach_enc);
	if (!ASSERT_OK(err, "bpf_tc_detach encrypt"))
		goto fail;
	tc_attach_dec.prog_fd = bpf_program__fd(skel->progs.decrypt_sanity);
	err = bpf_tc_attach(&qdisc_hook, &tc_attach_dec);
	if (!ASSERT_OK(err, "attach decrypt filter"))
		goto fail;
	sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (!ASSERT_NEQ(sockfd, -1, "decrypt socket"))
		goto fail;
	err = sendto(sockfd, afalg_dst, sizeof(afalg_dst), 0, (void *)&addr, addrlen);
	close(sockfd);
	if (!ASSERT_EQ(err, sizeof(afalg_dst), "decrypt send"))
		goto fail;
	do_crypt_afalg(afalg_dst, afalg_plain, sizeof(afalg_plain), false);
	if (!ASSERT_OK(skel->bss->status, "decrypt status"))
		goto fail;
	if (!ASSERT_STRNEQ(skel->bss->dst, afalg_plain, sizeof(afalg_plain), "decrypt AF_ALG"))
		goto fail;
	tc_attach_dec.flags = tc_attach_dec.prog_fd = tc_attach_dec.prog_id = 0;
	err = bpf_tc_detach(&qdisc_hook, &tc_attach_dec);
	ASSERT_OK(err, "bpf_tc_detach decrypt");
fail:
	/* Cleanup is safe regardless of how far setup got. */
	close_netns(nstoken);
	deinit_afalg();
	SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
	crypto_sanity__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
#include "crypto_common.h"
int status;
/* Positive test: create a crypto context and release it immediately; any
 * creation error is reported through the global @status.
 */
SEC("syscall")
int crypto_release(void *ctx)
{
	struct bpf_crypto_params params = {
		.type = "skcipher",
		.algo = "ecb(aes)",
		.key_len = 16,
	};
	struct bpf_crypto_ctx *cctx;
	int err = 0;
	status = 0;
	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
	if (!cctx) {
		status = err;
		return 0;
	}
	bpf_crypto_ctx_release(cctx);
	return 0;
}
/* Negative test: acquire a second reference and release only one; the
 * verifier must reject the program with "Unreleased reference" because
 * the reference taken by bpf_crypto_ctx_create() is never dropped.
 */
SEC("syscall")
__failure __msg("Unreleased reference")
int crypto_acquire(void *ctx)
{
	struct bpf_crypto_params params = {
		.type = "skcipher",
		.algo = "ecb(aes)",
		.key_len = 16,
	};
	struct bpf_crypto_ctx *cctx;
	int err = 0;
	status = 0;
	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
	if (!cctx) {
		status = err;
		return 0;
	}
	cctx = bpf_crypto_ctx_acquire(cctx);
	if (!cctx)
		return -EINVAL;
	bpf_crypto_ctx_release(cctx);
	return 0;
}
char __license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
#include "crypto_common.h"
const volatile unsigned int len = 16;
char cipher[128] = {};
u32 key_len, authsize;
char dst[256] = {};
u8 key[256] = {};
long hits = 0;
int status;
/* Sleepable setup program: create a crypto context from the cipher/key
 * globals filled in by userspace and stash it in the kptr map so the tc
 * programs can use it. Errors are reported via the global @status;
 * -EEXIST from insertion is tolerated (context already installed).
 */
SEC("syscall")
int crypto_setup(void *args)
{
	struct bpf_crypto_ctx *cctx;
	struct bpf_crypto_params params = {
		.type = "skcipher",
		.key_len = key_len,
		.authsize = authsize,
	};
	int err = 0;
	status = 0;
	if (!cipher[0] || !key_len || key_len > 256) {
		status = -EINVAL;
		return 0;
	}
	__builtin_memcpy(&params.algo, cipher, sizeof(cipher));
	__builtin_memcpy(&params.key, key, sizeof(key));
	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
	if (!cctx) {
		status = err;
		return 0;
	}
	/* crypto_ctx_insert() takes ownership of the reference on success
	 * and releases it on failure — no leak on any path.
	 */
	err = crypto_ctx_insert(cctx);
	if (err && err != -EEXIST)
		status = err;
	return 0;
}
/* Benchmark tc program: encrypt the packet payload into the global @dst
 * buffer using the context from the kptr map. A zero-length dynptr is
 * passed as the IV (suitable for ECB). The result code lands in @status
 * and each invocation bumps @hits for throughput measurement.
 */
SEC("tc")
int crypto_encrypt(struct __sk_buff *skb)
{
	struct __crypto_ctx_value *v;
	struct bpf_crypto_ctx *ctx;
	struct bpf_dynptr psrc, pdst, iv;
	v = crypto_ctx_value_lookup();
	if (!v) {
		status = -ENOENT;
		return 0;
	}
	ctx = v->ctx;
	if (!ctx) {
		status = -ENOENT;
		return 0;
	}
	bpf_dynptr_from_skb(skb, 0, &psrc);
	bpf_dynptr_from_mem(dst, len, 0, &pdst);
	bpf_dynptr_from_mem(dst, 0, 0, &iv);
	status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
	__sync_add_and_fetch(&hits, 1);
	return 0;
}
/* Benchmark tc program: decrypt the packet payload into the global @dst
 * buffer using the context from the kptr map. A zero-length dynptr is
 * passed as the IV (suitable for ECB). The result code lands in @status
 * and each invocation bumps @hits for throughput measurement.
 *
 * Lookup failures are now recorded in @status (and 0 returned) for
 * consistency with crypto_encrypt: a tc program's return value is a TC
 * action code, not an errno, and userspace reads errors via @status.
 */
SEC("tc")
int crypto_decrypt(struct __sk_buff *skb)
{
	struct bpf_dynptr psrc, pdst, iv;
	struct __crypto_ctx_value *v;
	struct bpf_crypto_ctx *ctx;
	v = crypto_ctx_value_lookup();
	if (!v) {
		status = -ENOENT;
		return 0;
	}
	ctx = v->ctx;
	if (!ctx) {
		status = -ENOENT;
		return 0;
	}
	bpf_dynptr_from_skb(skb, 0, &psrc);
	bpf_dynptr_from_mem(dst, len, 0, &pdst);
	bpf_dynptr_from_mem(dst, 0, 0, &iv);
	status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
	__sync_add_and_fetch(&hits, 1);
	return 0;
}
char __license[] SEC("license") = "GPL";
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#ifndef _CRYPTO_COMMON_H
#define _CRYPTO_COMMON_H
#include "errno.h"
#include <stdbool.h>
struct bpf_crypto_ctx *bpf_crypto_ctx_create(const struct bpf_crypto_params *params,
u32 params__sz, int *err) __ksym;
struct bpf_crypto_ctx *bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) __ksym;
void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __ksym;
int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, const struct bpf_dynptr *src,
const struct bpf_dynptr *dst, const struct bpf_dynptr *iv) __ksym;
struct __crypto_ctx_value {
struct bpf_crypto_ctx __kptr * ctx;
};
struct array_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, int);
__type(value, struct __crypto_ctx_value);
__uint(max_entries, 1);
} __crypto_ctx_map SEC(".maps");
/* Return slot 0 of __crypto_ctx_map, or NULL if the element is absent. */
static inline struct __crypto_ctx_value *crypto_ctx_value_lookup(void)
{
	u32 index = 0;

	return bpf_map_lookup_elem(&__crypto_ctx_map, &index);
}
/* Publish @ctx in slot 0 of __crypto_ctx_map.
 *
 * Takes ownership of the caller's reference on @ctx: on success the
 * reference is transferred into the map's __kptr field; on every failure
 * path the reference is released here, so the caller must not touch @ctx
 * after this call.  Returns 0 on success or a negative errno (-EEXIST
 * means a context was already stored; the new one still replaced it).
 */
static inline int crypto_ctx_insert(struct bpf_crypto_ctx *ctx)
{
struct __crypto_ctx_value local, *v;
struct bpf_crypto_ctx *old;
u32 key = 0;
int err;
/* Make sure the map element exists (with a NULL kptr) before the
 * exchange below.
 */
local.ctx = NULL;
err = bpf_map_update_elem(&__crypto_ctx_map, &key, &local, 0);
if (err) {
bpf_crypto_ctx_release(ctx);
return err;
}
v = bpf_map_lookup_elem(&__crypto_ctx_map, &key);
if (!v) {
bpf_crypto_ctx_release(ctx);
return -ENOENT;
}
/* Atomically move our reference into the map; any previously stored
 * context comes back and must be released by us.
 */
old = bpf_kptr_xchg(&v->ctx, ctx);
if (old) {
bpf_crypto_ctx_release(old);
return -EEXIST;
}
return 0;
}
#endif /* _CRYPTO_COMMON_H */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
#include "crypto_common.h"
/* Test parameters and results, written/read by the userspace harness
 * before/after the programs run.
 */
unsigned char key[256] = {}; /* raw cipher key; key_len bytes are meaningful */
u16 udp_test_port = 7777; /* UDP dest port selecting the test traffic */
u32 authsize, key_len; /* passed verbatim into bpf_crypto_params */
char algo[128] = {}; /* algorithm name for bpf_crypto_ctx_create() */
char dst[16] = {}; /* cipher output buffer, checked by the harness */
int status; /* result of the last operation (0 or -errno) */
/* Check that @skb is an IPv6/UDP packet for udp_test_port with at least
 * 16 bytes of payload, and initialize @psrc as a dynptr covering exactly
 * those 16 payload bytes.  Returns 0 on success, -1 if the packet does
 * not match (caller drops it without touching status).
 */
static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
{
struct ipv6hdr ip6h;
struct udphdr udph;
u32 offset;
if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
return -1;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
return -1;
/* NOTE(review): no extension-header walk — assumes UDP immediately
 * follows the fixed IPv6 header, which holds for the test traffic.
 */
if (ip6h.nexthdr != IPPROTO_UDP)
return -1;
if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
return -1;
if (udph.dest != __bpf_htons(udp_test_port))
return -1;
offset = ETH_HLEN + sizeof(ip6h) + sizeof(udph);
if (skb->len < offset + 16)
return -1;
/* let's make sure that 16 bytes of payload are in the linear part of skb */
bpf_skb_pull_data(skb, offset + 16);
bpf_dynptr_from_skb(skb, 0, psrc);
/* narrow the dynptr to the 16-byte payload window */
bpf_dynptr_adjust(psrc, offset, offset + 16);
return 0;
}
SEC("syscall")
int skb_crypto_setup(void *ctx)
{
struct bpf_crypto_params params = {
.type = "skcipher",
.key_len = key_len,
.authsize = authsize,
};
struct bpf_crypto_ctx *cctx;
int err = 0;
status = 0;
if (key_len > 256) {
status = -EINVAL;
return 0;
}
__builtin_memcpy(&params.algo, algo, sizeof(algo));
__builtin_memcpy(&params.key, key, sizeof(key));
cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
if (!cctx) {
status = err;
return 0;
}
err = crypto_ctx_insert(cctx);
if (err && err != -EEXIST)
status = err;
return 0;
}
SEC("tc")
int decrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
struct bpf_dynptr psrc, pdst, iv;
int err;
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
return TC_ACT_SHOT;
}
v = crypto_ctx_value_lookup();
if (!v) {
status = -ENOENT;
return TC_ACT_SHOT;
}
ctx = v->ctx;
if (!ctx) {
status = -ENOENT;
return TC_ACT_SHOT;
}
/* dst is a global variable to make testing part easier to check. In real
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
/* iv dynptr has to be initialized with 0 size, but proper memory region
* has to be provided anyway
*/
bpf_dynptr_from_mem(dst, 0, 0, &iv);
status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv);
return TC_ACT_SHOT;
}
SEC("tc")
int encrypt_sanity(struct __sk_buff *skb)
{
struct __crypto_ctx_value *v;
struct bpf_crypto_ctx *ctx;
struct bpf_dynptr psrc, pdst, iv;
int err;
status = 0;
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
return TC_ACT_SHOT;
}
v = crypto_ctx_value_lookup();
if (!v) {
status = -ENOENT;
return TC_ACT_SHOT;
}
ctx = v->ctx;
if (!ctx) {
status = -ENOENT;
return TC_ACT_SHOT;
}
/* dst is a global variable to make testing part easier to check. In real
* production code, a percpu map should be used to store the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
/* iv dynptr has to be initialized with 0 size, but proper memory region
* has to be provided anyway
*/
bpf_dynptr_from_mem(dst, 0, 0, &iv);
status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv);
return TC_ACT_SHOT;
}
char __license[] SEC("license") = "GPL";
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment