Commit 6499fe6e authored by David Vernet, committed by Alexei Starovoitov

bpf: Remove bpf_cgroup_kptr_get() kfunc

Now that bpf_cgroup_acquire() is KF_RCU | KF_RET_NULL,
bpf_cgroup_kptr_get() is redundant. Let's remove it, and update
selftests to instead use bpf_cgroup_acquire() where appropriate. The
next patch will update the BPF documentation to not mention
bpf_cgroup_kptr_get().
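
For example, where a program previously called bpf_cgroup_kptr_get() on a
cgroup kptr stored in a map value, it can now take a reference with
bpf_cgroup_acquire() under bpf_rcu_read_lock(). A minimal sketch, assuming a
map value layout and lookup helper shaped like the ones in the selftests
touched below:

	struct __cgrps_kfunc_map_value *v;
	struct cgroup *kptr;

	v = cgrps_kfunc_map_value_lookup(cgrp);
	if (!v)
		return 0;

	bpf_rcu_read_lock();
	kptr = v->cgrp;
	if (kptr)
		/* KF_RCU | KF_RET_NULL: accepts the RCU-protected map kptr
		 * and may return NULL if the cgroup is already being freed.
		 */
		kptr = bpf_cgroup_acquire(kptr);
	bpf_rcu_read_unlock();

	if (kptr)
		bpf_cgroup_release(kptr);

The acquired reference outlives the RCU read section and must still be
dropped with bpf_cgroup_release().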
Signed-off-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/r/20230411041633.179404-2-void@manifault.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 1d712839
@@ -2040,37 +2040,6 @@ __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
 	return cgroup_tryget(cgrp) ? cgrp : NULL;
 }
 
-/**
- * bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup
- * kptr acquired by this kfunc which is not subsequently stored in a map, must
- * be released by calling bpf_cgroup_release().
- * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
- */
-__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
-{
-	struct cgroup *cgrp;
-
-	rcu_read_lock();
-	/* Another context could remove the cgroup from the map and release it
-	 * at any time, including after we've done the lookup above. This is
-	 * safe because we're in an RCU read region, so the cgroup is
-	 * guaranteed to remain valid until at least the rcu_read_unlock()
-	 * below.
-	 */
-	cgrp = READ_ONCE(*cgrpp);
-	if (cgrp && !cgroup_tryget(cgrp))
-		/* If the cgroup had been removed from the map and freed as
-		 * described above, cgroup_tryget() will return false. The
-		 * cgroup will be freed at some point after the current RCU gp
-		 * has ended, so just return NULL to the user.
-		 */
-		cgrp = NULL;
-	rcu_read_unlock();
-
-	return cgrp;
-}
-
 /**
  * bpf_cgroup_release - Release the reference acquired on a cgroup.
  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
@@ -2314,7 +2283,6 @@ BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
 #ifdef CONFIG_CGROUPS
 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
-BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
......
@@ -21,10 +21,11 @@ struct hash_map {
 } __cgrps_kfunc_map SEC(".maps");
 
 struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym;
 void bpf_cgroup_release(struct cgroup *p) __ksym;
 struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
 struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+void bpf_rcu_read_lock(void) __ksym;
+void bpf_rcu_read_unlock(void) __ksym;
 
 static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
 {
......
@@ -133,59 +133,6 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
 	return 0;
 }
 
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr;
-
-	/* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */
-	kptr = bpf_cgroup_kptr_get(&cgrp);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
-
-	return 0;
-}
-
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr, *acquired;
-
-	acquired = bpf_cgroup_acquire(cgrp);
-	if (!acquired)
-		return 0;
-
-	/* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */
-	kptr = bpf_cgroup_kptr_get(&acquired);
-	bpf_cgroup_release(acquired);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
-
-	return 0;
-}
-
-SEC("tp_btf/cgroup_mkdir")
-__failure __msg("arg#0 expected pointer to map value")
-int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
-{
-	struct cgroup *kptr;
-
-	/* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */
-	kptr = bpf_cgroup_kptr_get(NULL);
-	if (!kptr)
-		return 0;
-
-	bpf_cgroup_release(kptr);
-
-	return 0;
-}
-
 SEC("tp_btf/cgroup_mkdir")
 __failure __msg("Unreleased reference")
 int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
@@ -207,8 +154,8 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
 }
 
 SEC("tp_btf/cgroup_mkdir")
-__failure __msg("Unreleased reference")
-int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path)
 {
 	struct cgroup *kptr;
 	struct __cgrps_kfunc_map_value *v;
@@ -217,11 +164,12 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
 	if (!v)
 		return 0;
 
-	kptr = bpf_cgroup_kptr_get(&v->cgrp);
-	if (!kptr)
-		return 0;
-
-	/* Kptr acquired above is never released. */
+	bpf_rcu_read_lock();
+	kptr = v->cgrp;
+	if (kptr)
+		/* Can't release a cgroup kptr stored in a map. */
+		bpf_cgroup_release(kptr);
+	bpf_rcu_read_unlock();
 
 	return 0;
 }
......
@@ -126,13 +126,11 @@ int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
 		return 0;
 	}
 
-	kptr = bpf_cgroup_kptr_get(&v->cgrp);
-	if (!kptr) {
+	bpf_rcu_read_lock();
+	kptr = v->cgrp;
+	if (!kptr)
 		err = 3;
-		return 0;
-	}
-
-	bpf_cgroup_release(kptr);
+	bpf_rcu_read_unlock();
 
 	return 0;
 }
......