Commit 32637e33 authored by Toke Høiland-Jørgensen, committed by Alexei Starovoitov

bpf: Expand map key argument of bpf_redirect_map to u64

For queueing packets in XDP we want to add a new redirect map type with
support for 64-bit indexes. To prepare for this, expand the width of the
'key' argument to the bpf_redirect_map() helper. Since BPF registers are
always 64-bit, this should be safe to do after the fact.
Acked-by: Song Liu <song@kernel.org>
Reviewed-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20221108140601.149971-3-toke@redhat.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 14d898f3
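
As a quick program-side illustration before the diff (a minimal sketch, not part of the patch: the map, program, and section names below are hypothetical), an XDP program can now hand bpf_redirect_map() a key held in a full 64-bit register:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical devmap of egress devices; name and sizing are made up. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_u64(struct xdp_md *ctx)
{
	/* BPF registers are 64-bit, so the key now passes through the
	 * helper without being truncated to u32 at the call boundary. */
	__u64 key = 1;

	return bpf_redirect_map(&tx_ports, key, 0);
}

char _license[] SEC("license") = "GPL";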
include/linux/bpf.h
@@ -135,7 +135,7 @@ struct bpf_map_ops {
 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
 
 	/* Misc helpers.*/
-	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+	int (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
 
 	/* map_meta_equal must be implemented for maps that can be
 	 * used as an inner map. It is a runtime check to ensure
include/linux/filter.h
@@ -643,13 +643,13 @@ struct bpf_nh_params {
 };
 
 struct bpf_redirect_info {
-	u32 flags;
-	u32 tgt_index;
+	u64 tgt_index;
 	void *tgt_value;
 	struct bpf_map *map;
+	u32 flags;
+	u32 kern_flags;
 	u32 map_id;
 	enum bpf_map_type map_type;
-	u32 kern_flags;
 	struct bpf_nh_params nh;
 };
@@ -1504,7 +1504,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
 }
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
-static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex,
+static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
 						  u64 flags, const u64 flag_mask,
 						  void *lookup_elem(struct bpf_map *map, u32 key))
 {
@@ -1515,7 +1515,7 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind
 	if (unlikely(flags & ~(action_mask | flag_mask)))
 		return XDP_ABORTED;
 
-	ri->tgt_value = lookup_elem(map, ifindex);
+	ri->tgt_value = lookup_elem(map, index);
 	if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
 		/* If the lookup fails we want to clear out the state in the
 		 * redirect_info struct completely, so that if an eBPF program
@@ -1527,7 +1527,7 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind
 		return flags & action_mask;
 	}
 
-	ri->tgt_index = ifindex;
+	ri->tgt_index = index;
 	ri->map_id = map->id;
 	ri->map_type = map->map_type;
include/uapi/linux/bpf.h
@@ -2647,7 +2647,7 @@ union bpf_attr {
  * 	Return
  * 		0 on success, or a negative error in case of failure.
  *
- * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
  * 	Description
  * 		Redirect the packet to the endpoint referenced by *map* at
  * 		index *key*. Depending on its type, this *map* can contain
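
The *flags* semantics are unchanged by this patch: as the `return flags & action_mask` path in __bpf_xdp_redirect_map() above shows, the low bits of *flags* select the XDP action returned when the map lookup fails. A hedged usage sketch, reusing the hypothetical tx_ports devmap from the earlier example:

SEC("xdp")
int xdp_redirect_or_pass(struct xdp_md *ctx)
{
	/* If key 0 is not populated in tx_ports, the helper returns the
	 * action encoded in the low bits of flags (here XDP_PASS) instead
	 * of XDP_REDIRECT, so the packet continues up the stack. */
	return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
}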
kernel/bpf/cpumap.c
@@ -667,9 +667,9 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+static int cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
 {
-	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
+	return __bpf_xdp_redirect_map(map, index, flags, 0,
 				      __cpu_map_lookup_elem);
 }
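
For the queueing map that motivates this series, a new map type would wire into the same shape as cpu_map_redirect() above. A hypothetical sketch (example_map_redirect and __example_map_lookup_elem are made-up names, not part of this patch):

static void *__example_map_lookup_elem(struct bpf_map *map, u32 key);

static int example_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	/* The full 64-bit index now arrives intact; a queueing map could
	 * interpret it as, e.g., a packet priority rather than an ifindex. */
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __example_map_lookup_elem);
}

Note that __bpf_xdp_redirect_map()'s lookup callback still takes a u32 key after this patch, so a map actually indexed by 64-bit keys would also need that path extended; this change only widens the plumbing up to that point.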
kernel/bpf/devmap.c
@@ -992,14 +992,14 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 				  map, key, value, map_flags);
 }
 
-static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 {
 	return __bpf_xdp_redirect_map(map, ifindex, flags,
 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
 				      __dev_map_lookup_elem);
 }
 
-static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
 {
 	return __bpf_xdp_redirect_map(map, ifindex, flags,
 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
kernel/bpf/verifier.c
@@ -14384,7 +14384,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
 				     (int (*)(struct bpf_map *map, void *value))NULL));
 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
-				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
+				     (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
 				     (int (*)(struct bpf_map *map,
 					      bpf_callback_t callback_fn,
net/core/filter.c
@@ -4414,10 +4414,10 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 	.arg2_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
+BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u64, key,
 	   u64, flags)
 {
-	return map->ops->map_redirect(map, ifindex, flags);
+	return map->ops->map_redirect(map, key, flags);
 }
 
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
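
Why the widening is safe "after the fact": the BPF_CALL_n wrappers already receive every helper argument in a 64-bit register and merely cast down to the typed parameters. Roughly (a simplified sketch of the BPF_CALL_x expansion in include/linux/filter.h, not the literal macro output), the definition above becomes:

/* Outer function: five u64 registers, whatever the typed signature says. */
u64 bpf_xdp_redirect_map(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return ____bpf_xdp_redirect_map((struct bpf_map *)(unsigned long)r1,
					(u64)r2, (u64)r3);
}

With the old u32 parameter the cast truncated r2 to 32 bits; with u64 the full register value reaches ->map_redirect(), which is exactly what a 64-bit map index needs.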
net/xdp/xskmap.c
@@ -231,9 +231,9 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
 	return 0;
 }
 
-static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+static int xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
 {
-	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
+	return __bpf_xdp_redirect_map(map, index, flags, 0,
 				      __xsk_map_lookup_elem);
 }