Commit 451d1dc8 authored by Daniel T. Lee, committed by Alexei Starovoitov

samples: bpf: update map definitions to new BTF-defined map syntax

Since the new BTF-defined map syntax has been introduced, the map
definitions under the samples directory are a mix of styles: some
samples already use the new syntax, while others keep the existing
syntax and refer to it as 'legacy'.

As stated in commit abd29c93 ("libbpf: allow specifying map
definitions using BTF"), BTF-defined maps are more extensible and make
it easier to support new map definition features.

This commit does not convert every map to the new BTF-defined form,
because some of the samples still use bpf_load instead of libbpf, and
bpf_load cannot properly create BTF-defined maps.

It only updates the samples that load BPF programs through the libbpf
API (e.g. bpf_prog_load_xattr).
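
For reference, a minimal before/after sketch of the conversion applied
below (taken from the first hunk; only the definition style changes,
not the map semantics):

  /* legacy map definition */
  struct bpf_map_def SEC("maps") my_map = {
          .type = BPF_MAP_TYPE_ARRAY,
          .key_size = sizeof(u32),
          .value_size = sizeof(long),
          .max_entries = 256,
  };

  /* equivalent BTF-defined map */
  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __type(key, u32);
          __type(value, long);
          __uint(max_entries, 256);
  } my_map SEC(".maps");

Note that maps whose key/value carry no meaningful BTF type (e.g. the
DEVMAP and CPUMAP definitions below) keep __uint(key_size, ...) and
__uint(value_size, ...) instead of __type().
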
Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent afbe3c27
@@ -5,12 +5,12 @@
 #include "bpf_helpers.h"
 #include "bpf_legacy.h"

-struct bpf_map_def SEC("maps") my_map = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(long),
-        .max_entries = 256,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, u32);
+        __type(value, long);
+        __uint(max_entries, 256);
+} my_map SEC(".maps");

 SEC("socket1")
 int bpf_prog1(struct __sk_buff *skb)
...
@@ -190,12 +190,12 @@ struct pair {
         long bytes;
 };

-struct bpf_map_def SEC("maps") hash_map = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(__be32),
-        .value_size = sizeof(struct pair),
-        .max_entries = 1024,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_HASH);
+        __type(key, __be32);
+        __type(value, struct pair);
+        __uint(max_entries, 1024);
+} hash_map SEC(".maps");

 SEC("socket2")
 int bpf_prog2(struct __sk_buff *skb)
...
@@ -14,12 +14,12 @@
 #include <linux/ipv6.h>
 #include "bpf_helpers.h"

-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(long),
-        .max_entries = 256,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, long);
+        __uint(max_entries, 256);
+} rxcnt SEC(".maps");

 static int parse_ipv4(void *data, u64 nh_off, void *data_end)
 {
...
@@ -14,12 +14,12 @@
 #include <linux/ipv6.h>
 #include "bpf_helpers.h"

-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(long),
-        .max_entries = 256,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, long);
+        __uint(max_entries, 256);
+} rxcnt SEC(".maps");

 static void swap_src_dst_mac(void *data)
 {
...
@@ -28,12 +28,12 @@
 /* volatile to prevent compiler optimizations */
 static volatile __u32 max_pcktsz = MAX_PCKT_SIZE;

-struct bpf_map_def SEC("maps") icmpcnt = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(__u32),
-        .value_size = sizeof(__u64),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, __u32);
+        __type(value, __u64);
+        __uint(max_entries, 1);
+} icmpcnt SEC(".maps");

 static __always_inline void count_icmp(void)
 {
...
@@ -23,13 +23,12 @@
 #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)

-/* For TX-traffic redirect requires net_device ifindex to be in this devmap */
-struct bpf_map_def SEC("maps") xdp_tx_ports = {
-        .type = BPF_MAP_TYPE_DEVMAP,
-        .key_size = sizeof(int),
-        .value_size = sizeof(int),
-        .max_entries = 64,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_DEVMAP);
+        __uint(key_size, sizeof(int));
+        __uint(value_size, sizeof(int));
+        __uint(max_entries, 64);
+} xdp_tx_ports SEC(".maps");

 /* from include/net/ip.h */
 static __always_inline int ip_decrease_ttl(struct iphdr *iph)
...
@@ -18,12 +18,12 @@
 #define MAX_CPUS 64 /* WARNING - sync with _user.c */

 /* Special map type that can XDP_REDIRECT frames to another CPU */
-struct bpf_map_def SEC("maps") cpu_map = {
-        .type = BPF_MAP_TYPE_CPUMAP,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(u32),
-        .max_entries = MAX_CPUS,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_CPUMAP);
+        __uint(key_size, sizeof(u32));
+        __uint(value_size, sizeof(u32));
+        __uint(max_entries, MAX_CPUS);
+} cpu_map SEC(".maps");

 /* Common stats data record to keep userspace more simple */
 struct datarec {
@@ -35,67 +35,67 @@ struct datarec {
 /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
  * feedback. Redirect TX errors can be caught via a tracepoint.
  */
-struct bpf_map_def SEC("maps") rx_cnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, 1);
+} rx_cnt SEC(".maps");

 /* Used by trace point */
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = 2,
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, 2);
         /* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");

 /* Used by trace point */
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = MAX_CPUS,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");

 /* Used by trace point */
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");

 /* Set of maps controlling available CPU, and for iterating through
  * selectable redirect CPUs.
  */
-struct bpf_map_def SEC("maps") cpus_available = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(u32),
-        .max_entries = MAX_CPUS,
-};
-struct bpf_map_def SEC("maps") cpus_count = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(u32),
-        .max_entries = 1,
-};
-struct bpf_map_def SEC("maps") cpus_iterator = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(u32),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, u32);
+        __type(value, u32);
+        __uint(max_entries, MAX_CPUS);
+} cpus_available SEC(".maps");
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, u32);
+        __type(value, u32);
+        __uint(max_entries, 1);
+} cpus_count SEC(".maps");
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, u32);
+        __uint(max_entries, 1);
+} cpus_iterator SEC(".maps");

 /* Used by trace point */
-struct bpf_map_def SEC("maps") exception_cnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, 1);
+} exception_cnt SEC(".maps");

 /* Helper parse functions */
...
@@ -19,22 +19,22 @@
 #include <linux/ipv6.h>
 #include "bpf_helpers.h"

-struct bpf_map_def SEC("maps") tx_port = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(int),
-        .value_size = sizeof(int),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, int);
+        __type(value, int);
+        __uint(max_entries, 1);
+} tx_port SEC(".maps");

 /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
  * feedback. Redirect TX errors can be caught via a tracepoint.
  */
-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(long),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, long);
+        __uint(max_entries, 1);
+} rxcnt SEC(".maps");

 static void swap_src_dst_mac(void *data)
 {
...
@@ -19,22 +19,22 @@
 #include <linux/ipv6.h>
 #include "bpf_helpers.h"

-struct bpf_map_def SEC("maps") tx_port = {
-        .type = BPF_MAP_TYPE_DEVMAP,
-        .key_size = sizeof(int),
-        .value_size = sizeof(int),
-        .max_entries = 100,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_DEVMAP);
+        __uint(key_size, sizeof(int));
+        __uint(value_size, sizeof(int));
+        __uint(max_entries, 100);
+} tx_port SEC(".maps");

 /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
  * feedback. Redirect TX errors can be caught via a tracepoint.
  */
-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(long),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, long);
+        __uint(max_entries, 1);
+} rxcnt SEC(".maps");

 static void swap_src_dst_mac(void *data)
 {
...
@@ -42,44 +42,44 @@ struct direct_map {
 };

 /* Map for trie implementation*/
-struct bpf_map_def SEC("maps") lpm_map = {
-        .type = BPF_MAP_TYPE_LPM_TRIE,
-        .key_size = 8,
-        .value_size = sizeof(struct trie_value),
-        .max_entries = 50,
-        .map_flags = BPF_F_NO_PREALLOC,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+        __uint(key_size, 8);
+        __uint(value_size, sizeof(struct trie_value));
+        __uint(max_entries, 50);
+        __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_map SEC(".maps");

 /* Map for counter*/
-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(u64),
-        .max_entries = 256,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, u64);
+        __uint(max_entries, 256);
+} rxcnt SEC(".maps");

 /* Map for ARP table*/
-struct bpf_map_def SEC("maps") arp_table = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(__be32),
-        .value_size = sizeof(__be64),
-        .max_entries = 50,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_HASH);
+        __type(key, __be32);
+        __type(value, __be64);
+        __uint(max_entries, 50);
+} arp_table SEC(".maps");

 /* Map to keep the exact match entries in the route table*/
-struct bpf_map_def SEC("maps") exact_match = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(__be32),
-        .value_size = sizeof(struct direct_map),
-        .max_entries = 50,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_HASH);
+        __type(key, __be32);
+        __type(value, struct direct_map);
+        __uint(max_entries, 50);
+} exact_match SEC(".maps");

-struct bpf_map_def SEC("maps") tx_port = {
-        .type = BPF_MAP_TYPE_DEVMAP,
-        .key_size = sizeof(int),
-        .value_size = sizeof(int),
-        .max_entries = 100,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_DEVMAP);
+        __uint(key_size, sizeof(int));
+        __uint(value_size, sizeof(int));
+        __uint(max_entries, 100);
+} tx_port SEC(".maps");

 /* Function to set source and destination mac of the packet */
 static inline void set_src_dst_mac(void *data, void *src, void *dst)
...
@@ -23,12 +23,13 @@ enum cfg_options_flags {
         READ_MEM = 0x1U,
         SWAP_MAC = 0x2U,
 };
-struct bpf_map_def SEC("maps") config_map = {
-        .type = BPF_MAP_TYPE_ARRAY,
-        .key_size = sizeof(int),
-        .value_size = sizeof(struct config),
-        .max_entries = 1,
-};
+
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __type(key, int);
+        __type(value, struct config);
+        __uint(max_entries, 1);
+} config_map SEC(".maps");

 /* Common stats data record (shared with userspace) */
 struct datarec {
@@ -36,22 +37,22 @@ struct datarec {
         __u64 issue;
 };

-struct bpf_map_def SEC("maps") stats_global_map = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, 1);
+} stats_global_map SEC(".maps");

 #define MAX_RXQs 64

 /* Stats per rx_queue_index (per CPU) */
-struct bpf_map_def SEC("maps") rx_queue_index_map = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(u32),
-        .value_size = sizeof(struct datarec),
-        .max_entries = MAX_RXQs + 1,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, u32);
+        __type(value, struct datarec);
+        __uint(max_entries, MAX_RXQs + 1);
+} rx_queue_index_map SEC(".maps");

 static __always_inline
 void swap_src_dst_mac(void *data)
...
@@ -19,19 +19,19 @@
 #include "bpf_helpers.h"
 #include "xdp_tx_iptunnel_common.h"

-struct bpf_map_def SEC("maps") rxcnt = {
-        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
-        .key_size = sizeof(__u32),
-        .value_size = sizeof(__u64),
-        .max_entries = 256,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+        __type(key, __u32);
+        __type(value, __u64);
+        __uint(max_entries, 256);
+} rxcnt SEC(".maps");

-struct bpf_map_def SEC("maps") vip2tnl = {
-        .type = BPF_MAP_TYPE_HASH,
-        .key_size = sizeof(struct vip),
-        .value_size = sizeof(struct iptnl_info),
-        .max_entries = MAX_IPTNL_ENTRIES,
-};
+struct {
+        __uint(type, BPF_MAP_TYPE_HASH);
+        __type(key, struct vip);
+        __type(value, struct iptnl_info);
+        __uint(max_entries, MAX_IPTNL_ENTRIES);
+} vip2tnl SEC(".maps");

 static __always_inline void count_tx(u32 protocol)
 {
...
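
For context, a minimal userspace sketch (not part of this commit) of
how such a sample object can be loaded through libbpf; the object file
name is hypothetical, and the header path may differ inside the samples
tree. libbpf creates the BTF-defined maps from the ".maps" section
automatically at load time:

  #include <bpf/libbpf.h>

  int load_sample(void)
  {
          struct bpf_prog_load_attr prog_load_attr = {
                  .prog_type = BPF_PROG_TYPE_XDP,
                  .file = "xdp_sample_kern.o",    /* hypothetical object file */
          };
          struct bpf_object *obj;
          int prog_fd, map_fd;

          if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
                  return -1;

          /* BTF-defined maps are already created by libbpf at this point;
           * look one up by name, e.g. "rxcnt" as defined in the hunks above.
           */
          map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt");
          if (map_fd < 0)
                  return -1;

          return prog_fd;
  }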