Commit bcf0cafa authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: split out common control message handling code

BPF's control message handler seems like a good base to build
on for request-reply control messages.  Split it out to allow
for reuse.
Signed-off-by: default avatarJakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: default avatarDirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 0a72d833
...@@ -15,6 +15,7 @@ nfp-objs := \ ...@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \ nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \ nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \ nfpcore/nfp_target.o \
ccm.o \
nfp_asm.o \ nfp_asm.o \
nfp_app.o \ nfp_app.o \
nfp_app_nic.o \ nfp_app_nic.o \
......
...@@ -6,48 +6,13 @@ ...@@ -6,48 +6,13 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/wait.h>
#include "../ccm.h"
#include "../nfp_app.h" #include "../nfp_app.h"
#include "../nfp_net.h" #include "../nfp_net.h"
#include "fw.h" #include "fw.h"
#include "main.h" #include "main.h"
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
u16 used_tags;
used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
/* All FW communication for BPF is request-reply. To make sure we
* don't reuse the message ID too early after timeout - limit the
* number of requests in flight.
*/
if (nfp_bpf_all_tags_busy(bpf)) {
cmsg_warn(bpf, "all FW request contexts busy!\n");
return -EAGAIN;
}
WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
return bpf->tag_alloc_next++;
}
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
bpf->tag_alloc_last != bpf->tag_alloc_next)
bpf->tag_alloc_last++;
}
static struct sk_buff * static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{ {
...@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) ...@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size; return size;
} }
static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
hdr = (struct cmsg_hdr *)skb->data;
return hdr->type;
}
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
hdr = (struct cmsg_hdr *)skb->data;
return be16_to_cpu(hdr->tag);
}
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
unsigned int msg_tag;
struct sk_buff *skb;
skb_queue_walk(&bpf->cmsg_replies, skb) {
msg_tag = nfp_bpf_cmsg_get_tag(skb);
if (msg_tag == tag) {
nfp_bpf_free_tag(bpf, tag);
__skb_unlink(skb, &bpf->cmsg_replies);
return skb;
}
}
return NULL;
}
static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
if (!skb)
nfp_bpf_free_tag(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
int tag)
{
struct sk_buff *skb;
int i, err;
for (i = 0; i < 50; i++) {
udelay(4);
skb = nfp_bpf_reply(bpf, tag);
if (skb)
return skb;
}
err = wait_event_interruptible_timeout(bpf->cmsg_wq,
skb = nfp_bpf_reply(bpf, tag),
msecs_to_jiffies(5000));
/* We didn't get a response - try last time and atomically drop
* the tag even if no response is matched.
*/
if (!skb)
skb = nfp_bpf_reply_drop_tag(bpf, tag);
if (err < 0) {
cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
err == ERESTARTSYS ? "interrupted" : "error",
type, err);
return ERR_PTR(err);
}
if (!skb) {
cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
type);
return ERR_PTR(-ETIMEDOUT);
}
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
struct cmsg_hdr *hdr;
int tag;
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_alloc_tag(bpf);
if (tag < 0) {
nfp_ctrl_unlock(bpf->app->ctrl);
dev_kfree_skb_any(skb);
return ERR_PTR(tag);
}
hdr = (void *)skb->data;
hdr->ver = CMSG_MAP_ABI_VERSION;
hdr->type = type;
hdr->tag = cpu_to_be16(tag);
__nfp_app_ctrl_tx(bpf->app, skb);
nfp_ctrl_unlock(bpf->app->ctrl);
skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
if (IS_ERR(skb))
return skb;
hdr = (struct cmsg_hdr *)skb->data;
if (hdr->type != __CMSG_REPLY(type)) {
cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
hdr->type, __CMSG_REPLY(type));
goto err_free;
}
/* 0 reply_size means caller will do the validation */
if (reply_size && skb->len != reply_size) {
cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
type, skb->len, reply_size);
goto err_free;
}
return skb;
err_free:
dev_kfree_skb_any(skb);
return ERR_PTR(-EIO);
}
static int static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf, nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply) struct cmsg_reply_map_simple *reply)
...@@ -275,7 +97,7 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map) ...@@ -275,7 +97,7 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type); req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0; req->map_flags = 0;
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC, skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
sizeof(*reply)); sizeof(*reply));
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -310,7 +132,7 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) ...@@ -310,7 +132,7 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data; req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid); req->tid = cpu_to_be32(nfp_map->tid);
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE, skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
sizeof(*reply)); sizeof(*reply));
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n"); cmsg_warn(bpf, "leaking map - I/O error\n");
...@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, ...@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
} }
static int static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
enum nfp_bpf_cmsg_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{ {
struct nfp_bpf_map *nfp_map = offmap->dev_priv; struct nfp_bpf_map *nfp_map = offmap->dev_priv;
...@@ -386,7 +207,7 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, ...@@ -386,7 +207,7 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size); map->value_size);
skb = nfp_bpf_cmsg_communicate(bpf, skb, op, skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
nfp_bpf_cmsg_map_reply_size(bpf, 1)); nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, ...@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags) void *key, void *value, u64 flags)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL); key, value, flags, NULL, NULL);
} }
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key) int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL); key, NULL, 0, NULL, NULL);
} }
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value) void *key, void *value)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value); key, NULL, 0, NULL, value);
} }
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key) void *next_key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL); NULL, NULL, 0, next_key, NULL);
} }
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key) void *key, void *next_key)
{ {
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT, return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL); key, NULL, 0, next_key, NULL);
} }
...@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) ...@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{ {
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) { if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len); cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
goto err_free; dev_kfree_skb_any(skb);
return;
} }
if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len)) if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb); dev_consume_skb_any(skb);
else else
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return;
}
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_cmsg_get_tag(skb);
if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
tag);
goto err_unlock;
} }
__skb_queue_tail(&bpf->cmsg_replies, skb); nfp_ccm_rx(&bpf->ccm, skb);
wake_up_interruptible_all(&bpf->cmsg_wq);
nfp_ctrl_unlock(bpf->app->ctrl);
return;
err_unlock:
nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
dev_kfree_skb_any(skb);
} }
void void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len) nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{ {
const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
const struct cmsg_hdr *hdr = data;
if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) { if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len); cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return; return;
} }
if (hdr->type == CMSG_TYPE_BPF_EVENT) if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len); nfp_bpf_event_output(bpf, data, len);
else else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n", cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/types.h> #include <linux/types.h>
#include "../ccm.h"
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking /* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver. * our FW ABI. In that case we will do translation in the driver.
...@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps { ...@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps {
/* /*
* Types defined for map related control messages * Types defined for map related control messages
*/ */
#define CMSG_MAP_ABI_VERSION 1
enum nfp_bpf_cmsg_type {
CMSG_TYPE_MAP_ALLOC = 1,
CMSG_TYPE_MAP_FREE = 2,
CMSG_TYPE_MAP_LOOKUP = 3,
CMSG_TYPE_MAP_UPDATE = 4,
CMSG_TYPE_MAP_DELETE = 5,
CMSG_TYPE_MAP_GETNEXT = 6,
CMSG_TYPE_MAP_GETFIRST = 7,
CMSG_TYPE_BPF_EVENT = 8,
__CMSG_TYPE_MAP_MAX,
};
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
/* BPF ABIv2 fixed-length control message fields */ /* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16 #define CMSG_MAP_KEY_LW 16
...@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status { ...@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status {
CMSG_RC_ERR_MAP_E2BIG = 7, CMSG_RC_ERR_MAP_E2BIG = 7,
}; };
struct cmsg_hdr {
u8 type;
u8 ver;
__be16 tag;
};
struct cmsg_reply_map_simple { struct cmsg_reply_map_simple {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 rc; __be32 rc;
}; };
struct cmsg_req_map_alloc_tbl { struct cmsg_req_map_alloc_tbl {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 key_size; /* in bytes */ __be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */ __be32 value_size; /* in bytes */
__be32 max_entries; __be32 max_entries;
...@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl { ...@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl {
}; };
struct cmsg_req_map_free_tbl { struct cmsg_req_map_free_tbl {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 tid; __be32 tid;
}; };
...@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl { ...@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl {
}; };
struct cmsg_req_map_op { struct cmsg_req_map_op {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 tid; __be32 tid;
__be32 count; __be32 count;
__be32 flags; __be32 flags;
...@@ -135,7 +114,7 @@ struct cmsg_reply_map_op { ...@@ -135,7 +114,7 @@ struct cmsg_reply_map_op {
}; };
struct cmsg_bpf_event { struct cmsg_bpf_event {
struct cmsg_hdr hdr; struct nfp_ccm_hdr hdr;
__be32 cpu_id; __be32 cpu_id;
__be64 map_ptr; __be64 map_ptr;
__be32 data_size; __be32 data_size;
......
...@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app; bpf->app = app;
app->priv = bpf; app->priv = bpf;
skb_queue_head_init(&bpf->cmsg_replies);
init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list); INIT_LIST_HEAD(&bpf->map_list);
err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); err = nfp_ccm_init(&bpf->ccm, app);
if (err) if (err)
goto err_free_bpf; goto err_free_bpf;
err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
if (err)
goto err_clean_ccm;
nfp_bpf_init_capabilities(bpf); nfp_bpf_init_capabilities(bpf);
err = nfp_bpf_parse_capabilities(app); err = nfp_bpf_parse_capabilities(app);
...@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app)
err_free_neutral_maps: err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral); rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
nfp_ccm_clean(&bpf->ccm);
err_free_bpf: err_free_bpf:
kfree(bpf); kfree(bpf);
return err; return err;
...@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app) ...@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev); bpf_offload_dev_destroy(bpf->bpf_dev);
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); nfp_ccm_clean(&bpf->ccm);
WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral, rhashtable_free_and_destroy(&bpf->maps_neutral,
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/wait.h> #include <linux/wait.h>
#include "../ccm.h"
#include "../nfp_asm.h" #include "../nfp_asm.h"
#include "fw.h" #include "fw.h"
...@@ -84,16 +85,10 @@ enum pkt_vec { ...@@ -84,16 +85,10 @@ enum pkt_vec {
/** /**
* struct nfp_app_bpf - bpf app priv structure * struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app * @app: backpointer to the app
* @ccm: common control message handler data
* *
* @bpf_dev: BPF offload device handle * @bpf_dev: BPF offload device handle
* *
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
*
* @cmsg_replies: received cmsg replies waiting to be consumed
* @cmsg_wq: work queue for waiting for cmsg replies
*
* @cmsg_key_sz: size of key in cmsg element array * @cmsg_key_sz: size of key in cmsg element array
* @cmsg_val_sz: size of value in cmsg element array * @cmsg_val_sz: size of value in cmsg element array
* *
...@@ -132,16 +127,10 @@ enum pkt_vec { ...@@ -132,16 +127,10 @@ enum pkt_vec {
*/ */
struct nfp_app_bpf { struct nfp_app_bpf {
struct nfp_app *app; struct nfp_app *app;
struct nfp_ccm ccm;
struct bpf_offload_dev *bpf_dev; struct bpf_offload_dev *bpf_dev;
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
struct sk_buff_head cmsg_replies;
struct wait_queue_head cmsg_wq;
unsigned int cmsg_key_sz; unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz; unsigned int cmsg_val_sz;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_mirred.h>
#include "main.h" #include "main.h"
#include "../ccm.h"
#include "../nfp_app.h" #include "../nfp_app.h"
#include "../nfp_net_ctrl.h" #include "../nfp_net_ctrl.h"
#include "../nfp_net.h" #include "../nfp_net.h"
...@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, ...@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL; return -EINVAL;
if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL; return -EINVAL;
rcu_read_lock(); rcu_read_lock();
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net.h"
#define NFP_CCM_TYPE_REPLY_BIT 7
#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
/* Check whether the tag window is exhausted.  Tags live in a sliding
 * window [tag_alloc_last, tag_alloc_next); u16 arithmetic makes the
 * subtraction correct across wrap-around.
 */
static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
{
	u16 in_flight = ccm->tag_alloc_next - ccm->tag_alloc_last;

	return in_flight > NFP_CCM_TAG_ALLOC_SPAN;
}
static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
/* CCM is for FW communication which is request-reply. To make sure
* we don't reuse the message ID too early after timeout - limit the
* number of requests in flight.
*/
if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
ccm_warn(ccm->app, "all FW request contexts busy!\n");
return -EAGAIN;
}
WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
return ccm->tag_alloc_next++;
}
/* Release @tag and slide the window start past any already-freed tags.
 * Caller holds the ctrl lock.
 */
static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	for (; ccm->tag_alloc_last != ccm->tag_alloc_next;
	     ccm->tag_alloc_last++)
		if (test_bit(ccm->tag_alloc_last, ccm->tag_allocator))
			break;
}
/* Find, unlink and return the queued reply matching @tag, releasing the
 * tag on success; NULL when no matching reply has arrived yet.  Caller
 * holds the ctrl lock.
 */
static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
{
	struct sk_buff *skb;

	skb_queue_walk(&ccm->replies, skb) {
		if (nfp_ccm_get_tag(skb) != tag)
			continue;

		nfp_ccm_free_tag(ccm, tag);
		__skb_unlink(skb, &ccm->replies);
		return skb;
	}

	return NULL;
}
/* Locked wrapper around __nfp_ccm_reply(). */
static struct sk_buff *
nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *reply;

	nfp_ctrl_lock(app->ctrl);
	reply = __nfp_ccm_reply(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return reply;
}
/* Like nfp_ccm_reply(), but atomically releases @tag when no reply
 * matched, so a late reply cannot be mistaken for a new request's.
 */
static struct sk_buff *
nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *reply;

	nfp_ctrl_lock(app->ctrl);
	reply = __nfp_ccm_reply(ccm, tag);
	if (!reply)
		nfp_ccm_free_tag(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return reply;
}
static struct sk_buff *
nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
enum nfp_ccm_type type, int tag)
{
struct sk_buff *skb;
int i, err;
for (i = 0; i < 50; i++) {
udelay(4);
skb = nfp_ccm_reply(ccm, app, tag);
if (skb)
return skb;
}
err = wait_event_interruptible_timeout(ccm->wq,
skb = nfp_ccm_reply(ccm, app,
tag),
msecs_to_jiffies(5000));
/* We didn't get a response - try last time and atomically drop
* the tag even if no response is matched.
*/
if (!skb)
skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
if (err < 0) {
ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
err == ERESTARTSYS ? "interrupted" : "error",
type, err);
return ERR_PTR(err);
}
if (!skb) {
ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
return ERR_PTR(-ETIMEDOUT);
}
return skb;
}
/* nfp_ccm_communicate() - send a request to the FW and wait for the reply.
 * @ccm:	control message handler state
 * @skb:	request skb; its CCM header is filled in here.  Ownership is
 *		taken: the skb is freed on every error path.
 * @type:	request message type
 * @reply_size:	expected reply length in bytes, 0 to let the caller validate
 *
 * Return: reply skb on success (caller consumes/frees it), or ERR_PTR:
 * -EAGAIN (no free tag), -ETIMEDOUT, -EIO (bad reply), or a wait error.
 */
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	/* Tag allocation, header fill and TX all happen under the ctrl lock
	 * so requests go out in tag order.
	 */
	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	/* A reply carries the request type with the reply bit set. */
	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
/* nfp_ccm_rx() - handle a control message reply received from the FW.
 * @ccm:	control message handler state
 * @skb:	received message; consumed here (queued for the waiter, or
 *		freed if it is too short or nobody is waiting for its tag)
 *
 * Queues the skb on @ccm->replies and wakes any waiters in
 * nfp_ccm_wait_reply().
 */
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	/* Only accept replies for tags that are actually outstanding. */
	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
/* Initialize @ccm for use with @app.  Cannot fail; returns 0 so the
 * signature matches the other app-init helpers.
 */
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	init_waitqueue_head(&ccm->wq);
	skb_queue_head_init(&ccm->replies);
	ccm->app = app;

	return 0;
}
/* Tear down @ccm; warns if any received replies were never consumed. */
void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
#ifndef NFP_CCM_H
#define NFP_CCM_H 1
#include <linux/bitmap.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
struct nfp_app;
/* Firmware ABI */

/* Control message types; currently all BPF map operations plus perf
 * event delivery.  Value 0 is unused.
 */
enum nfp_ccm_type {
	NFP_CCM_TYPE_BPF_MAP_ALLOC = 1,
	NFP_CCM_TYPE_BPF_MAP_FREE = 2,
	NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3,
	NFP_CCM_TYPE_BPF_MAP_UPDATE = 4,
	NFP_CCM_TYPE_BPF_MAP_DELETE = 5,
	NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
	NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
	NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
	__NFP_CCM_TYPE_MAX,
};

/* Version expected in nfp_ccm_hdr.ver on every message */
#define NFP_CCM_ABI_VERSION 1

/* Header at the start of every control message */
struct nfp_ccm_hdr {
	u8 type;
	u8 ver;
	__be16 tag;
};
/* Read the message type from the CCM header at the start of @skb. */
static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
{
	return ((struct nfp_ccm_hdr *)skb->data)->type;
}
/* Read the message tag (CPU byte order) from the header in @skb. */
static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
{
	return be16_to_cpu(((struct nfp_ccm_hdr *)skb->data)->tag);
}
/* Implementation */
/**
 * struct nfp_ccm - common control message handling
 * @app:		backpointer to the app this handler belongs to
 *
 * @tag_allocator:	bitmap of control message tags in use
 * @tag_alloc_next:	next tag bit to allocate
 * @tag_alloc_last:	next tag bit to be freed
 *
 * @replies:		received cmsg replies waiting to be consumed
 * @wq:			work queue for waiting for cmsg replies
 */
struct nfp_ccm {
	struct nfp_app *app;

	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
	u16 tag_alloc_next;
	u16 tag_alloc_last;

	struct sk_buff_head replies;
	struct wait_queue_head wq;
};
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
void nfp_ccm_clean(struct nfp_ccm *ccm);
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
enum nfp_ccm_type type, unsigned int reply_size);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment