Commit 6fd86efa authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: use callbacks for slow path ring related functions

To reduce the coupling of slow path ring implementations and their
callers, use callbacks instead.

Changes to Jakub's work:
* Also use callbacks for xmit functions
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 62d03330
......@@ -1131,9 +1131,9 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
/* Control device data path
*/
static bool
nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old)
bool
nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old)
{
unsigned int real_len = skb->len, meta_len = 0;
struct nfp_net_tx_ring *tx_ring;
......@@ -1215,31 +1215,12 @@ nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
return false;
}
/* Send one control message on ring vector 0 without taking the vector lock.
 * Double-underscore variant: the caller is expected to provide any needed
 * serialization (NOTE(review): presumably r_vec->lock, as taken by
 * nfp_nfd3_ctrl_tx — confirm). Passes old == false, i.e. a fresh message,
 * and returns whatever nfp_ctrl_tx_one() reports.
 */
bool __nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
return nfp_ctrl_tx_one(nn, r_vec, skb, false);
}
/* Locked variant of control-message transmit: serializes against other
 * users of ring vector 0 with r_vec->lock (BH-disabling spinlock) and
 * sends one fresh (old == false) message via nfp_ctrl_tx_one().
 */
bool nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
bool ret;
spin_lock_bh(&r_vec->lock);
ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
spin_unlock_bh(&r_vec->lock);
return ret;
}
/* Drain the deferred control-message queue on this ring vector.
 *
 * Each dequeued skb is handed to the NFD3 transmit helper with old == true
 * (it was queued earlier). Draining stops early when the helper returns
 * true (NOTE(review): presumably "could not send now, stop" — confirm
 * against nfp_nfd3_ctrl_tx_one()'s return convention).
 *
 * Fix: the diff interleave had stacked both the pre-rename call
 * (nfp_ctrl_tx_one) and the renamed one as nested ifs, which would have
 * transmitted every skb twice; only the renamed call is kept.
 */
static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
			return;
}
......
......@@ -93,34 +93,14 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len);
void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
int nfp_nfd3_poll(struct napi_struct *napi, int budget);
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
bool
nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old);
void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring);
int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p);
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
bool nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool __nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
#endif
......@@ -38,7 +38,7 @@ static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
*
* Assumes that the device is stopped, must be idempotent.
*/
void
static void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct netdev_queue *nd_q;
......@@ -98,7 +98,7 @@ nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
* nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free
*/
void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
......@@ -123,7 +123,7 @@ void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
*
* Return: 0 on success, negative errno otherwise.
*/
int
static int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
......@@ -156,7 +156,7 @@ nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
return -ENOMEM;
}
void
static void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
......@@ -174,7 +174,7 @@ nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
}
}
int
static int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
......@@ -195,7 +195,7 @@ nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
return 0;
}
void
static void
nfp_nfd3_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
......@@ -241,3 +241,19 @@ nfp_nfd3_print_tx_descs(struct seq_file *file,
seq_putc(file, '\n');
}
}
/* NFD3 implementation of the data path callbacks; installed into
 * nn->dp.ops when the vNIC is allocated (see nfp_net_alloc()).
 */
const struct nfp_dp_ops nfp_nfd3_ops = {
	.version = NFP_NFD_VER_NFD3,
	.poll = nfp_nfd3_poll,
	.xsk_poll = nfp_nfd3_xsk_poll,
	.ctrl_poll = nfp_nfd3_ctrl_poll,
	.xmit = nfp_nfd3_tx,
	.ctrl_tx_one = nfp_nfd3_ctrl_tx_one,
	.rx_ring_fill_freelist = nfp_nfd3_rx_ring_fill_freelist,
	.tx_ring_alloc = nfp_nfd3_tx_ring_alloc,
	.tx_ring_reset = nfp_nfd3_tx_ring_reset,
	.tx_ring_free = nfp_nfd3_tx_ring_free,
	.tx_ring_bufs_alloc = nfp_nfd3_tx_ring_bufs_alloc,
	.tx_ring_bufs_free = nfp_nfd3_tx_ring_bufs_free,
	/* Trailing comma added per kernel convention so future additions
	 * touch only one line.
	 */
	.print_tx_descs = nfp_nfd3_print_tx_descs,
};
......@@ -98,6 +98,7 @@
/* Forward declarations */
struct nfp_cpp;
struct nfp_dev_info;
struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
......@@ -439,6 +440,7 @@ struct nfp_stat_pair {
* @rx_rings: Array of pre-allocated RX ring structures
* @ctrl_bar: Pointer to mapped control BAR
*
* @ops: Callbacks and parameters for this vNIC's NFD version
* @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors
* @num_r_vecs: Number of used ring vectors
......@@ -473,6 +475,8 @@ struct nfp_net_dp {
/* Cold data follows */
const struct nfp_dp_ops *ops;
unsigned int txd_cnt;
unsigned int rxd_cnt;
......
......@@ -754,7 +754,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
tasklet_setup(&r_vec->tasklet, nfp_nfd3_ctrl_poll);
tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
......@@ -768,7 +768,7 @@ nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
if (dp->netdev)
netif_napi_add(dp->netdev, &r_vec->napi,
nfp_net_has_xsk_pool_slow(dp, idx) ?
nfp_nfd3_xsk_poll : nfp_nfd3_poll,
dp->ops->xsk_poll : dp->ops->poll,
NAPI_POLL_WEIGHT);
else
tasklet_enable(&r_vec->tasklet);
......@@ -1895,7 +1895,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_nfd3_tx,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
......@@ -2033,6 +2033,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.ctrl_bar = ctrl_bar;
nn->dev_info = dev_info;
nn->pdev = pdev;
nn->dp.ops = &nfp_nfd3_ops;
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
......
......@@ -105,7 +105,7 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
nfp_net_debugfs_print_tx_descs(file, r_vec, tx_ring,
nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
d_rd_p, d_wr_p);
out:
rtnl_unlock();
......
......@@ -392,57 +392,28 @@ void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}
void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
nfp_nfd3_tx_ring_reset(dp, tx_ring);
}
void nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring)
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
nfp_nfd3_rx_ring_fill_freelist(dp, rx_ring);
}
struct nfp_net *nn = netdev_priv(netdev);
int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
return nfp_nfd3_tx_ring_alloc(dp, tx_ring);
return nn->dp.ops->xmit(skb, netdev);
}
void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
nfp_nfd3_tx_ring_free(tx_ring);
}
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
int nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
return nfp_nfd3_tx_ring_bufs_alloc(dp, tx_ring);
return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}
void nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
}
void
nfp_net_debugfs_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
nfp_nfd3_print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
bool ret;
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
return __nfp_nfd3_ctrl_tx(nn, skb);
}
spin_lock_bh(&r_vec->lock);
ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
spin_unlock_bh(&r_vec->lock);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
return nfp_nfd3_ctrl_tx(nn, skb);
return ret;
}
......@@ -5,7 +5,6 @@
#define _NFP_NET_DP_
#include "nfp_net.h"
#include "nfd3/nfd3.h"
static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
......@@ -100,21 +99,103 @@ void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
int nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void
nfp_net_debugfs_print_tx_descs(struct seq_file *file,
/* Identifies which NFD data path generation a struct nfp_dp_ops
 * instance implements (only NFD3 exists at this point).
 */
enum nfp_nfd_version {
NFP_NFD_VER_NFD3,
};
/**
 * struct nfp_dp_ops - per-NFD-version data path callbacks
 * @version:	NFD generation these callbacks implement (enum nfp_nfd_version)
 * @poll:	NAPI poll for normal RX/TX ring vectors
 * @xsk_poll:	NAPI poll used instead of @poll when an XSK pool is attached
 *		to the ring (see nfp_net_napi_add())
 * @ctrl_poll:	tasklet poll for control vNIC RX/TX (see nfp_net_vecs_init())
 * @xmit:	ndo_start_xmit-compatible transmit for the normal path
 * @ctrl_tx_one: transmit one control message; @old is true for messages
 *		re-sent from the deferred queue, false for fresh ones
 * @rx_ring_fill_freelist: give buffers from the ring to FW
 * @tx_ring_alloc: allocate resources for a TX ring
 * @tx_ring_reset: free any untransmitted buffers and reset pointers
 * @tx_ring_free: free resources allocated to a TX ring
 * @tx_ring_bufs_alloc: allocate resources for each TX buffer
 * @tx_ring_bufs_free: free resources allocated to each TX buffer
 * @print_tx_descs: show TX ring's info for debugfs
 */
struct nfp_dp_ops {
enum nfp_nfd_version version;
int (*poll)(struct napi_struct *napi, int budget);
int (*xsk_poll)(struct napi_struct *napi, int budget);
void (*ctrl_poll)(struct tasklet_struct *t);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old);
void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
int (*tx_ring_alloc)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_reset)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*print_tx_descs)(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p);
};
/* Reset a TX ring through the active data path's callback.
 *
 * Fix: dropped the `return` keyword — returning a void expression from a
 * void function violates C11 6.8.6.4 (GCC only tolerates it as an
 * extension) and was inconsistent with the sibling void wrappers below.
 */
static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	dp->ops->tx_ring_reset(dp, tx_ring);
}
/* Hand RX buffers from the ring to FW via the active data path's callback. */
static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring)
{
dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}
/* Allocate TX ring resources via the active data path's callback.
 * Returns the callback's result (0 on success, negative errno otherwise,
 * per the NFD3 implementation's documented contract).
 */
static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
return dp->ops->tx_ring_alloc(dp, tx_ring);
}
/* Free a TX ring via the active data path's callback.
 * @dp is not forwarded — the callback takes only the ring — but the
 * parameter is kept so existing callers remain unchanged.
 */
static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
dp->ops->tx_ring_free(tx_ring);
}
/* Allocate per-TX-buffer resources via the active data path's callback;
 * returns the callback's result.
 */
static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}
/* Free per-TX-buffer resources via the active data path's callback. */
static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
dp->ops->tx_ring_bufs_free(dp, tx_ring);
}
/* Dump a TX ring's descriptors to a debugfs seq_file via @dp's callback.
 * @d_rd_p/@d_wr_p are the device-side read/write pointers supplied by the
 * debugfs code.
 */
static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
{
dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}
extern const struct nfp_dp_ops nfp_nfd3_ops;
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
#endif /* _NFP_NET_DP_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment