Commit 117b07e6 authored by David S. Miller

Merge branch 'mlx4-XDP-performance-improvements'

Tariq Toukan says:

====================
mlx4 XDP performance improvements

This patchset contains data-path improvements, mainly for XDP_DROP
and XDP_TX cases.

Main patches:
* Patch 2 by Saeed allows enabling optimized A0 RX steering (in HW) when
  setting a single RX ring.
  With this configuration, HW packet-rate dramatically improves,
  reaching 28.1 Mpps in XDP_DROP case for both IPv4 (37% gain)
  and IPv6 (53% gain).
* Patch 6 enhances the XDP xmit function. Among other changes, now we
  ring one doorbell per NAPI. Patch gives 17% gain in XDP_TX case.
* Patch 7 obsoletes the NAPI of XDP_TX completion queue and integrates its
  poll into the respective RX NAPI. Patch gives 15% gain in XDP_TX case.

Series generated against net-next commit:
f7aec129 rxrpc: Cache the congestion window setting
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1492a3a7 4c07c132
...@@ -146,16 +146,25 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, ...@@ -146,16 +146,25 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err) if (err)
goto free_eq; goto free_eq;
cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event; cq->mcq.event = mlx4_en_cq_event;
if (cq->type != RX) switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT); NAPI_POLL_WEIGHT);
else napi_enable(&cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_enable(&cq->napi); napi_enable(&cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
cq->xdp_busy = false;
break;
}
return 0; return 0;
...@@ -184,8 +193,10 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) ...@@ -184,8 +193,10 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{ {
if (cq->type != TX_XDP) {
napi_disable(&cq->napi); napi_disable(&cq->napi);
netif_napi_del(&cq->napi); netif_napi_del(&cq->napi);
}
mlx4_cq_free(priv->mdev->dev, &cq->mcq); mlx4_cq_free(priv->mdev->dev, &cq->mcq);
} }
......
...@@ -125,9 +125,9 @@ void mlx4_en_update_loopback_state(struct net_device *dev, ...@@ -125,9 +125,9 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK; priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
mutex_lock(&priv->mdev->state_lock); mutex_lock(&priv->mdev->state_lock);
if (priv->mdev->dev->caps.flags2 & if ((priv->mdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB && MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
priv->rss_map.indir_qp.qpn) { priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
int i; int i;
int err = 0; int err = 0;
int loopback = !!(features & NETIF_F_LOOPBACK); int loopback = !!(features & NETIF_F_LOOPBACK);
......
...@@ -596,6 +596,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv) ...@@ -596,6 +596,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return err; return err;
} }
en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
int base_qpn = mlx4_get_base_qpn(dev, priv->port); int base_qpn = mlx4_get_base_qpn(dev, priv->port);
*qpn = base_qpn + index; *qpn = base_qpn + index;
...@@ -1010,7 +1012,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, ...@@ -1010,7 +1012,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
memcpy(&mc_list[10], mclist->addr, ETH_ALEN); memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port; mc_list[5] = priv->port;
err = mlx4_multicast_detach(mdev->dev, err = mlx4_multicast_detach(mdev->dev,
&priv->rss_map.indir_qp, priv->rss_map.indir_qp,
mc_list, mc_list,
MLX4_PROT_ETH, MLX4_PROT_ETH,
mclist->reg_id); mclist->reg_id);
...@@ -1032,7 +1034,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, ...@@ -1032,7 +1034,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
/* needed for B0 steering support */ /* needed for B0 steering support */
mc_list[5] = priv->port; mc_list[5] = priv->port;
err = mlx4_multicast_attach(mdev->dev, err = mlx4_multicast_attach(mdev->dev,
&priv->rss_map.indir_qp, priv->rss_map.indir_qp,
mc_list, mc_list,
priv->port, 0, priv->port, 0,
MLX4_PROT_ETH, MLX4_PROT_ETH,
...@@ -1677,13 +1679,15 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1677,13 +1679,15 @@ int mlx4_en_start_port(struct net_device *dev)
if (t != TX_XDP) { if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i); tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL; tx_ring->recycle_ring = NULL;
} else {
mlx4_en_init_recycle_ring(priv, i);
}
/* Arm CQ for TX completions */ /* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq); mlx4_en_arm_cq(priv, cq);
} else {
mlx4_en_init_recycle_ring(priv, i);
/* XDP TX CQ should never be armed */
}
/* Set initial ownership of all Tx TXBBs to SW (1) */ /* Set initial ownership of all Tx TXBBs to SW (1) */
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
*((u32 *)(tx_ring->buf + j)) = 0xffffffff; *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
...@@ -1742,7 +1746,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1742,7 +1746,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Attach rx QP to broadcast address */ /* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]); eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */ mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH, priv->port, 0, MLX4_PROT_ETH,
&priv->broadcast_id)) &priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n"); mlx4_warn(mdev, "Failed Attaching Broadcast\n");
...@@ -1866,12 +1870,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) ...@@ -1866,12 +1870,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
/* Detach All multicasts */ /* Detach All multicasts */
eth_broadcast_addr(&mc_list[10]); eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */ mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id); MLX4_PROT_ETH, priv->broadcast_id);
list_for_each_entry(mclist, &priv->curr_list, list) { list_for_each_entry(mclist, &priv->curr_list, list) {
memcpy(&mc_list[10], mclist->addr, ETH_ALEN); memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port; mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id); mc_list, MLX4_PROT_ETH, mclist->reg_id);
if (mclist->tunnel_reg_id) if (mclist->tunnel_reg_id)
mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
......
This diff is collapsed.
This diff is collapsed.
...@@ -2356,7 +2356,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) ...@@ -2356,7 +2356,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
MLX4_A0_STEERING_TABLE_SIZE; MLX4_A0_STEERING_TABLE_SIZE;
} }
mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
dmfs_high_rate_steering_mode_str( dmfs_high_rate_steering_mode_str(
dev->caps.dmfs_high_steer_mode)); dev->caps.dmfs_high_steer_mode));
} }
......
...@@ -72,7 +72,8 @@ ...@@ -72,7 +72,8 @@
#define DEF_RX_RINGS 16 #define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128 #define MAX_RX_RINGS 128
#define MIN_RX_RINGS 4 #define MIN_RX_RINGS 4
#define TXBB_SIZE 64 #define LOG_TXBB_SIZE 6
#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
#define HEADROOM (2048 / TXBB_SIZE + 1) #define HEADROOM (2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE 64 #define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4) #define STAMP_DWORDS (STAMP_STRIDE / 4)
...@@ -115,13 +116,12 @@ ...@@ -115,13 +116,12 @@
#define MLX4_EN_MIN_TX_RING_P_UP 1 #define MLX4_EN_MIN_TX_RING_P_UP 1
#define MLX4_EN_MAX_TX_RING_P_UP 32 #define MLX4_EN_MAX_TX_RING_P_UP 32
#define MLX4_EN_NUM_UP 8 #define MLX4_EN_NUM_UP 8
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024 #define MLX4_EN_DEF_RX_RING_SIZE 1024
#define MLX4_EN_DEF_TX_RING_SIZE MLX4_EN_DEF_RX_RING_SIZE
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ #define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP) MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256 #define MLX4_EN_DEFAULT_TX_WORK 256
#define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */ /* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44 #define MLX4_EN_RX_COAL_TARGET 44
...@@ -276,7 +276,7 @@ struct mlx4_en_tx_ring { ...@@ -276,7 +276,7 @@ struct mlx4_en_tx_ring {
struct netdev_queue *tx_queue; struct netdev_queue *tx_queue;
u32 (*free_tx_desc)(struct mlx4_en_priv *priv, u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, struct mlx4_en_tx_ring *ring,
int index, u8 owner, int index,
u64 timestamp, int napi_mode); u64 timestamp, int napi_mode);
struct mlx4_en_rx_ring *recycle_ring; struct mlx4_en_rx_ring *recycle_ring;
...@@ -359,7 +359,10 @@ struct mlx4_en_cq { ...@@ -359,7 +359,10 @@ struct mlx4_en_cq {
struct mlx4_hwq_resources wqres; struct mlx4_hwq_resources wqres;
int ring; int ring;
struct net_device *dev; struct net_device *dev;
union {
struct napi_struct napi; struct napi_struct napi;
bool xdp_busy;
};
int size; int size;
int buf_size; int buf_size;
int vector; int vector;
...@@ -431,7 +434,7 @@ struct mlx4_en_rss_map { ...@@ -431,7 +434,7 @@ struct mlx4_en_rss_map {
int base_qpn; int base_qpn;
struct mlx4_qp qps[MAX_RX_RINGS]; struct mlx4_qp qps[MAX_RX_RINGS];
enum mlx4_qp_state state[MAX_RX_RINGS]; enum mlx4_qp_state state[MAX_RX_RINGS];
struct mlx4_qp indir_qp; struct mlx4_qp *indir_qp;
enum mlx4_qp_state indir_state; enum mlx4_qp_state indir_state;
}; };
...@@ -689,7 +692,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); ...@@ -689,7 +692,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame, struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length, struct net_device *dev, unsigned int length,
int tx_ind, int *doorbell_pending); int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frame); struct mlx4_en_rx_alloc *frame);
...@@ -721,13 +724,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, ...@@ -721,13 +724,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget); int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
bool mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, struct mlx4_en_tx_ring *ring,
int index, u8 owner, u64 timestamp, int index, u64 timestamp,
int napi_mode); int napi_mode);
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, struct mlx4_en_tx_ring *ring,
int index, u8 owner, u64 timestamp, int index, u64 timestamp,
int napi_mode); int napi_mode);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio, int is_tx, int rss, int qpn, int cqn, int user_prio,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment