Commit c2273219 authored by Shay Agroskin, committed by Saeed Mahameed

net/mlx5e: XDP, Inline small packets into the TX MPWQE in XDP xmit flow

Under high packet rate multi-CPU TX workloads, much of the HCA's
resources are spent prefetching TX descriptors, which limits the
transmission rate.
This patch mitigates the problem by moving some of the work to the
CPU, reducing the HW data prefetch overhead for small packets (<= 256B).

When forwarding packets with XDP, a packet smaller than a certain size
(set to ~256 bytes) is sent inline within its WQE TX descriptor
(mem-copied), whenever the hardware TX queue is congested beyond a
pre-defined watermark.

This better utilizes the HW resources (one less packet data prefetch
per inlined packet) and allows better scalability, at the cost of CPU
usage (which now memcpy's the packet into the WQE).

To balance the load between HW and CPU and reach the maximum packet
rate, watermarks are used to detect how congested the HW is, and the
work is shifted back and forth between HW and CPU accordingly.
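
For illustration, here is a minimal, standalone user-space sketch of
that hysteresis. The LOW/HIGH watermark values (10 and 128) are the
ones the patch defines; the simulation harness around them is
hypothetical:

#include <stdint.h>
#include <stdio.h>

#define INLINE_WATERMARK_LOW	10	/* leave inline mode at or below this */
#define INLINE_WATERMARK_HIGH	128	/* enter inline mode at or above this */

/* Mirror of the patch's mlx5e_xdp_update_inline_state() decision:
 * 'outstanding' is the number of TX descriptors posted to the HW but
 * not yet completed (producer counter minus consumer counter). */
static int update_inline_state(int inline_on, uint16_t outstanding)
{
	if (inline_on)
		return outstanding <= INLINE_WATERMARK_LOW ? 0 : 1;
	return outstanding >= INLINE_WATERMARK_HIGH ? 1 : 0;
}

int main(void)
{
	int on = 0;

	/* Sweep the queue occupancy up, then back down: inlining turns on
	 * only once occupancy reaches 128, and turns off only once it has
	 * drained to 10, so the mode does not flap around one threshold. */
	for (int occ = 0; occ <= 160; occ += 40) {
		on = update_inline_state(on, (uint16_t)occ);
		printf("occupancy %3d -> inline %d\n", occ, on);
	}
	for (int occ = 120; occ >= 0; occ -= 40) {
		on = update_inline_state(on, (uint16_t)occ);
		printf("occupancy %3d -> inline %d\n", occ, on);
	}
	return 0;
}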

Performance:
Tested packet rate for UDP 64-byte multi-stream traffic
over two dual-port ConnectX-5 100Gbps NICs.
CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz

* Tested with hyper-threading disabled

XDP_TX:

|          | before | after   |       |
| 24 rings | 51Mpps | 116Mpps | +126% |
| 1 ring   | 12Mpps | 12Mpps  | same  |

XDP_REDIRECT:

** The numbers below are the transmit rate, not the redirection
rate, which might be higher and is not affected by this patch.

|          | before  | after   |      |
| 32 rings | 64Mpps  | 92Mpps  | +43% |
| 1 ring   | 6.4Mpps | 6.4Mpps | same |

As shown, the feature significantly improves scaling, without
hurting single-ring performance.
Signed-off-by: Shay Agroskin <shayag@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 73cab880
@@ -409,14 +409,17 @@ struct mlx5e_xdp_info_fifo {
 struct mlx5e_xdp_wqe_info {
 	u8 num_wqebbs;
-	u8 num_ds;
+	u8 num_pkts;
 };
 
 struct mlx5e_xdp_mpwqe {
 	/* Current MPWQE session */
 	struct mlx5e_tx_wqe *wqe;
 	u8                   ds_count;
+	u8                   pkt_count;
 	u8                   max_ds_count;
+	u8                   complete;
+	u8                   inline_on;
 };
 
 struct mlx5e_xdpsq;
@@ -114,6 +114,8 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
 	prefetchw(session->wqe->data);
 	session->ds_count  = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+	session->pkt_count = 0;
+	session->complete  = 0;
 
 	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -132,6 +134,9 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
 			       MLX5E_XDP_MPW_MAX_WQEBBS);
 
 	session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+
+	mlx5e_xdp_update_inline_state(sq);
+
 	stats->mpwqe++;
 }
@@ -149,7 +154,7 @@ static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
 
 	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
-	wi->num_ds     = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;
+	wi->num_pkts   = session->pkt_count;
 
 	sq->pc += wi->num_wqebbs;
@@ -164,11 +169,9 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
 	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
-	dma_addr_t dma_addr = xdpi->dma_addr;
 	struct xdp_frame *xdpf = xdpi->xdpf;
-	unsigned int dma_len = xdpf->len;
 
-	if (unlikely(sq->hw_mtu < dma_len)) {
+	if (unlikely(sq->hw_mtu < xdpf->len)) {
 		stats->err++;
 		return false;
 	}
@@ -185,9 +188,10 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
 		mlx5e_xdp_mpwqe_session_start(sq);
 	}
 
-	mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);
+	mlx5e_xdp_mpwqe_add_dseg(sq, xdpi, stats);
 
-	if (unlikely(session->ds_count == session->max_ds_count))
+	if (unlikely(session->complete ||
+		     session->ds_count == session->max_ds_count))
 		mlx5e_xdp_mpwqe_complete(sq);
 
 	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -301,7 +305,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 			sqcc += wi->num_wqebbs;
 
-			for (j = 0; j < wi->num_ds; j++) {
+			for (j = 0; j < wi->num_pkts; j++) {
 				struct mlx5e_xdp_info xdpi =
 					mlx5e_xdpi_fifo_pop(xdpi_fifo);
@@ -342,7 +346,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
 		sq->cc += wi->num_wqebbs;
 
-		for (i = 0; i < wi->num_ds; i++) {
+		for (i = 0; i < wi->num_pkts; i++) {
 			struct mlx5e_xdp_info xdpi =
 				mlx5e_xdpi_fifo_pop(xdpi_fifo);
@@ -75,16 +75,68 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 	}
 }
 
+/* Enable inline WQEs to shift some load from a congested HCA (HW) to
+ * a less congested cpu (SW).
+ */
+static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+{
+	u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
+	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+
+#define MLX5E_XDP_INLINE_WATERMARK_LOW	10
+#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
+
+	if (session->inline_on) {
+		if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+			session->inline_on = 0;
+		return;
+	}
+
+	/* inline is false */
+	if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+		session->inline_on = 1;
+}
+
 static inline void
-mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len)
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
+			 struct mlx5e_xdpsq_stats *stats)
 {
 	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+	dma_addr_t dma_addr    = xdpi->dma_addr;
+	struct xdp_frame *xdpf = xdpi->xdpf;
 	struct mlx5_wqe_data_seg *dseg =
-		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++;
+		(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+	u16 dma_len = xdpf->len;
 
+	session->pkt_count++;
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+
+	if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
+		struct mlx5_wqe_inline_seg *inline_dseg =
+			(struct mlx5_wqe_inline_seg *)dseg;
+		u16 ds_len = sizeof(*inline_dseg) + dma_len;
+		u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
+
+		if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
+			/* Not enough space for inline wqe, send with memory pointer */
+			session->complete = true;
+			goto no_inline;
+		}
+
+		inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
+		memcpy(inline_dseg->data, xdpf->data, dma_len);
+
+		session->ds_count += ds_cnt;
+		stats->inlnw++;
+		return;
+	}
+
+no_inline:
 	dseg->addr       = cpu_to_be64(dma_addr);
 	dseg->byte_count = cpu_to_be32(dma_len);
 	dseg->lkey       = sq->mkey_be;
+	session->ds_count++;
 }
 
 static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
@@ -111,5 +163,4 @@ mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
 {
 	return fifo->xi[(*fifo->cc)++ & fifo->mask];
 }
-
 #endif
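
A note on the descriptor accounting in mlx5e_xdp_mpwqe_add_dseg()
above: a WQE data segment slot (MLX5_SEND_WQE_DS) is 16 bytes and
struct mlx5_wqe_inline_seg contributes only its 4-byte byte_count, so
the inline threshold works out to 256 - 4 = 252 bytes. A small
standalone sketch of the arithmetic (the helper below is made up for
illustration):

#include <stdint.h>
#include <stdio.h>

#define SEND_WQE_DS	16u	/* bytes per WQE data segment slot */
#define INLINE_HDR_SZ	4u	/* sizeof(struct mlx5_wqe_inline_seg) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* DS slots consumed when a dma_len-byte packet is inlined into the
 * WQE, per the ds_cnt computation in the patch. */
static unsigned int inline_ds_count(unsigned int dma_len)
{
	return DIV_ROUND_UP(INLINE_HDR_SZ + dma_len, SEND_WQE_DS);
}

int main(void)
{
	/* A 64-byte frame needs 4 + 64 = 68 bytes -> 5 slots, versus a
	 * single slot for the regular scatter/gather (pointer) entry. */
	printf("64B inline  -> %u DS slots\n", inline_ds_count(64));
	printf("252B inline -> %u DS slots (largest inlined size)\n",
	       inline_ds_count(252));
	return 0;
}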
@@ -1532,7 +1532,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
 		dseg->lkey = sq->mkey_be;
 
 		wi->num_wqebbs = 1;
-		wi->num_ds     = 1;
+		wi->num_pkts   = 1;
 	}
 }
@@ -66,6 +66,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -81,6 +82,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -163,6 +165,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_xdp_redirect += rq_stats->xdp_redirect;
 		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
 		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
 		s->rx_xdp_tx_full  += xdpsq_stats->full;
 		s->rx_xdp_tx_err   += xdpsq_stats->err;
 		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
@@ -189,6 +192,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		/* xdp redirect */
 		s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
 		s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+		s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
 		s->tx_xdp_full  += xdpsq_red_stats->full;
 		s->tx_xdp_err   += xdpsq_red_stats->err;
 		s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
@@ -1250,6 +1254,7 @@ static const struct counter_desc sq_stats_desc[] = {
 static const struct counter_desc rq_xdpsq_stats_desc[] = {
 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1258,6 +1263,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
 static const struct counter_desc xdpsq_stats_desc[] = {
 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -78,6 +78,7 @@ struct mlx5e_sw_stats {
 	u64 rx_xdp_redirect;
 	u64 rx_xdp_tx_xmit;
 	u64 rx_xdp_tx_mpwqe;
+	u64 rx_xdp_tx_inlnw;
 	u64 rx_xdp_tx_full;
 	u64 rx_xdp_tx_err;
 	u64 rx_xdp_tx_cqe;
@@ -93,6 +94,7 @@ struct mlx5e_sw_stats {
 	u64 tx_cqe_err;
 	u64 tx_xdp_xmit;
 	u64 tx_xdp_mpwqe;
+	u64 tx_xdp_inlnw;
 	u64 tx_xdp_full;
 	u64 tx_xdp_err;
 	u64 tx_xdp_cqes;
@@ -244,6 +246,7 @@ struct mlx5e_sq_stats {
 struct mlx5e_xdpsq_stats {
 	u64 xmit;
 	u64 mpwqe;
+	u64 inlnw;
 	u64 full;
 	u64 err;
 	/* dirtied @completion */
@@ -395,6 +395,7 @@ struct mlx5_wqe_signature_seg {
 struct mlx5_wqe_inline_seg {
 	__be32	byte_count;
+	__be32	data[0];
 };
 
 enum mlx5_sig_type {
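
The zero-length data[0] member added above gives the inline copy in
mlx5e_xdp_mpwqe_add_dseg() a destination to memcpy() the packet into
without changing the structure's size; the MLX5_INLINE_SEG flag OR'ed
into byte_count is what marks the segment as inline data rather than a
scatter/gather pointer.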