Commit 6982ab60 authored by Saeed Mahameed, committed by David S. Miller

net/mlx5e: Xmit, no write combining

mlx5e netdev Blue Flame (write combining) support demands a lot of
overhead for a small latency gain in some special cases, and this
overhead hurts the common case.

Here we remove xmit Blue Flame support by creating all SQ bfregs
without write combining, and we remove the BF logic and conditions
from the xmit data path.
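
Concretely, every SQ's doorbell register is now requested without a
write-combining mapping. A minimal sketch of the allocation after this
change, taken from the diff below (assuming the two bool arguments of
mlx5_alloc_bfreg are map_wc and fast_path, per its prototype):

	/* Request the SQ bfreg with map_wc = false; previously map_wc was
	 * MLX5_CAP_GEN(mdev, bf), i.e. write combining whenever the device
	 * supports Blue Flame.
	 */
	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, false, false);
	if (err)
		return err;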

Simplify mlx5e_tx_notify_hw (the doorbell function) by removing the
BF-related code and the extra memory barrier that was needed for
WC-mapped SQ doorbell buffers, which no longer exist.
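
With the BF branches gone, the helper reduces to the sketch below,
assembled from the two hunks in the diff (the doorbell-record update
between them, *sq->wq.db = cpu_to_be32(sq->pc), is outside the shown
diff context and unchanged by this patch):

	static inline void
	mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl)
	{
		/* ensure wqe is visible to device before updating doorbell record */
		dma_wmb();

		*sq->wq.db = cpu_to_be32(sq->pc);

		/* ensure doorbell record is visible to device before ringing the
		 * doorbell
		 */
		wmb();

		/* a single 64-bit MMIO write replaces the BF copy vs. doorbell choice */
		mlx5_write64((__be32 *)ctrl, sq->uar_map, NULL);
	}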

Performance improvement:
System: Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz

Test case                   Before      Now       Improvement
---------------------------------------------------------------
TX packets (24 threads)     50 Mpps     54 Mpps   8%
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 80fe326a
@@ -111,7 +111,6 @@
 #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET    128
 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET         16
 
 #define MLX5E_ICOSQ_MAX_WQEBBS \
 	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
@@ -426,7 +425,6 @@ struct mlx5e_sq_dma {
 
 enum {
 	MLX5E_SQ_STATE_ENABLED,
-	MLX5E_SQ_STATE_BF_ENABLE,
 };
 
 struct mlx5e_sq_wqe_info {
@@ -450,9 +448,6 @@ struct mlx5e_sq {
 	/* dirtied @xmit */
 	u16                        pc ____cacheline_aligned_in_smp;
 	u32                        dma_fifo_pc;
-	u16                        bf_offset;
-	u16                        prev_cc;
-	u8                         bf_budget;
 	struct mlx5e_sq_stats      stats;
 
 	struct mlx5e_cq            cq;
@@ -478,7 +473,6 @@ struct mlx5e_sq {
 	void __iomem              *uar_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
-	u16                        bf_buf_size;
 	u16                        max_inline;
 	u8                         min_inline_mode;
 	u16                        edge;
@@ -818,11 +812,9 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 			      u8 cq_period_mode);
 void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
 
-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+static inline void
+mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl)
 {
-	u16 ofst = sq->bf_offset;
-
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
@@ -832,14 +824,8 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 	 * doorbell
 	 */
 	wmb();
-	if (bf_sz)
-		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
-	else
-		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
-
-	/* flush the write-combining mapped buffer */
-	wmb();
-
-	sq->bf_offset ^= sq->bf_buf_size;
+	mlx5_write64((__be32 *)ctrl, sq->uar_map, NULL);
 }
 
 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
...
@@ -1017,7 +1017,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->channel = c;
 	sq->tc      = tc;
 
-	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
+	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, false, false);
 	if (err)
 		return err;
@@ -1030,10 +1030,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 		goto err_unmap_free_uar;
 
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
-	if (sq->bfreg.wc)
-		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-
-	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline      = param->max_inline;
 	sq->min_inline_mode = param->min_inline_mode;
@@ -1050,7 +1047,6 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	}
 
 	sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
-	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
 
 	return 0;
...
@@ -353,7 +353,7 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
 	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
 	sq->pc += num_wqebbs;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+	mlx5e_tx_notify_hw(sq, &wqe->ctrl);
 }
 
 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
@@ -646,7 +646,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
 	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 
 	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+	mlx5e_tx_notify_hw(sq, &wqe->ctrl);
 }
 
 static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
...
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl);
 	}
 }
 
@@ -175,25 +175,6 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	}
 }
 
-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb, bool bf)
-{
-	/* Some NIC TX decisions, e.g loopback, are based on the packet
-	 * headers and occur before the data gather.
-	 * Therefore these headers must be copied into the WQE
-	 */
-	if (bf) {
-		u16 ihs = skb_headlen(skb);
-
-		if (skb_vlan_tag_present(skb))
-			ihs += VLAN_HLEN;
-
-		if (ihs <= sq->max_inline)
-			return skb_headlen(skb);
-	}
-
-	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
 					    unsigned int *skb_len,
 					    unsigned int len)
@@ -235,7 +216,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
 	unsigned int num_bytes;
-	bool bf = false;
 	u16 headlen;
 	u16 ds_cnt;
 	u16 ihs;
@@ -255,11 +235,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	} else
 		sq->stats.csum_none++;
 
-	if (sq->cc != sq->prev_cc) {
-		sq->prev_cc = sq->cc;
-		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
-	}
-
 	if (skb_is_gso(skb)) {
 		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
 		opcode    = MLX5_OPCODE_LSO;
@@ -277,10 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
-		bf = sq->bf_budget &&
-		     !skb->xmit_more &&
-		     !skb_shinfo(skb)->nr_frags;
-		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
 		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
@@ -366,13 +338,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	sq->stats.xmit_more += skb->xmit_more;
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
-		int bf_sz = 0;
-
-		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
-			bf_sz = wi->num_wqebbs << 3;
-
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
@@ -381,9 +348,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			mlx5e_send_nop(sq, false);
 	}
 
-	if (bf)
-		sq->bf_budget--;
-
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
...