Commit 3c31ff22 authored by Florian Westphal, committed by David S. Miller

drivers: mellanox: use netdev_xmit_more() helper

skb->xmit_more hint is now always 0. This switches the mellanox drivers
to the netdev_xmit_more() helper.

Cc: Saeed Mahameed <saeedm@mellanox.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Boris Pismenny <borisp@mellanox.com>
Cc: Ilya Lesokhin <ilyal@mellanox.com>
Cc: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b16f9ee
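For context: netdev_xmit_more() replaces the per-skb hint with a per-CPU flag that the core TX path sets immediately before invoking the driver's ndo_start_xmit(). A sketch of the helper as it appears in include/linux/netdevice.h in this era (approximate, not a verbatim quote):

    static inline bool netdev_xmit_more(void)
    {
            return __this_cpu_read(softnet_data.xmit.more);
    }

Going by the function names, the hunks below touch the mlx4 TX path (en_tx.c), the mlx5 header (en.h), the mlx5 TLS offload path (tls_rxtx.c), and the mlx5 TX path (en_tx.c, including its IPoIB variant).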
@@ -1042,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
 					       tx_info->nr_bytes,
-					       skb->xmit_more);
+					       netdev_xmit_more());
 
 	real_size = (real_size / 16) & 0x3f;
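The mlx4 hunk works because __netdev_tx_sent_queue() folds the xmit_more decision into BQL accounting and tells the driver whether it must ring the doorbell now. Roughly, the core helper behaves like the sketch below (an approximation of the include/linux/netdevice.h definition, not a verbatim copy):

    static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
                                              unsigned int bytes,
                                              bool xmit_more)
    {
            if (xmit_more) {
    #ifdef CONFIG_BQL
                    dql_queued(&dev_queue->dql, bytes);
    #endif
                    /* ring the doorbell early only if the queue just stopped */
                    return netif_tx_queue_stopped(dev_queue);
            }
            netdev_tx_sent_queue(dev_queue, bytes);
            return true;
    }

Passing netdev_xmit_more() instead of skb->xmit_more therefore preserves the doorbell-batching behaviour unchanged.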
@@ -772,7 +772,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-			  struct mlx5e_tx_wqe *wqe, u16 pi);
+			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
 
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 	 */
 	nskb->ip_summed = CHECKSUM_PARTIAL;
 
-	nskb->xmit_more = 1;
 	nskb->queue_mapping = skb->queue_mapping;
 }

@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	sq->stats->tls_resync_bytes += nskb->len;
 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 				    cpu_to_be64(info.rcd_sn));
-	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
 	mlx5e_sq_fetch_wqe(sq, wqe, pi);
 	return skb;
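The two TLS hunks are the one place where the hint cannot simply be re-read from the per-CPU flag: the resync path injects a synthetic nskb ahead of the original skb, and previously faked nskb->xmit_more = 1 so that posting nskb would not ring the doorbell. With the hint now a per-CPU value owned by the core, the driver instead passes an explicit true to mlx5e_sq_xmit(), which is why the mlx5e_sq_xmit()/mlx5e_txwqe_complete() signatures grow a bool xmit_more parameter in the following hunks. A condensed sketch of the intended flow (names from the diff, control flow paraphrased):

    /* resync: post the synthetic skb first, doorbell deferred ... */
    mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);   /* force "more coming" */
    mlx5e_sq_fetch_wqe(sq, wqe, pi);
    /* ... the original skb then goes out via the normal xmit path,
     * which samples netdev_xmit_more() at the entry point.
     */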
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 static inline void
 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
-		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+		     bool xmit_more)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;

@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		sq->stats->stopped++;
 	}
 
-	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+	if (!xmit_more || netif_xmit_stopped(sq->txq))
 		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-			  struct mlx5e_tx_wqe *wqe, u16 pi)
+			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5_wqe_ctrl_seg *cseg;
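The condition in mlx5e_txwqe_complete() above is the heart of doorbell batching; annotated for clarity (comments mine, code as in the hunk):

    /* Defer the doorbell while the stack promises more packets for this
     * queue (xmit_more) and the queue is still running; if the queue has
     * stopped, flush immediately, because no later ndo_start_xmit() call
     * is guaranteed to arrive and ring the doorbell on our behalf.
     */
    if (!xmit_more || netif_xmit_stopped(sq->txq))
            mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);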
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	stats->bytes     += num_bytes;
-	stats->xmit_more += skb->xmit_more;
+	stats->xmit_more += netdev_xmit_more();
 
 	headlen = skb->len - ihs - skb->data_len;
 	ds_cnt += !!headlen;

@@ -423,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		goto err_drop;
 
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-			     num_dma, wi, cseg);
+			     num_dma, wi, cseg, xmit_more);
 
 	return NETDEV_TX_OK;

@@ -449,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(!skb))
 		return NETDEV_TX_OK;
 
-	return mlx5e_sq_xmit(sq, skb, wqe, pi);
+	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,

@@ -659,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	stats->bytes     += num_bytes;
-	stats->xmit_more += skb->xmit_more;
+	stats->xmit_more += netdev_xmit_more();
 
 	headlen = skb->len - ihs - skb->data_len;
 	ds_cnt += !!headlen;

@@ -704,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		goto err_drop;
 
 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-			     num_dma, wi, cseg);
+			     num_dma, wi, cseg, false);
 
 	return NETDEV_TX_OK;
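For completeness, the reason the per-skb bit could go away: the core writes the hint into per-CPU state immediately before each ndo_start_xmit() call, so it is always valid inside the driver's TX routine. Approximately (again paraphrased from include/linux/netdevice.h of this era, not verbatim):

    static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
                                                  struct sk_buff *skb,
                                                  struct net_device *dev, bool more)
    {
            __this_cpu_write(softnet_data.xmit.more, more);
            return ops->ndo_start_xmit(skb, dev);
    }

Note that in the last hunk mlx5i_sq_xmit() (the IPoIB path) passes false to mlx5e_txwqe_complete(), i.e. it always rings the doorbell; only the Ethernet path threads the hint through.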