Commit 12d4ae9d authored by David S. Miller

Merge branch 'mlx-next'

Or Gerlitz says:

====================
Mellanox NIC drivers update, June 23 2015

This series has two fixes from Eran to his recent SRIOV counters work in
mlx4 and a few more updates from Saeed and Achiad to the mlx5 Ethernet
code. All fixes here relate to net-next code, so no need for -stable.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0a51f76e 99611ba1
@@ -203,6 +203,20 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 		priv->port_stats.tso_packets += ring->tso_packets;
 		priv->port_stats.xmit_more += ring->xmit_more;
 	}
+	if (mlx4_is_master(mdev->dev)) {
+		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+						   &mlx4_en_stats->RTOT_prio_1,
+						   NUM_PRIORITIES);
+		stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+						   &mlx4_en_stats->TTOT_prio_1,
+						   NUM_PRIORITIES);
+		stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+						 &mlx4_en_stats->ROCT_prio_1,
+						 NUM_PRIORITIES);
+		stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+						 &mlx4_en_stats->TOCT_prio_1,
+						 NUM_PRIORITIES);
+	}
 
 	/* net device stats */
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
...
@@ -79,8 +79,7 @@ struct mlx4_en_flow_stats_tx {
 #define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
 			NUM_FLOW_PRIORITY_STATS_TX + \
-			NUM_FLOW_PRIORITY_STATS_RX + \
-			NUM_PF_STATS)
+			NUM_FLOW_PRIORITY_STATS_RX)
 
 struct mlx4_en_stat_out_flow_control_mbox {
 	/* Total number of PAUSE frames received from the far-end port */
@@ -108,7 +107,7 @@ enum {
 };
 
 #define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
-			 NUM_FLOW_STATS + NUM_PERF_STATS)
+			 NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
 
 #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
 				  sizeof(((struct net_device_stats *)0)->n))
...
@@ -208,7 +208,6 @@ enum cq_flags {
 struct mlx5e_cq {
 	/* data path - accessed per cqe */
 	struct mlx5_cqwq wq;
-	void *sqrq;
 	unsigned long flags;
 
 	/* data path - accessed per napi poll */
@@ -316,6 +315,7 @@ struct mlx5e_channel {
 	__be32 mkey_be;
 	u8 num_tc;
 	unsigned long flags;
+	int tc_to_txq_map[MLX5E_MAX_NUM_TC];
 
 	/* control */
 	struct mlx5e_priv *priv;
@@ -379,10 +379,9 @@ struct mlx5e_flow_table {
 struct mlx5e_priv {
 	/* priv data path fields - start */
-	int order_base_2_num_channels;
-	int queue_mapping_channel_mask;
 	int num_tc;
 	int default_vlan_prio;
+	struct mlx5e_sq **txq_to_sq_map;
 	/* priv data path fields - end */
 
 	unsigned long state;
@@ -460,7 +459,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
 
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
...
@@ -345,7 +345,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
-	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -496,6 +495,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	void *sqc = param->sqc;
 	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+	int txq_ix;
 	int err;
 
 	err = mlx5_alloc_map_uar(mdev, &sq->uar);
@@ -515,14 +515,15 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	if (err)
 		goto err_sq_wq_destroy;
 
-	sq->txq = netdev_get_tx_queue(priv->netdev,
-				      c->ix + tc * priv->params.num_channels);
+	txq_ix = c->ix + tc * priv->params.num_channels;
+	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
 	sq->pdev = c->pdev;
 	sq->mkey_be = c->mkey_be;
 	sq->channel = c;
 	sq->tc = tc;
 	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
@@ -852,8 +853,6 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
				    priv->params.tx_cq_moderation_pkts);
 		if (err)
 			goto err_close_tx_cqs;
-
-		c->sq[tc].cq.sqrq = &c->sq[tc];
 	}
 
 	return 0;
@@ -902,6 +901,15 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
 		mlx5e_close_sq(&c->sq[tc]);
 }
 
+static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
+				      int num_channels)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+		c->tc_to_txq_map[i] = c->ix + i * num_channels;
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
@@ -923,6 +931,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mkey_be = cpu_to_be32(priv->mr.key);
 	c->num_tc = priv->num_tc;
 
+	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
 	err = mlx5e_open_tx_cqs(c, cparam);
@@ -934,7 +944,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
				    priv->params.rx_cq_moderation_pkts);
 	if (err)
 		goto err_close_tx_cqs;
-	c->rq.cq.sqrq = &c->rq;
 
 	napi_enable(&c->napi);
@@ -1050,14 +1059,18 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
 	struct mlx5e_channel_param cparam;
-	int err;
+	int err = -ENOMEM;
 	int i;
 	int j;
 
 	priv->channel = kcalloc(priv->params.num_channels,
				sizeof(struct mlx5e_channel *), GFP_KERNEL);
-	if (!priv->channel)
-		return -ENOMEM;
+
+	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
+
+	if (!priv->channel || !priv->txq_to_sq_map)
+		goto err_free_txq_to_sq_map;
 
 	mlx5e_build_channel_param(priv, &cparam);
 	for (i = 0; i < priv->params.num_channels; i++) {
@@ -1078,6 +1091,8 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
 	for (i--; i >= 0; i--)
 		mlx5e_close_channel(priv->channel[i]);
 
+err_free_txq_to_sq_map:
+	kfree(priv->txq_to_sq_map);
 	kfree(priv->channel);
 
 	return err;
@@ -1090,6 +1105,7 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
 	for (i = 0; i < priv->params.num_channels; i++)
 		mlx5e_close_channel(priv->channel[i]);
 
+	kfree(priv->txq_to_sq_map);
 	kfree(priv->channel);
 }
@@ -1384,8 +1400,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 	int num_txqs;
 	int err;
 
-	num_txqs = roundup_pow_of_two(priv->params.num_channels) *
-		   priv->params.num_tc;
+	num_txqs = priv->params.num_channels * priv->params.num_tc;
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1693,9 +1708,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->mdev = mdev;
 	priv->netdev = netdev;
 	priv->params.num_channels = num_comp_vectors;
-	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
-	priv->queue_mapping_channel_mask =
-		roundup_pow_of_two(num_comp_vectors) - 1;
 	priv->num_tc = priv->params.num_tc;
 	priv->default_vlan_prio = priv->params.default_vlan_prio;
@@ -1723,7 +1735,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 	if (priv->num_tc > 1) {
 		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-		mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
 	}
 
 	netdev->netdev_ops = &mlx5e_netdev_ops;
@@ -1793,9 +1804,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 	if (mlx5e_check_required_hca_cap(mdev))
 		return NULL;
 
-	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
-				    roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
-				    ncv);
+	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
 	if (!netdev) {
 		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
 		return NULL;
...
@@ -191,7 +191,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
-	struct mlx5e_rq *rq = cq->sqrq;
+	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
 	int i;
 
 	/* avoid accessing cq (dma coherent memory) if not needed */
@@ -209,10 +209,13 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		if (!cqe)
 			break;
 
+		mlx5_cqwq_pop(&cq->wq);
+
 		wqe_counter_be = cqe->wqe_counter;
 		wqe_counter = be16_to_cpu(wqe_counter_be);
 		wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
 		skb = rq->skb[wqe_counter];
+		prefetch(skb->data);
 		rq->skb[wqe_counter] = NULL;
 
 		dma_unmap_single(rq->pdev,
...
@@ -106,7 +106,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		 priv->default_vlan_prio;
 	int tc = netdev_get_prio_tc_map(dev, up);
 
-	return (tc << priv->order_base_2_num_channels) | channel_ix;
+	return priv->channel[channel_ix]->tc_to_txq_map[tc];
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
@@ -143,16 +143,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		u32 payload_len;
-		int num_pkts;
 
 		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
 		opcode = MLX5_OPCODE_LSO;
 		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		payload_len = skb->len - ihs;
-		num_pkts = (payload_len / skb_shinfo(skb)->gso_size) +
-			   !!(payload_len % skb_shinfo(skb)->gso_size);
 		MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
-						  (num_pkts - 1) * ihs;
+					(skb_shinfo(skb)->gso_segs - 1) * ihs;
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
@@ -213,7 +210,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 
 	sq->skb[pi] = skb;
@@ -228,8 +224,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		sq->stats.stopped++;
 	}
 
-	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 		mlx5e_tx_notify_hw(sq, wqe);
+	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((sq->pc & wq->sz_m1) > sq->edge)
@@ -250,21 +248,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ix = skb->queue_mapping;
-	int tc = 0;
-	struct mlx5e_channel *c = priv->channel[ix];
-	struct mlx5e_sq *sq = &c->sq[tc];
-
-	return mlx5e_sq_xmit(sq, skb);
-}
-
-netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
-	int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
-	struct mlx5e_channel *c = priv->channel[ix];
-	struct mlx5e_sq *sq = &c->sq[tc];
+	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
 
 	return mlx5e_sq_xmit(sq, skb);
 }
@@ -282,7 +266,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
 		return false;
 
-	sq = cq->sqrq;
+	sq = container_of(cq, struct mlx5e_sq, cq);
 	npkts = 0;
 	nbytes = 0;
@@ -297,21 +281,31 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
 		struct mlx5_cqe64 *cqe;
-		struct sk_buff *skb;
-		u16 ci;
-		int j;
+		u16 wqe_counter;
+		bool last_wqe;
 
 		cqe = mlx5e_get_cqe(cq);
 		if (!cqe)
 			break;
 
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+		do {
+			struct sk_buff *skb;
+			u16 ci;
+			int j;
+
+			last_wqe = (sqcc == wqe_counter);
+
 			ci = sqcc & sq->wq.sz_m1;
 			skb = sq->skb[ci];
 
 			if (unlikely(!skb)) { /* nop */
 				sq->stats.nop++;
 				sqcc++;
-				goto free_skb;
+				continue;
 			}
 
 			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
@@ -320,15 +314,15 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
				dma_fifo_cc++;
-				dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+				dma_unmap_single(sq->pdev, addr, size,
+						 DMA_TO_DEVICE);
			}
 
			npkts++;
			nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
			sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
-
-free_skb:
			dev_kfree_skb(skb);
+		} while (!last_wqe);
 	}
 
 	mlx5_cqwq_update_db_record(&cq->wq);
...
@@ -43,8 +43,6 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
 	if (cqe_ownership_bit != sw_ownership_val)
		return NULL;
 
-	mlx5_cqwq_pop(wq);
-
 	/* ensure cqe content is read after cqe ownership bit */
 	rmb();
@@ -65,7 +63,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
 
-	busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+	busy |= mlx5e_post_rx_wqes(&c->rq);
 
 	if (busy)
		return budget;
...
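For reference, the mlx5e part of the series above replaces the power-of-two channel mask with two plain lookup tables: each channel fills tc_to_txq_map[tc] = ix + tc * num_channels (mlx5e_build_tc_to_txq_map), mlx5e_select_queue() returns that txq index, and mlx5e_xmit() resolves the SQ directly through the flat priv->txq_to_sq_map. A minimal standalone sketch of that arithmetic, using simplified stand-in structs and made-up values (not the driver code itself):

```c
#include <stdio.h>

#define MAX_NUM_TC 8	/* stand-in for MLX5E_MAX_NUM_TC */

/* simplified stand-in for struct mlx5e_channel */
struct channel {
	int ix;				/* channel index */
	int tc_to_txq_map[MAX_NUM_TC];	/* txq index per traffic class */
};

/* same arithmetic as mlx5e_build_tc_to_txq_map(): txq = ix + tc * num_channels */
static void build_tc_to_txq_map(struct channel *c, int num_channels)
{
	for (int tc = 0; tc < MAX_NUM_TC; tc++)
		c->tc_to_txq_map[tc] = c->ix + tc * num_channels;
}

int main(void)
{
	int num_channels = 4;			/* illustrative value */
	struct channel c = { .ix = 1 };		/* illustrative channel */

	build_tc_to_txq_map(&c, num_channels);

	/* select_queue: a packet on channel 1 classified to tc 2 lands on txq 9;
	 * xmit then indexes a flat txq_to_sq_map[9] to find its SQ, instead of
	 * decoding channel/tc bits from queue_mapping as the old code did. */
	printf("tc 2 on channel %d -> txq %d\n", c.ix, c.tc_to_txq_map[2]);
	return 0;
}
```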