Commit 18a4ded9 authored by David S. Miller

Merge tag 'mlx5-updates-2017-09-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-09-03

This series from Tariq includes micro data-path optimizations for the mlx5e
netdevice driver.

Mainly, Tariq introduces the following changes to the NAPI and RX handling
paths of the driver:
 - RX ring structure reorganizing
 - Trivial code refactoring and optimization
 - NAPI busy-poll while fast UMR is in progress
 - Non-atomic state operations in NAPI context
 - Remove unnecessary fields from fast path structures
 - page-cache micro optimization
 - Rely on NAPI to avoid missing an IRQ for RX/TX shared NAPI contexts
 - Stop NAPI when irq changes affinity
 - Distribute RSS table among all RX rings
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ccfdf21b d4b6c488
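
A recurring pattern in the bullets above and the hunks below is the move from
atomic test_bit() on queue state to a plain bitwise test (the new
MLX5E_TEST_BIT macro), which is safe because the state word is only tested
from the per-channel NAPI context and only written while the queue is not
being polled. The following is a minimal standalone C sketch of that pattern;
the struct and helper names are illustrative stand-ins, not the driver's
actual definitions.

        /* Standalone model of the non-atomic state test (MLX5E_TEST_BIT).
         * Names below are illustrative, not the driver's code.
         */
        #include <stdbool.h>
        #include <stdio.h>

        #define BIT(nr)                   (1UL << (nr))
        #define MLX5E_TEST_BIT(state, nr) ((state) & BIT(nr))  /* plain read, no atomics */

        enum {
                MLX5E_RQ_STATE_ENABLED,
                MLX5E_RQ_STATE_AM,
        };

        struct rq {
                unsigned long state;    /* written from the control path only */
        };

        /* Called from the (per-channel, serialized) NAPI poll loop. */
        static bool rq_am_enabled(const struct rq *rq)
        {
                return MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_AM);
        }

        int main(void)
        {
                struct rq rq = {
                        .state = BIT(MLX5E_RQ_STATE_ENABLED) | BIT(MLX5E_RQ_STATE_AM),
                };

                printf("adaptive moderation enabled: %d\n", rq_am_enabled(&rq));
                return 0;
        }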
......@@ -291,10 +291,11 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
MLX5E_RQ_STATE_AM,
};
#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
struct mlx5e_cq {
/* data path - accessed per cqe */
struct mlx5_cqwq wq;
......@@ -342,7 +343,6 @@ enum {
struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;
};
struct mlx5e_txqsq {
......@@ -418,13 +418,8 @@ struct mlx5e_xdpsq {
struct mlx5e_icosq {
/* data path */
/* dirtied @completion */
u16 cc;
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
u16 prev_cc;
struct mlx5e_cq cq;
......@@ -438,7 +433,6 @@ struct mlx5e_icosq {
void __iomem *uar_map;
u32 sqn;
u16 edge;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
......@@ -507,7 +501,7 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
*/
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
u32 head;
u32 tail;
......@@ -516,7 +510,7 @@ struct mlx5e_page_cache {
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
struct mlx5e_rq {
......@@ -527,21 +521,26 @@ struct mlx5e_rq {
struct {
struct mlx5e_wqe_frag_info *frag_info;
u32 frag_sz; /* max possible skb frag_sz */
bool page_reuse;
bool xdp_xmit;
union {
bool page_reuse;
bool xdp_xmit;
};
} wqe;
struct {
struct mlx5e_mpw_info *info;
void *mtt_no_align;
u16 num_strides;
u8 log_stride_sz;
bool umr_in_progress;
} mpwqe;
};
struct {
u16 headroom;
u8 page_order;
u32 wqe_sz; /* wqe data buffer size */
u8 map_dir; /* dma map direction */
} buff;
__be32 mkey_be;
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
......@@ -550,12 +549,11 @@ struct mlx5e_rq {
struct mlx5e_page_cache page_cache;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_post_rx_wqes post_wqes;
mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
u16 rx_headroom;
struct mlx5e_rx_am am; /* Adaptive Moderation */
......@@ -565,19 +563,13 @@ struct mlx5e_rq {
/* control */
struct mlx5_wq_ctrl wq_ctrl;
__be32 mkey_be;
u8 wq_type;
u32 mpwqe_stride_sz;
u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5_core_dev *mdev;
struct mlx5_core_mkey umr_mkey;
} ____cacheline_aligned_in_smp;
enum channel_flags {
MLX5E_CHANNEL_NAPI_SCHED = 1,
};
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
......@@ -589,7 +581,9 @@ struct mlx5e_channel {
struct net_device *netdev;
__be32 mkey_be;
u8 num_tc;
unsigned long flags;
/* data path - accessed per napi poll */
struct irq_desc *irq_desc;
/* control */
struct mlx5e_priv *priv;
......@@ -850,11 +844,9 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
void mlx5e_rx_am(struct mlx5e_rq *rq);
......@@ -933,8 +925,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
u32 *indirection_rqt, int len,
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
......
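
For a sense of scale of the MLX5E_CACHE_SIZE change in the en.h hunk above: with
4 KB pages the per-ring page cache roughly doubles, from 128 to 256 entries,
assuming MLX5_MPWRQ_PAGES_PER_WQE is 64 and NAPI_POLL_WEIGHT is 64. Both
constants are assumptions about typical configurations, not values taken from
this diff, and the roundup helper below is a stand-in for the kernel's.

        /* Standalone check of the MLX5E_CACHE_SIZE change. Input constants
         * are assumed typical values, not taken from this tree.
         */
        #include <stdio.h>

        #define MLX5_MPWRQ_PAGES_PER_WQE 64     /* assumed: 256 KB WQE / 4 KB pages */
        #define NAPI_POLL_WEIGHT         64     /* kernel-wide NAPI budget */

        #define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                                  MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)

        /* stand-in for the kernel's roundup_pow_of_two() */
        static unsigned int roundup_pow_of_two(unsigned int n)
        {
                unsigned int r = 1;

                while (r < n)
                        r <<= 1;
                return r;
        }

        int main(void)
        {
                printf("old cache size: %u pages\n", 2 * roundup_pow_of_two(MLX5E_CACHE_UNIT));
                printf("new cache size: %u pages\n", 4 * roundup_pow_of_two(MLX5E_CACHE_UNIT));
                return 0;
        }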
......@@ -663,8 +663,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
if (!netif_is_rxfh_configured(priv->netdev))
mlx5e_build_default_indir_rqt(priv->mdev,
new_channels.params.indirection_rqt,
mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
......
......@@ -208,6 +208,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_cache_full += rq_stats->cache_full;
s->rx_cache_empty += rq_stats->cache_empty;
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
for (j = 0; j < priv->channels.params.num_tc; j++) {
sq_stats = &c->sq[j].stats;
......@@ -593,12 +594,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->rx_headroom = params->rq_headroom;
rq->buff.headroom = params->rq_headroom;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
......@@ -615,11 +616,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
byte_count = rq->buff.wqe_sz;
byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
......@@ -638,7 +638,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = -ENOMEM;
goto err_rq_wq_destroy;
}
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
......@@ -654,18 +654,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
rq->buff.wqe_sz = params->lro_en ?
byte_count = params->lro_en ?
params->lro_wqe_sz :
MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev))
rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
rq->buff.page_order = order_base_2(npages);
......@@ -676,6 +675,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
wqe->data.addr = cpu_to_be64(dma_offset);
}
wqe->data.byte_count = cpu_to_be32(byte_count);
wqe->data.lkey = rq->mkey_be;
}
......@@ -882,7 +887,8 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
u16 wqe_ix;
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
rq->mpwqe.umr_in_progress)
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
......@@ -925,7 +931,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
goto err_destroy_rq;
if (params->rx_am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
return 0;
......@@ -945,7 +951,6 @@ static void mlx5e_activate_rq(struct mlx5e_rq *rq)
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq->db.ico_wqe[pi].num_wqebbs = 1;
nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
......@@ -1047,7 +1052,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->mdev;
int err;
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
......@@ -1757,7 +1761,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
int eqn;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
......@@ -1774,6 +1780,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
......@@ -3693,7 +3702,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
/* napi_schedule in case we have missed anything */
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
napi_schedule(&c->napi);
if (old_prog)
......@@ -3825,22 +3833,11 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
u32 *indirection_rqt, int len,
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
int node = mdev->priv.numa_node;
int node_num_of_cores;
int i;
if (node == -1)
node = first_online_node;
node_num_of_cores = cpumask_weight(cpumask_of_node(node));
if (node_num_of_cores)
num_channels = min_t(int, num_channels, node_num_of_cores);
for (i = 0; i < len; i++)
indirection_rqt[i] = i % num_channels;
}
......@@ -3979,7 +3976,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
mlx5e_build_default_indir_rqt(params->indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_channels);
}
......
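
The mlx5e_build_default_indir_rqt() rework in the en_main.c hunks above drops
the NUMA-node clamp on the channel count and simply spreads the indirection
table round-robin across all RX channels. Below is a standalone sketch of the
resulting fill; the table size and channel count are example values, not taken
from this tree.

        /* Standalone sketch of the simplified default indirection-table fill:
         * entries are spread round-robin across all RX channels, with no
         * NUMA-node clamping.
         */
        #include <stdio.h>

        #define INDIR_TABLE_SIZE 128    /* example size; the driver uses MLX5E_INDIR_RQT_SIZE */

        static void build_default_indir_rqt(unsigned int *indirection_rqt, int len,
                                            int num_channels)
        {
                int i;

                for (i = 0; i < len; i++)
                        indirection_rqt[i] = i % num_channels;
        }

        int main(void)
        {
                unsigned int rqt[INDIR_TABLE_SIZE];
                int i;

                build_default_indir_rqt(rqt, INDIR_TABLE_SIZE, 6 /* example channel count */);

                for (i = 0; i < 8; i++)
                        printf("rqt[%d] = %u\n", i, rqt[i]);
                return 0;
        }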
......@@ -84,6 +84,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_full;
u64 rx_cache_empty;
u64 rx_cache_busy;
u64 rx_cache_waive;
/* Special handling counters */
u64 link_down_events_phy;
......@@ -123,6 +124,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
......@@ -354,6 +356,7 @@ struct mlx5e_rq_stats {
u64 cache_full;
u64 cache_empty;
u64 cache_busy;
u64 cache_waive;
};
static const struct counter_desc rq_stats_desc[] = {
......@@ -377,6 +380,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
};
struct mlx5e_sq_stats {
......
......@@ -394,6 +394,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_txqsq *sq;
struct mlx5_cqe64 *cqe;
u32 dma_fifo_cc;
u32 nbytes;
u16 npkts;
......@@ -402,7 +403,11 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_txqsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
return false;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
return false;
npkts = 0;
......@@ -416,15 +421,11 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
/* avoid dirtying sq cache line every cqe */
dma_fifo_cc = sq->dma_fifo_cc;
for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
struct mlx5_cqe64 *cqe;
i = 0;
do {
u16 wqe_counter;
bool last_wqe;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (!cqe)
break;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
......@@ -467,7 +468,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sqcc += wi->num_wqebbs;
napi_consume_skb(skb, napi_budget);
} while (!last_wqe);
}
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
mlx5_cqwq_update_db_record(&cq->wq);
......
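
The en_tx.c hunk above restructures the TX completion poll so that a CQE is
fetched before any SQ state is touched, and the loop then re-checks for the
next CQE as its do/while continuation condition. A simplified standalone model
of that control flow follows; the queue types and helpers are stand-ins for
illustration only, not the driver's code.

        /* Simplified model of the reworked TX completion polling loop: bail
         * out early when the CQ is empty, then process CQEs in a do/while
         * whose continuation condition fetches the next CQE.
         */
        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        #define TX_CQ_POLL_BUDGET 128

        struct cqe { int id; };

        struct cq {
                struct cqe entries[4];
                int count;      /* CQEs currently pending */
                int next;
        };

        static struct cqe *cq_get_cqe(struct cq *cq)
        {
                return cq->next < cq->count ? &cq->entries[cq->next] : NULL;
        }

        static void cq_pop(struct cq *cq)
        {
                cq->next++;
        }

        static bool poll_tx_cq(struct cq *cq)
        {
                struct cqe *cqe;
                int i;

                cqe = cq_get_cqe(cq);
                if (!cqe)
                        return false;   /* nothing pending: SQ state is never touched */

                i = 0;
                do {
                        cq_pop(cq);
                        printf("completed cqe %d\n", cqe->id);  /* free skbs, advance cc, ... */
                } while ((++i < TX_CQ_POLL_BUDGET) && (cqe = cq_get_cqe(cq)));

                return i == TX_CQ_POLL_BUDGET;  /* still busy if the budget was exhausted */
        }

        int main(void)
        {
                struct cq cq = { .entries = { {0}, {1}, {2} }, .count = 3 };

                printf("busy: %d\n", poll_tx_cq(&cq));
                return 0;
        }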
......@@ -30,66 +30,18 @@
* SOFTWARE.
*/
#include <linux/irq.h>
#include "en.h"
static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
struct mlx5e_icosq *sq,
struct mlx5_cqe64 *cqe,
u16 *sqcc)
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
struct mlx5e_rq *rq = &sq->channel->rq;
prefetch(rq);
mlx5_cqwq_pop(&cq->wq);
*sqcc += icowi->num_wqebbs;
if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
cqe->op_own);
return;
}
if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
mlx5e_post_rx_mpwqe(rq);
return;
}
if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
WARN_ONCE(true,
"mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
icowi->opcode);
}
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
u16 sqcc;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
return;
int current_cpu = smp_processor_id();
const struct cpumask *aff;
struct irq_data *idata;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
sqcc = sq->cc;
/* by design, there's only a single cqe */
mlx5e_poll_ico_single_cqe(cq, sq, cqe, &sqcc);
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
wmb();
sq->cc = sqcc;
idata = irq_desc_get_irq_data(c->irq_desc);
aff = irq_data_get_affinity_mask(idata);
return cpumask_test_cpu(current_cpu, aff);
}
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
......@@ -100,8 +52,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
int work_done;
int i;
clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
......@@ -111,25 +61,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= c->rq.post_wqes(&c->rq);
busy |= mlx5e_post_rx_wqes(&c->rq);
if (busy)
return budget;
napi_complete_done(napi, work_done);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
if (work_done == budget)
work_done--;
}
/* avoid losing completion event during/after polling cqs */
if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
napi_schedule(napi);
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
}
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
mlx5e_rx_am(&c->rq);
mlx5e_cq_arm(&c->rq.cq);
......@@ -143,7 +90,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
cq->event_ctr++;
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
napi_schedule(cq->napi);
}
......
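
The en_txrx.c changes above make the poll routine keep returning the full
budget only while work remains and the IRQ is still affine to the polling CPU;
otherwise work_done is trimmed below budget so napi_complete_done() can run and
the CQs can be re-armed. A condensed standalone model of that exit path is
sketched below; the busy/affinity inputs are simulated and napi_complete_done()
is modeled as always succeeding, so this is not the driver's code.

        /* Condensed model of the reworked NAPI poll exit path: keep polling
         * only while busy *and* the IRQ affinity still matches the CPU,
         * otherwise complete NAPI and (conceptually) re-arm the CQs.
         */
        #include <stdbool.h>
        #include <stdio.h>

        static bool napi_complete_done_sim(int work_done)
        {
                /* The real napi_complete_done() can refuse completion (e.g. when
                 * NAPI must keep polling); modeled as always succeeding here.
                 */
                (void)work_done;
                return true;
        }

        static int napi_poll_exit(int budget, int work_done, bool busy,
                                  bool affinity_unchanged)
        {
                if (busy) {
                        if (affinity_unchanged)
                                return budget;          /* stay in polling mode */
                        if (work_done == budget)
                                work_done--;            /* allow completion below */
                }

                if (!napi_complete_done_sim(work_done))
                        return work_done;               /* someone re-scheduled us */

                /* ...re-arm the TX/RX completion queues here... */
                return work_done;
        }

        int main(void)
        {
                printf("%d\n", napi_poll_exit(64, 64, true, true));   /* 64: keep polling */
                printf("%d\n", napi_poll_exit(64, 64, true, false));  /* 63: complete + re-arm */
                printf("%d\n", napi_poll_exit(64, 10, false, true));  /* 10: idle, complete */
                return 0;
        }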
......@@ -709,7 +709,7 @@ static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}
......