Commit 10ef9ab4 authored by Sathya Perla's avatar Sathya Perla Committed by David S. Miller

be2net: event queue re-design

v2: Fixed up the bad typecasting pointed out by David...

In the current design 8 TXQs are serviced by 1 EQ, while each RSS queue
is serviced by a separate EQ. This is being changed as follows:

- Up to 8 EQs will be used (based on the availability of msix vectors).
Each EQ will handle 1 RSS and 1 TX ring. The default non-RSS RX queue and
MCC queue are handled by the last EQ.

- On cards which provide support, up to 8 RSS rings will be used, instead
of the current limit of 4.

The new design allows spreading the TX multi-queue completion processing
across multiple CPUs unlike the previous design.
Signed-off-by: default avatarSathya Perla <sathya.perla@emulex.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 23677ce3
...@@ -81,7 +81,7 @@ static inline char *nic_name(struct pci_dev *pdev) ...@@ -81,7 +81,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256 #define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64 #define BE_NUM_VLANS_SUPPORTED 64
#define BE_MAX_EQD 96 #define BE_MAX_EQD 96u
#define BE_MAX_TX_FRAG_COUNT 30 #define BE_MAX_TX_FRAG_COUNT 30
#define EVNT_Q_LEN 1024 #define EVNT_Q_LEN 1024
...@@ -92,12 +92,16 @@ static inline char *nic_name(struct pci_dev *pdev) ...@@ -92,12 +92,16 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256 #define MCC_CQ_LEN 256
#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */ #define BE3_MAX_RSS_QS 8
#define BE2_MAX_RSS_QS 4
#define MAX_RSS_QS BE3_MAX_RSS_QS
#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */ #define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
#define MAX_TX_QS 8 #define MAX_TX_QS 8
#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */ #define MAX_MSIX_VECTORS MAX_RSS_QS
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64 #define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define FW_VER_LEN 32 #define FW_VER_LEN 32
...@@ -165,13 +169,16 @@ struct be_eq_obj { ...@@ -165,13 +169,16 @@ struct be_eq_obj {
/* Adaptive interrupt coalescing (AIC) info */ /* Adaptive interrupt coalescing (AIC) info */
bool enable_aic; bool enable_aic;
u16 min_eqd; /* in usecs */ u32 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */ u32 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */ u32 eqd; /* configured val when aic is off */
u8 eq_idx; u32 cur_eqd; /* in usecs */
u8 idx; /* array index */
u16 tx_budget;
struct napi_struct napi; struct napi_struct napi;
}; struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;
struct be_mcc_obj { struct be_mcc_obj {
struct be_queue_info q; struct be_queue_info q;
...@@ -197,7 +204,7 @@ struct be_tx_obj { ...@@ -197,7 +204,7 @@ struct be_tx_obj {
/* Remember the skbs that were transmitted */ /* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN]; struct sk_buff *sent_skb_list[TX_Q_LEN];
struct be_tx_stats stats; struct be_tx_stats stats;
}; } ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */ /* Struct to remember the pages posted for rx frags */
struct be_rx_page_info { struct be_rx_page_info {
...@@ -215,8 +222,6 @@ struct be_rx_stats { ...@@ -215,8 +222,6 @@ struct be_rx_stats {
u32 rx_drops_no_skbs; /* skb allocation errors */ u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */ u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */ u32 rx_post_fail; /* page post alloc failures */
u32 rx_polls; /* NAPI calls */
u32 rx_events;
u32 rx_compl; u32 rx_compl;
u32 rx_mcast_pkts; u32 rx_mcast_pkts;
u32 rx_compl_err; /* completions with err set */ u32 rx_compl_err; /* completions with err set */
...@@ -249,16 +254,13 @@ struct be_rx_obj { ...@@ -249,16 +254,13 @@ struct be_rx_obj {
struct be_queue_info cq; struct be_queue_info cq;
struct be_rx_compl_info rxcp; struct be_rx_compl_info rxcp;
struct be_rx_page_info page_info_tbl[RX_Q_LEN]; struct be_rx_page_info page_info_tbl[RX_Q_LEN];
struct be_eq_obj rx_eq;
struct be_rx_stats stats; struct be_rx_stats stats;
u8 rss_id; u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */ bool rx_post_starved; /* Zero rx frags have been posted to BE */
u32 cache_line_barrier[16]; } ____cacheline_aligned_in_smp;
};
struct be_drv_stats { struct be_drv_stats {
u32 be_on_die_temperature; u32 be_on_die_temperature;
u32 tx_events;
u32 eth_red_drops; u32 eth_red_drops;
u32 rx_drops_no_pbuf; u32 rx_drops_no_pbuf;
u32 rx_drops_no_txpb; u32 rx_drops_no_txpb;
...@@ -320,20 +322,19 @@ struct be_adapter { ...@@ -320,20 +322,19 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock; spinlock_t mcc_cq_lock;
struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
u32 num_msix_vec; u32 num_msix_vec;
u32 num_evt_qs;
struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered; bool isr_registered;
/* TX Rings */ /* TX Rings */
struct be_eq_obj tx_eq; u32 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS]; struct be_tx_obj tx_obj[MAX_TX_QS];
u8 num_tx_qs;
u32 cache_line_break[8];
/* Rx rings */ /* Rx rings */
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 num_rx_qs; u32 num_rx_qs;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */ u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 eq_next_idx; u8 eq_next_idx;
...@@ -404,24 +405,34 @@ struct be_adapter { ...@@ -404,24 +405,34 @@ struct be_adapter {
extern const struct ethtool_ops be_ethtool_ops; extern const struct ethtool_ops be_ethtool_ops;
#define msix_enabled(adapter) (adapter->num_msix_vec > 0) #define msix_enabled(adapter) (adapter->num_msix_vec > 0)
#define tx_stats(txo) (&txo->stats) #define num_irqs(adapter) (msix_enabled(adapter) ? \
#define rx_stats(rxo) (&rxo->stats) adapter->num_msix_vec : 1)
#define tx_stats(txo) (&(txo)->stats)
#define rx_stats(rxo) (&(rxo)->stats)
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) /* The default RXQ is the last RXQ */
#define default_rxo(adpt) (&adpt->rx_obj[adpt->num_rx_qs - 1])
#define for_all_rx_queues(adapter, rxo, i) \ #define for_all_rx_queues(adapter, rxo, i) \
for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
i++, rxo++) i++, rxo++)
/* Just skip the first default non-rss queue */ /* Skip the default non-rss queue (last one)*/
#define for_all_rss_queues(adapter, rxo, i) \ #define for_all_rss_queues(adapter, rxo, i) \
for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\ for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
i++, rxo++) i++, rxo++)
#define for_all_tx_queues(adapter, txo, i) \ #define for_all_tx_queues(adapter, txo, i) \
for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
i++, txo++) i++, txo++)
#define for_all_evt_queues(adapter, eqo, i) \
for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
i++, eqo++)
#define is_mcc_eqo(eqo) (eqo->idx == 0)
#define mcc_eqo(adapter) (&adapter->eq_obj[0])
#define PAGE_SHIFT_4K 12 #define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
......
...@@ -235,10 +235,10 @@ void be_async_mcc_disable(struct be_adapter *adapter) ...@@ -235,10 +235,10 @@ void be_async_mcc_disable(struct be_adapter *adapter)
adapter->mcc_obj.rearm_cq = false; adapter->mcc_obj.rearm_cq = false;
} }
int be_process_mcc(struct be_adapter *adapter, int *status) int be_process_mcc(struct be_adapter *adapter)
{ {
struct be_mcc_compl *compl; struct be_mcc_compl *compl;
int num = 0; int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock); spin_lock_bh(&adapter->mcc_cq_lock);
...@@ -252,32 +252,32 @@ int be_process_mcc(struct be_adapter *adapter, int *status) ...@@ -252,32 +252,32 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
be_async_grp5_evt_process(adapter, be_async_grp5_evt_process(adapter,
compl->flags, compl); compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl); status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used); atomic_dec(&mcc_obj->q.used);
} }
be_mcc_compl_use(compl); be_mcc_compl_use(compl);
num++; num++;
} }
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
spin_unlock_bh(&adapter->mcc_cq_lock); spin_unlock_bh(&adapter->mcc_cq_lock);
return num; return status;
} }
/* Wait till no more pending mcc requests are present */ /* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter) static int be_mcc_wait_compl(struct be_adapter *adapter)
{ {
#define mcc_timeout 120000 /* 12s timeout */ #define mcc_timeout 120000 /* 12s timeout */
int i, num, status = 0; int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
for (i = 0; i < mcc_timeout; i++) { for (i = 0; i < mcc_timeout; i++) {
if (be_error(adapter)) if (be_error(adapter))
return -EIO; return -EIO;
num = be_process_mcc(adapter, &status); status = be_process_mcc(adapter);
if (num)
be_cq_notify(adapter, mcc_obj->cq.id,
mcc_obj->rearm_cq, num);
if (atomic_read(&mcc_obj->q.used) == 0) if (atomic_read(&mcc_obj->q.used) == 0)
break; break;
...@@ -726,9 +726,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) ...@@ -726,9 +726,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
} }
/* Uses Mbox */ /* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
struct be_queue_info *cq, struct be_queue_info *eq, struct be_queue_info *eq, bool no_delay, int coalesce_wm)
bool sol_evts, bool no_delay, int coalesce_wm)
{ {
struct be_mcc_wrb *wrb; struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req; struct be_cmd_req_cq_create *req;
...@@ -759,7 +758,6 @@ int be_cmd_cq_create(struct be_adapter *adapter, ...@@ -759,7 +758,6 @@ int be_cmd_cq_create(struct be_adapter *adapter,
ctxt, 1); ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
ctxt, eq->id); ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
} else { } else {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
coalesce_wm); coalesce_wm);
...@@ -768,11 +766,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, ...@@ -768,11 +766,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
__ilog2_u32(cq->len/256)); __ilog2_u32(cq->len/256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, solevent,
ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
} }
be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_dws_cpu_to_le(ctxt, sizeof(req->context));
...@@ -973,7 +968,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, ...@@ -973,7 +968,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
/* Uses MCC */ /* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter, int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, u16 frag_size, struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id) u32 if_id, u32 rss, u8 *rss_id)
{ {
struct be_mcc_wrb *wrb; struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req; struct be_cmd_req_eth_rx_create *req;
...@@ -997,7 +992,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, ...@@ -997,7 +992,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req->num_pages = 2; req->num_pages = 2;
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
req->interface_id = cpu_to_le32(if_id); req->interface_id = cpu_to_le32(if_id);
req->max_frame_size = cpu_to_le16(max_frame_size); req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
req->rss_queue = cpu_to_le32(rss); req->rss_queue = cpu_to_le32(rss);
status = be_mcc_notify_wait(adapter); status = be_mcc_notify_wait(adapter);
......
...@@ -1506,8 +1506,7 @@ extern int be_cmd_eq_create(struct be_adapter *adapter, ...@@ -1506,8 +1506,7 @@ extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay); struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter, extern int be_cmd_cq_create(struct be_adapter *adapter,
struct be_queue_info *cq, struct be_queue_info *eq, struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay, bool no_delay, int num_cqe_dma_coalesce);
int num_cqe_dma_coalesce);
extern int be_cmd_mccq_create(struct be_adapter *adapter, extern int be_cmd_mccq_create(struct be_adapter *adapter,
struct be_queue_info *mccq, struct be_queue_info *mccq,
struct be_queue_info *cq); struct be_queue_info *cq);
...@@ -1516,8 +1515,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter, ...@@ -1516,8 +1515,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter,
struct be_queue_info *cq); struct be_queue_info *cq);
extern int be_cmd_rxq_create(struct be_adapter *adapter, extern int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, struct be_queue_info *rxq, u16 cq_id,
u16 frag_size, u16 max_frame_size, u32 if_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
u32 rss, u8 *rss_id);
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type); int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter, extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
...@@ -1546,7 +1544,7 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, ...@@ -1546,7 +1544,7 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
extern int be_cmd_reset_function(struct be_adapter *adapter); extern int be_cmd_reset_function(struct be_adapter *adapter);
extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
u16 table_size); u16 table_size);
extern int be_process_mcc(struct be_adapter *adapter, int *status); extern int be_process_mcc(struct be_adapter *adapter);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter, extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state); u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter, extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
......
...@@ -37,7 +37,6 @@ enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; ...@@ -37,7 +37,6 @@ enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
FIELDINFO(struct be_drv_stats, field) FIELDINFO(struct be_drv_stats, field)
static const struct be_ethtool_stat et_stats[] = { static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(tx_events)},
{DRVSTAT_INFO(rx_crc_errors)}, {DRVSTAT_INFO(rx_crc_errors)},
{DRVSTAT_INFO(rx_alignment_symbol_errors)}, {DRVSTAT_INFO(rx_alignment_symbol_errors)},
{DRVSTAT_INFO(rx_pause_frames)}, {DRVSTAT_INFO(rx_pause_frames)},
...@@ -126,8 +125,6 @@ static const struct be_ethtool_stat et_stats[] = { ...@@ -126,8 +125,6 @@ static const struct be_ethtool_stat et_stats[] = {
static const struct be_ethtool_stat et_rx_stats[] = { static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_polls)},
{DRVSTAT_RX_INFO(rx_events)},
{DRVSTAT_RX_INFO(rx_compl)}, {DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)}, {DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers /* Number of page allocation failures while posting receive buffers
...@@ -154,7 +151,6 @@ static const struct be_ethtool_stat et_tx_stats[] = { ...@@ -154,7 +151,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_reqs)}, {DRVSTAT_TX_INFO(tx_reqs)},
/* Number of TX work request blocks DMAed to HW */ /* Number of TX work request blocks DMAed to HW */
{DRVSTAT_TX_INFO(tx_wrbs)}, {DRVSTAT_TX_INFO(tx_wrbs)},
{DRVSTAT_TX_INFO(tx_compl)},
/* Number of times the TX queue was stopped due to lack /* Number of times the TX queue was stopped due to lack
* of spaces in the TXQ. * of spaces in the TXQ.
*/ */
...@@ -290,86 +286,42 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) ...@@ -290,86 +286,42 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
} }
} }
static int static int be_get_coalesce(struct net_device *netdev,
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) struct ethtool_coalesce *et)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq; struct be_eq_obj *eqo = &adapter->eq_obj[0];
struct be_eq_obj *tx_eq = &adapter->tx_eq;
coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; et->rx_coalesce_usecs = eqo->cur_eqd;
coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd; et->rx_coalesce_usecs_high = eqo->max_eqd;
coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd; et->rx_coalesce_usecs_low = eqo->min_eqd;
coalesce->tx_coalesce_usecs = tx_eq->cur_eqd; et->tx_coalesce_usecs = eqo->cur_eqd;
coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd; et->tx_coalesce_usecs_high = eqo->max_eqd;
coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd; et->tx_coalesce_usecs_low = eqo->min_eqd;
coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic; et->use_adaptive_rx_coalesce = eqo->enable_aic;
coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic; et->use_adaptive_tx_coalesce = eqo->enable_aic;
return 0; return 0;
} }
/* /* TX attributes are ignored. Only RX attributes are considered
* This routine is used to set interrup coalescing delay * eqd cmd is issued in the worker thread.
*/ */
static int static int be_set_coalesce(struct net_device *netdev,
be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) struct ethtool_coalesce *et)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo; struct be_eq_obj *eqo;
struct be_eq_obj *rx_eq; int i;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
u32 rx_max, rx_min, rx_cur; for_all_evt_queues(adapter, eqo, i) {
int status = 0, i; eqo->enable_aic = et->use_adaptive_rx_coalesce;
u32 tx_cur; eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
if (coalesce->use_adaptive_tx_coalesce == 1) eqo->eqd = et->rx_coalesce_usecs;
return -EINVAL;
for_all_rx_queues(adapter, rxo, i) {
rx_eq = &rxo->rx_eq;
if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
rx_eq->cur_eqd = 0;
rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
rx_max = coalesce->rx_coalesce_usecs_high;
rx_min = coalesce->rx_coalesce_usecs_low;
rx_cur = coalesce->rx_coalesce_usecs;
if (rx_eq->enable_aic) {
if (rx_max > BE_MAX_EQD)
rx_max = BE_MAX_EQD;
if (rx_min > rx_max)
rx_min = rx_max;
rx_eq->max_eqd = rx_max;
rx_eq->min_eqd = rx_min;
if (rx_eq->cur_eqd > rx_max)
rx_eq->cur_eqd = rx_max;
if (rx_eq->cur_eqd < rx_min)
rx_eq->cur_eqd = rx_min;
} else {
if (rx_cur > BE_MAX_EQD)
rx_cur = BE_MAX_EQD;
if (rx_eq->cur_eqd != rx_cur) {
status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
rx_cur);
if (!status)
rx_eq->cur_eqd = rx_cur;
}
}
}
tx_cur = coalesce->tx_coalesce_usecs;
if (tx_cur > BE_MAX_EQD)
tx_cur = BE_MAX_EQD;
if (tx_eq->cur_eqd != tx_cur) {
status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
if (!status)
tx_eq->cur_eqd = tx_cur;
} }
return 0; return 0;
......
...@@ -144,7 +144,7 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, ...@@ -144,7 +144,7 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
GFP_KERNEL); GFP_KERNEL);
if (!mem->va) if (!mem->va)
return -1; return -ENOMEM;
memset(mem->va, 0, mem->size); memset(mem->va, 0, mem->size);
return 0; return 0;
} }
...@@ -996,18 +996,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev, ...@@ -996,18 +996,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return status; return status;
} }
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo) static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{ {
struct be_eq_obj *rx_eq = &rxo->rx_eq; struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
struct be_rx_stats *stats = rx_stats(rxo);
ulong now = jiffies; ulong now = jiffies;
ulong delta = now - stats->rx_jiffies; ulong delta = now - stats->rx_jiffies;
u64 pkts; u64 pkts;
unsigned int start, eqd; unsigned int start, eqd;
if (!rx_eq->enable_aic) if (!eqo->enable_aic) {
eqd = eqo->eqd;
goto modify_eqd;
}
if (eqo->idx >= adapter->num_rx_qs)
return; return;
stats = rx_stats(&adapter->rx_obj[eqo->idx]);
/* Wrapped around */ /* Wrapped around */
if (time_before(now, stats->rx_jiffies)) { if (time_before(now, stats->rx_jiffies)) {
stats->rx_jiffies = now; stats->rx_jiffies = now;
...@@ -1026,17 +1032,16 @@ static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo) ...@@ -1026,17 +1032,16 @@ static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ); stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
stats->rx_pkts_prev = pkts; stats->rx_pkts_prev = pkts;
stats->rx_jiffies = now; stats->rx_jiffies = now;
eqd = stats->rx_pps / 110000; eqd = (stats->rx_pps / 110000) << 3;
eqd = eqd << 3; eqd = min(eqd, eqo->max_eqd);
if (eqd > rx_eq->max_eqd) eqd = max(eqd, eqo->min_eqd);
eqd = rx_eq->max_eqd;
if (eqd < rx_eq->min_eqd)
eqd = rx_eq->min_eqd;
if (eqd < 10) if (eqd < 10)
eqd = 0; eqd = 0;
if (eqd != rx_eq->cur_eqd) {
be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd); modify_eqd:
rx_eq->cur_eqd = eqd; if (eqd != eqo->cur_eqd) {
be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
eqo->cur_eqd = eqd;
} }
} }
...@@ -1064,11 +1069,10 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp) ...@@ -1064,11 +1069,10 @@ static inline bool csum_passed(struct be_rx_compl_info *rxcp)
(rxcp->ip_csum || rxcp->ipv6); (rxcp->ip_csum || rxcp->ipv6);
} }
static struct be_rx_page_info * static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
struct be_rx_obj *rxo,
u16 frag_idx)
{ {
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info; struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
...@@ -1087,16 +1091,15 @@ get_rx_page_info(struct be_adapter *adapter, ...@@ -1087,16 +1091,15 @@ get_rx_page_info(struct be_adapter *adapter,
} }
/* Throwaway the data in the Rx completion */ /* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter, static void be_rx_compl_discard(struct be_rx_obj *rxo,
struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp)
struct be_rx_compl_info *rxcp)
{ {
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info; struct be_rx_page_info *page_info;
u16 i, num_rcvd = rxcp->num_rcvd; u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) { for (i = 0; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
put_page(page_info->page); put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info)); memset(page_info, 0, sizeof(*page_info));
index_inc(&rxcp->rxq_idx, rxq->len); index_inc(&rxcp->rxq_idx, rxq->len);
...@@ -1107,8 +1110,8 @@ static void be_rx_compl_discard(struct be_adapter *adapter, ...@@ -1107,8 +1110,8 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* skb_fill_rx_data forms a complete skb for an ether frame * skb_fill_rx_data forms a complete skb for an ether frame
* indicated by rxcp. * indicated by rxcp.
*/ */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
struct sk_buff *skb, struct be_rx_compl_info *rxcp) struct be_rx_compl_info *rxcp)
{ {
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info; struct be_rx_page_info *page_info;
...@@ -1116,7 +1119,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, ...@@ -1116,7 +1119,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
u16 hdr_len, curr_frag_len, remaining; u16 hdr_len, curr_frag_len, remaining;
u8 *start; u8 *start;
page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
start = page_address(page_info->page) + page_info->page_offset; start = page_address(page_info->page) + page_info->page_offset;
prefetch(start); prefetch(start);
...@@ -1153,7 +1156,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, ...@@ -1153,7 +1156,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
index_inc(&rxcp->rxq_idx, rxq->len); index_inc(&rxcp->rxq_idx, rxq->len);
remaining = rxcp->pkt_size - curr_frag_len; remaining = rxcp->pkt_size - curr_frag_len;
for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size); curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */ /* Coalesce all frags from the same physical page in one slot */
...@@ -1181,21 +1184,21 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, ...@@ -1181,21 +1184,21 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
} }
/* Process the RX completion indicated by rxcp when GRO is disabled */ /* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter, static void be_rx_compl_process(struct be_rx_obj *rxo,
struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp)
struct be_rx_compl_info *rxcp)
{ {
struct be_adapter *adapter = rxo->adapter;
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct sk_buff *skb; struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE); skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
if (unlikely(!skb)) { if (unlikely(!skb)) {
rx_stats(rxo)->rx_drops_no_skbs++; rx_stats(rxo)->rx_drops_no_skbs++;
be_rx_compl_discard(adapter, rxo, rxcp); be_rx_compl_discard(rxo, rxcp);
return; return;
} }
skb_fill_rx_data(adapter, rxo, skb, rxcp); skb_fill_rx_data(rxo, skb, rxcp);
if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp))) if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
...@@ -1203,7 +1206,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, ...@@ -1203,7 +1206,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_checksum_none_assert(skb); skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
if (adapter->netdev->features & NETIF_F_RXHASH) if (netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash; skb->rxhash = rxcp->rss_hash;
...@@ -1214,26 +1217,25 @@ static void be_rx_compl_process(struct be_adapter *adapter, ...@@ -1214,26 +1217,25 @@ static void be_rx_compl_process(struct be_adapter *adapter,
} }
/* Process the RX completion indicated by rxcp when GRO is enabled */ /* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter, void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp)
struct be_rx_compl_info *rxcp)
{ {
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info; struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
struct be_eq_obj *eq_obj = &rxo->rx_eq;
u16 remaining, curr_frag_len; u16 remaining, curr_frag_len;
u16 i, j; u16 i, j;
skb = napi_get_frags(&eq_obj->napi); skb = napi_get_frags(napi);
if (!skb) { if (!skb) {
be_rx_compl_discard(adapter, rxo, rxcp); be_rx_compl_discard(rxo, rxcp);
return; return;
} }
remaining = rxcp->pkt_size; remaining = rxcp->pkt_size;
for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size); curr_frag_len = min(remaining, rx_frag_size);
...@@ -1266,12 +1268,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, ...@@ -1266,12 +1268,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (rxcp->vlanf) if (rxcp->vlanf)
__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag); __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
napi_gro_frags(&eq_obj->napi); napi_gro_frags(napi);
} }
static void be_parse_rx_compl_v1(struct be_adapter *adapter, static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp)
struct be_rx_compl_info *rxcp)
{ {
rxcp->pkt_size = rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
...@@ -1302,9 +1303,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, ...@@ -1302,9 +1303,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
} }
static void be_parse_rx_compl_v0(struct be_adapter *adapter, static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp)
struct be_rx_compl_info *rxcp)
{ {
rxcp->pkt_size = rxcp->pkt_size =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
...@@ -1350,9 +1350,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) ...@@ -1350,9 +1350,9 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
be_dws_le_to_cpu(compl, sizeof(*compl)); be_dws_le_to_cpu(compl, sizeof(*compl));
if (adapter->be3_native) if (adapter->be3_native)
be_parse_rx_compl_v1(adapter, compl, rxcp); be_parse_rx_compl_v1(compl, rxcp);
else else
be_parse_rx_compl_v0(adapter, compl, rxcp); be_parse_rx_compl_v0(compl, rxcp);
if (rxcp->vlanf) { if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards. /* vlanf could be wrongly set in some cards.
...@@ -1391,7 +1391,6 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) ...@@ -1391,7 +1391,6 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{ {
struct be_adapter *adapter = rxo->adapter; struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
struct page *pagep = NULL; struct page *pagep = NULL;
...@@ -1433,7 +1432,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) ...@@ -1433,7 +1432,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
prev_page_info = page_info; prev_page_info = page_info;
queue_head_inc(rxq); queue_head_inc(rxq);
page_info = &page_info_tbl[rxq->head]; page_info = &rxo->page_info_tbl[rxq->head];
} }
if (pagep) if (pagep)
prev_page_info->last_page_user = true; prev_page_info->last_page_user = true;
...@@ -1495,62 +1494,51 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, ...@@ -1495,62 +1494,51 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
return num_wrbs; return num_wrbs;
} }
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) /* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{ {
struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q); struct be_eq_entry *eqe;
int num = 0;
if (!eqe->evt) do {
return NULL; eqe = queue_tail_node(&eqo->q);
if (eqe->evt == 0)
break;
rmb(); rmb();
eqe->evt = le32_to_cpu(eqe->evt); eqe->evt = 0;
queue_tail_inc(&eq_obj->q); num++;
return eqe; queue_tail_inc(&eqo->q);
} while (true);
return num;
} }
static int event_handle(struct be_adapter *adapter, static int event_handle(struct be_eq_obj *eqo)
struct be_eq_obj *eq_obj,
bool rearm)
{ {
struct be_eq_entry *eqe; bool rearm = false;
u16 num = 0; int num = events_get(eqo);
while ((eqe = event_get(eq_obj)) != NULL) {
eqe->evt = 0;
num++;
}
/* Deal with any spurious interrupts that come /* Deal with any spurious interrupts that come without events */
* without events
*/
if (!num) if (!num)
rearm = true; rearm = true;
be_eq_notify(adapter, eq_obj->q.id, rearm, true, num); be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
if (num) if (num)
napi_schedule(&eq_obj->napi); napi_schedule(&eqo->napi);
return num; return num;
} }
/* Just read and notify events without processing them. /* Leaves the EQ is disarmed state */
* Used at the time of destroying event queues */ static void be_eq_clean(struct be_eq_obj *eqo)
static void be_eq_clean(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{ {
struct be_eq_entry *eqe; int num = events_get(eqo);
u16 num = 0;
while ((eqe = event_get(eq_obj)) != NULL) {
eqe->evt = 0;
num++;
}
if (num) be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
be_eq_notify(adapter, eq_obj->q.id, false, true, num);
} }
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) static void be_rx_cq_clean(struct be_rx_obj *rxo)
{ {
struct be_rx_page_info *page_info; struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rxq = &rxo->q;
...@@ -1560,14 +1548,14 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) ...@@ -1560,14 +1548,14 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
/* First cleanup pending rx completions */ /* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(rxo)) != NULL) { while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
be_rx_compl_discard(adapter, rxo, rxcp); be_rx_compl_discard(rxo, rxcp);
be_cq_notify(adapter, rx_cq->id, false, 1); be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
} }
/* Then free posted rx buffer that were not used */ /* Then free posted rx buffer that were not used */
tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
page_info = get_rx_page_info(adapter, rxo, tail); page_info = get_rx_page_info(rxo, tail);
put_page(page_info->page); put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info)); memset(page_info, 0, sizeof(*page_info));
} }
...@@ -1623,6 +1611,47 @@ static void be_tx_compl_clean(struct be_adapter *adapter, ...@@ -1623,6 +1611,47 @@ static void be_tx_compl_clean(struct be_adapter *adapter,
} }
} }
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
struct be_eq_obj *eqo;
int i;
for_all_evt_queues(adapter, eqo, i) {
be_eq_clean(eqo);
if (eqo->q.created)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
be_queue_free(adapter, &eqo->q);
}
}
static int be_evt_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq;
struct be_eq_obj *eqo;
int i, rc;
adapter->num_evt_qs = num_irqs(adapter);
for_all_evt_queues(adapter, eqo, i) {
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
eqo->max_eqd = BE_MAX_EQD;
eqo->enable_aic = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
sizeof(struct be_eq_entry));
if (rc)
return rc;
rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
if (rc)
return rc;
}
return rc;
}
static void be_mcc_queues_destroy(struct be_adapter *adapter) static void be_mcc_queues_destroy(struct be_adapter *adapter)
{ {
struct be_queue_info *q; struct be_queue_info *q;
...@@ -1643,22 +1672,19 @@ static int be_mcc_queues_create(struct be_adapter *adapter) ...@@ -1643,22 +1672,19 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
{ {
struct be_queue_info *q, *cq; struct be_queue_info *q, *cq;
/* Alloc MCC compl queue */
cq = &adapter->mcc_obj.cq; cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
sizeof(struct be_mcc_compl))) sizeof(struct be_mcc_compl)))
goto err; goto err;
/* Ask BE to create MCC compl queue; share TX's eq */ /* Use the default EQ for MCC completions */
if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0)) if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
goto mcc_cq_free; goto mcc_cq_free;
/* Alloc MCC queue */
q = &adapter->mcc_obj.q; q = &adapter->mcc_obj.q;
if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
goto mcc_cq_destroy; goto mcc_cq_destroy;
/* Ask BE to create MCC queue */
if (be_cmd_mccq_create(adapter, q, cq)) if (be_cmd_mccq_create(adapter, q, cq))
goto mcc_q_free; goto mcc_q_free;
...@@ -1691,14 +1717,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter) ...@@ -1691,14 +1717,6 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q); be_queue_free(adapter, q);
} }
/* Clear any residual events */
be_eq_clean(adapter, &adapter->tx_eq);
q = &adapter->tx_eq.q;
if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_EQ);
be_queue_free(adapter, q);
} }
static int be_num_txqs_want(struct be_adapter *adapter) static int be_num_txqs_want(struct be_adapter *adapter)
...@@ -1711,10 +1729,10 @@ static int be_num_txqs_want(struct be_adapter *adapter) ...@@ -1711,10 +1729,10 @@ static int be_num_txqs_want(struct be_adapter *adapter)
return MAX_TX_QS; return MAX_TX_QS;
} }
/* One TX event queue is shared by all TX compl qs */ static int be_tx_cqs_create(struct be_adapter *adapter)
static int be_tx_queues_create(struct be_adapter *adapter)
{ {
struct be_queue_info *eq, *q, *cq; struct be_queue_info *cq, *eq;
int status;
struct be_tx_obj *txo; struct be_tx_obj *txo;
u8 i; u8 i;
...@@ -1726,192 +1744,109 @@ static int be_tx_queues_create(struct be_adapter *adapter) ...@@ -1726,192 +1744,109 @@ static int be_tx_queues_create(struct be_adapter *adapter)
rtnl_unlock(); rtnl_unlock();
} }
adapter->tx_eq.max_eqd = 0; for_all_tx_queues(adapter, txo, i) {
adapter->tx_eq.min_eqd = 0; cq = &txo->cq;
adapter->tx_eq.cur_eqd = 96; status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
adapter->tx_eq.enable_aic = false; sizeof(struct be_eth_tx_compl));
if (status)
return status;
eq = &adapter->tx_eq.q; /* If num_evt_qs is less than num_tx_qs, then more than
if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, * one txq share an eq
sizeof(struct be_eq_entry))) */
return -1; eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
status = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (status)
return status;
}
return 0;
}
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) static int be_tx_qs_create(struct be_adapter *adapter)
goto err; {
adapter->tx_eq.eq_idx = adapter->eq_next_idx++; struct be_tx_obj *txo;
int i, status;
for_all_tx_queues(adapter, txo, i) { for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq; status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
if (be_queue_alloc(adapter, cq, TX_CQ_LEN, sizeof(struct be_eth_wrb));
sizeof(struct be_eth_tx_compl))) if (status)
goto err; return status;
if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
goto err;
q = &txo->q; status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
if (be_queue_alloc(adapter, q, TX_Q_LEN, if (status)
sizeof(struct be_eth_wrb))) return status;
goto err;
} }
return 0;
err: return 0;
be_tx_queues_destroy(adapter);
return -1;
} }
static void be_rx_queues_destroy(struct be_adapter *adapter) static void be_rx_cqs_destroy(struct be_adapter *adapter)
{ {
struct be_queue_info *q; struct be_queue_info *q;
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
int i; int i;
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
be_queue_free(adapter, &rxo->q);
q = &rxo->cq; q = &rxo->cq;
if (q->created) if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q); be_queue_free(adapter, q);
q = &rxo->rx_eq.q;
if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_EQ);
be_queue_free(adapter, q);
}
}
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_enabled(adapter) && be_physfn(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
"No support for multiple RX queues\n");
return 1;
} }
} }
static int be_rx_queues_create(struct be_adapter *adapter) static int be_rx_cqs_create(struct be_adapter *adapter)
{ {
struct be_queue_info *eq, *q, *cq; struct be_queue_info *eq, *cq;
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
int rc, i; int rc, i;
adapter->num_rx_qs = min(be_num_rxqs_want(adapter), /* We'll create as many RSS rings as there are irqs.
msix_enabled(adapter) ? * But when there's only one irq there's no use creating RSS rings
adapter->num_msix_vec - 1 : 1); */
if (adapter->num_rx_qs != MAX_RX_QS) adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
dev_warn(&adapter->pdev->dev, num_irqs(adapter) + 1 : 1;
"Can create only %d RX queues", adapter->num_rx_qs);
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter; rxo->adapter = adapter;
rxo->rx_eq.max_eqd = BE_MAX_EQD;
rxo->rx_eq.enable_aic = true;
/* EQ */
eq = &rxo->rx_eq.q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
sizeof(struct be_eq_entry));
if (rc)
goto err;
rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
if (rc)
goto err;
rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
/* CQ */
cq = &rxo->cq; cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
sizeof(struct be_eth_rx_compl)); sizeof(struct be_eth_rx_compl));
if (rc) if (rc)
goto err; return rc;
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (rc) if (rc)
goto err; return rc;
/* Rx Q - will be created in be_open() */
q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN,
sizeof(struct be_eth_rx_d));
if (rc)
goto err;
} }
return 0; if (adapter->num_rx_qs != MAX_RX_QS)
err: dev_info(&adapter->pdev->dev,
be_rx_queues_destroy(adapter); "Created only %d receive queues", adapter->num_rx_qs);
return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj) return 0;
{
struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
if (!eqe->evt)
return false;
else
return true;
} }
static irqreturn_t be_intx(int irq, void *dev) static irqreturn_t be_intx(int irq, void *dev)
{ {
struct be_adapter *adapter = dev; struct be_adapter *adapter = dev;
struct be_rx_obj *rxo; int num_evts;
int isr, i, tx = 0 , rx = 0;
if (lancer_chip(adapter)) {
if (event_peek(&adapter->tx_eq))
tx = event_handle(adapter, &adapter->tx_eq, false);
for_all_rx_queues(adapter, rxo, i) {
if (event_peek(&rxo->rx_eq))
rx |= event_handle(adapter, &rxo->rx_eq, true);
}
if (!(tx || rx))
return IRQ_NONE;
} else {
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
if ((1 << adapter->tx_eq.eq_idx & isr))
event_handle(adapter, &adapter->tx_eq, false);
for_all_rx_queues(adapter, rxo, i) {
if ((1 << rxo->rx_eq.eq_idx & isr))
event_handle(adapter, &rxo->rx_eq, true);
}
}
return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
struct be_rx_obj *rxo = dev;
struct be_adapter *adapter = rxo->adapter;
event_handle(adapter, &rxo->rx_eq, true); /* With INTx only one EQ is used */
num_evts = event_handle(&adapter->eq_obj[0]);
return IRQ_HANDLED; if (num_evts)
return IRQ_HANDLED;
else
return IRQ_NONE;
} }
static irqreturn_t be_msix_tx_mcc(int irq, void *dev) static irqreturn_t be_msix(int irq, void *dev)
{ {
struct be_adapter *adapter = dev; struct be_eq_obj *eqo = dev;
event_handle(adapter, &adapter->tx_eq, false);
event_handle(eqo);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -1920,16 +1855,14 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp) ...@@ -1920,16 +1855,14 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
return (rxcp->tcpf && !rxcp->err) ? true : false; return (rxcp->tcpf && !rxcp->err) ? true : false;
} }
static int be_poll_rx(struct napi_struct *napi, int budget) static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
int budget)
{ {
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
struct be_adapter *adapter = rxo->adapter; struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq; struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp; struct be_rx_compl_info *rxcp;
u32 work_done; u32 work_done;
rx_stats(rxo)->rx_polls++;
for (work_done = 0; work_done < budget; work_done++) { for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo); rxcp = be_rx_compl_get(rxo);
if (!rxcp) if (!rxcp)
...@@ -1941,7 +1874,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget) ...@@ -1941,7 +1874,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
/* Discard compl with partial DMA Lancer B0 */ /* Discard compl with partial DMA Lancer B0 */
if (unlikely(!rxcp->pkt_size)) { if (unlikely(!rxcp->pkt_size)) {
be_rx_compl_discard(adapter, rxo, rxcp); be_rx_compl_discard(rxo, rxcp);
goto loop_continue; goto loop_continue;
} }
...@@ -1950,94 +1883,96 @@ static int be_poll_rx(struct napi_struct *napi, int budget) ...@@ -1950,94 +1883,96 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
*/ */
if (unlikely(rxcp->port != adapter->port_num && if (unlikely(rxcp->port != adapter->port_num &&
!lancer_chip(adapter))) { !lancer_chip(adapter))) {
be_rx_compl_discard(adapter, rxo, rxcp); be_rx_compl_discard(rxo, rxcp);
goto loop_continue; goto loop_continue;
} }
if (do_gro(rxcp)) if (do_gro(rxcp))
be_rx_compl_process_gro(adapter, rxo, rxcp); be_rx_compl_process_gro(rxo, napi, rxcp);
else else
be_rx_compl_process(adapter, rxo, rxcp); be_rx_compl_process(rxo, rxcp);
loop_continue: loop_continue:
be_rx_stats_update(rxo, rxcp); be_rx_stats_update(rxo, rxcp);
} }
be_cq_notify(adapter, rx_cq->id, false, work_done); if (work_done) {
be_cq_notify(adapter, rx_cq->id, true, work_done);
/* Refill the queue */ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) be_post_rx_frags(rxo, GFP_ATOMIC);
be_post_rx_frags(rxo, GFP_ATOMIC);
/* All consumed */
if (work_done < budget) {
napi_complete(napi);
/* Arm CQ */
be_cq_notify(adapter, rx_cq->id, true, 0);
} }
return work_done; return work_done;
} }
/* As TX and MCC share the same EQ check for both TX and MCC completions. static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
* For TX/MCC we don't honour budget; consume everything int budget, int idx)
*/
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{ {
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp; struct be_eth_tx_compl *txcp;
int tx_compl, mcc_compl, status = 0; int num_wrbs = 0, work_done;
u8 i;
u16 num_wrbs;
for_all_tx_queues(adapter, txo, i) { for (work_done = 0; work_done < budget; work_done++) {
tx_compl = 0; txcp = be_tx_compl_get(&txo->cq);
num_wrbs = 0; if (!txcp)
while ((txcp = be_tx_compl_get(&txo->cq))) { break;
num_wrbs += be_tx_compl_process(adapter, txo, num_wrbs += be_tx_compl_process(adapter, txo,
AMAP_GET_BITS(struct amap_eth_tx_compl, AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp)); wrb_index, txcp));
tx_compl++; }
}
if (tx_compl) {
be_cq_notify(adapter, txo->cq.id, true, tx_compl);
atomic_sub(num_wrbs, &txo->q.used);
/* As Tx wrbs have been freed up, wake up netdev queue if (work_done) {
* if it was stopped due to lack of tx wrbs. */ be_cq_notify(adapter, txo->cq.id, true, work_done);
if (__netif_subqueue_stopped(adapter->netdev, i) && atomic_sub(num_wrbs, &txo->q.used);
atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, i);
}
u64_stats_update_begin(&tx_stats(txo)->sync_compl); /* As Tx wrbs have been freed up, wake up netdev queue
tx_stats(txo)->tx_compl += tx_compl; * if it was stopped due to lack of tx wrbs. */
u64_stats_update_end(&tx_stats(txo)->sync_compl); if (__netif_subqueue_stopped(adapter->netdev, idx) &&
atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
} }
u64_stats_update_begin(&tx_stats(txo)->sync_compl);
tx_stats(txo)->tx_compl += work_done;
u64_stats_update_end(&tx_stats(txo)->sync_compl);
} }
return (work_done < budget); /* Done */
}
mcc_compl = be_process_mcc(adapter, &status); int be_poll(struct napi_struct *napi, int budget)
{
struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter = eqo->adapter;
int max_work = 0, work, i;
bool tx_done;
if (mcc_compl) { /* Process all TXQs serviced by this EQ */
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl); for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
eqo->tx_budget, i);
if (!tx_done)
max_work = budget;
} }
napi_complete(napi); /* This loop will iterate twice for EQ0 in which
* completions of the last RXQ (default one) are also processed
* For other EQs the loop iterates only once
*/
for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
work = be_process_rx(&adapter->rx_obj[i], napi, budget);
max_work = max(work, max_work);
}
/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */ if (is_mcc_eqo(eqo))
if (lancer_chip(adapter) && !msix_enabled(adapter)) { be_process_mcc(adapter);
for_all_tx_queues(adapter, txo, i)
be_cq_notify(adapter, txo->cq.id, true, 0);
be_cq_notify(adapter, mcc_obj->cq.id, true, 0); if (max_work < budget) {
napi_complete(napi);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
} else {
/* As we'll continue in polling mode, count and clear events */
be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
} }
return max_work;
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
adapter->drv_stats.tx_events++;
return 1;
} }
void be_detect_dump_ue(struct be_adapter *adapter) void be_detect_dump_ue(struct be_adapter *adapter)
...@@ -2112,12 +2047,24 @@ static void be_msix_disable(struct be_adapter *adapter) ...@@ -2112,12 +2047,24 @@ static void be_msix_disable(struct be_adapter *adapter)
} }
} }
static uint be_num_rss_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
adapter->num_vfs == 0 && be_physfn(adapter) &&
!be_is_mc(adapter))
return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
else
return 0;
}
static void be_msix_enable(struct be_adapter *adapter) static void be_msix_enable(struct be_adapter *adapter)
{ {
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */ #define BE_MIN_MSIX_VECTORS 1
int i, status, num_vec; int i, status, num_vec;
num_vec = be_num_rxqs_want(adapter) + 1; /* If RSS queues are not used, need a vec for default RX Q */
num_vec = min(be_num_rss_want(adapter), num_online_cpus());
num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
for (i = 0; i < num_vec; i++) for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i; adapter->msix_entries[i].entry = i;
...@@ -2185,60 +2132,31 @@ static void be_sriov_disable(struct be_adapter *adapter) ...@@ -2185,60 +2132,31 @@ static void be_sriov_disable(struct be_adapter *adapter)
} }
static inline int be_msix_vec_get(struct be_adapter *adapter, static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj) struct be_eq_obj *eqo)
{
return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
struct be_eq_obj *eq_obj,
void *handler, char *desc, void *context)
{
struct net_device *netdev = adapter->netdev;
int vec;
sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
vec = be_msix_vec_get(adapter, eq_obj);
return request_irq(vec, handler, 0, eq_obj->desc, context);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
void *context)
{ {
int vec = be_msix_vec_get(adapter, eq_obj); return adapter->msix_entries[eqo->idx].vector;
free_irq(vec, context);
} }
static int be_msix_register(struct be_adapter *adapter) static int be_msix_register(struct be_adapter *adapter)
{ {
struct be_rx_obj *rxo; struct net_device *netdev = adapter->netdev;
int status, i; struct be_eq_obj *eqo;
char qname[10]; int status, i, vec;
status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
adapter);
if (status)
goto err;
for_all_rx_queues(adapter, rxo, i) { for_all_evt_queues(adapter, eqo, i) {
sprintf(qname, "rxq%d", i); sprintf(eqo->desc, "%s-q%d", netdev->name, i);
status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx, vec = be_msix_vec_get(adapter, eqo);
qname, rxo); status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status) if (status)
goto err_msix; goto err_msix;
} }
return 0; return 0;
err_msix: err_msix:
be_free_irq(adapter, &adapter->tx_eq, adapter); for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--) dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
be_free_irq(adapter, &rxo->rx_eq, rxo); status);
err:
dev_warn(&adapter->pdev->dev,
"MSIX Request IRQ failed - err %d\n", status);
be_msix_disable(adapter); be_msix_disable(adapter);
return status; return status;
} }
...@@ -2274,7 +2192,7 @@ static int be_irq_register(struct be_adapter *adapter) ...@@ -2274,7 +2192,7 @@ static int be_irq_register(struct be_adapter *adapter)
static void be_irq_unregister(struct be_adapter *adapter) static void be_irq_unregister(struct be_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct be_rx_obj *rxo; struct be_eq_obj *eqo;
int i; int i;
if (!adapter->isr_registered) if (!adapter->isr_registered)
...@@ -2287,16 +2205,14 @@ static void be_irq_unregister(struct be_adapter *adapter) ...@@ -2287,16 +2205,14 @@ static void be_irq_unregister(struct be_adapter *adapter)
} }
/* MSIx */ /* MSIx */
be_free_irq(adapter, &adapter->tx_eq, adapter); for_all_evt_queues(adapter, eqo, i)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
for_all_rx_queues(adapter, rxo, i)
be_free_irq(adapter, &rxo->rx_eq, rxo);
done: done:
adapter->isr_registered = false; adapter->isr_registered = false;
} }
static void be_rx_queues_clear(struct be_adapter *adapter) static void be_rx_qs_destroy(struct be_adapter *adapter)
{ {
struct be_queue_info *q; struct be_queue_info *q;
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
...@@ -2311,53 +2227,33 @@ static void be_rx_queues_clear(struct be_adapter *adapter) ...@@ -2311,53 +2227,33 @@ static void be_rx_queues_clear(struct be_adapter *adapter)
* arrive * arrive
*/ */
mdelay(1); mdelay(1);
be_rx_q_clean(adapter, rxo); be_rx_cq_clean(rxo);
} }
be_queue_free(adapter, q);
/* Clear any residual events */
q = &rxo->rx_eq.q;
if (q->created)
be_eq_clean(adapter, &rxo->rx_eq);
} }
} }
static int be_close(struct net_device *netdev) static int be_close(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
struct be_tx_obj *txo; struct be_tx_obj *txo;
struct be_eq_obj *tx_eq = &adapter->tx_eq; struct be_eq_obj *eqo;
int vec, i; int i;
be_async_mcc_disable(adapter); be_async_mcc_disable(adapter);
if (!lancer_chip(adapter)) if (!lancer_chip(adapter))
be_intr_set(adapter, false); be_intr_set(adapter, false);
for_all_rx_queues(adapter, rxo, i) for_all_evt_queues(adapter, eqo, i) {
napi_disable(&rxo->rx_eq.napi); napi_disable(&eqo->napi);
if (msix_enabled(adapter))
napi_disable(&tx_eq->napi); synchronize_irq(be_msix_vec_get(adapter, eqo));
else
if (lancer_chip(adapter)) { synchronize_irq(netdev->irq);
be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); be_eq_clean(eqo);
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, false, 0);
for_all_tx_queues(adapter, txo, i)
be_cq_notify(adapter, txo->cq.id, false, 0);
} }
if (msix_enabled(adapter)) {
vec = be_msix_vec_get(adapter, tx_eq);
synchronize_irq(vec);
for_all_rx_queues(adapter, rxo, i) {
vec = be_msix_vec_get(adapter, &rxo->rx_eq);
synchronize_irq(vec);
}
} else {
synchronize_irq(netdev->irq);
}
be_irq_unregister(adapter); be_irq_unregister(adapter);
/* Wait for all pending tx completions to arrive so that /* Wait for all pending tx completions to arrive so that
...@@ -2366,21 +2262,34 @@ static int be_close(struct net_device *netdev) ...@@ -2366,21 +2262,34 @@ static int be_close(struct net_device *netdev)
for_all_tx_queues(adapter, txo, i) for_all_tx_queues(adapter, txo, i)
be_tx_compl_clean(adapter, txo); be_tx_compl_clean(adapter, txo);
be_rx_queues_clear(adapter); be_rx_qs_destroy(adapter);
return 0; return 0;
} }
static int be_rx_queues_setup(struct be_adapter *adapter) static int be_rx_qs_create(struct be_adapter *adapter)
{ {
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
int rc, i, j; int rc, i, j;
u8 rsstable[128]; u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
sizeof(struct be_eth_rx_d));
if (rc)
return rc;
}
/* The FW would like the default RXQ to be created first */
rxo = default_rxo(adapter);
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
adapter->if_handle, false, &rxo->rss_id);
if (rc)
return rc;
for_all_rss_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE, rx_frag_size, adapter->if_handle,
adapter->if_handle, true, &rxo->rss_id);
(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
if (rc) if (rc)
return rc; return rc;
} }
...@@ -2394,48 +2303,47 @@ static int be_rx_queues_setup(struct be_adapter *adapter) ...@@ -2394,48 +2303,47 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
} }
} }
rc = be_cmd_rss_config(adapter, rsstable, 128); rc = be_cmd_rss_config(adapter, rsstable, 128);
if (rc) if (rc)
return rc; return rc;
} }
/* First time posting */ /* First time posting */
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL); be_post_rx_frags(rxo, GFP_KERNEL);
napi_enable(&rxo->rx_eq.napi);
}
return 0; return 0;
} }
static int be_open(struct net_device *netdev) static int be_open(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq; struct be_eq_obj *eqo;
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
struct be_tx_obj *txo;
u8 link_status; u8 link_status;
int status, i; int status, i;
status = be_rx_queues_setup(adapter); status = be_rx_qs_create(adapter);
if (status) if (status)
goto err; goto err;
napi_enable(&tx_eq->napi);
be_irq_register(adapter); be_irq_register(adapter);
if (!lancer_chip(adapter)) if (!lancer_chip(adapter))
be_intr_set(adapter, true); be_intr_set(adapter, true);
/* The evt queues are created in unarmed state; arm them */ for_all_rx_queues(adapter, rxo, i)
for_all_rx_queues(adapter, rxo, i) {
be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
be_cq_notify(adapter, rxo->cq.id, true, 0); be_cq_notify(adapter, rxo->cq.id, true, 0);
}
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
/* Now that interrupts are on we can process async mcc */ for_all_tx_queues(adapter, txo, i)
be_cq_notify(adapter, txo->cq.id, true, 0);
be_async_mcc_enable(adapter); be_async_mcc_enable(adapter);
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
status = be_cmd_link_status_query(adapter, NULL, NULL, status = be_cmd_link_status_query(adapter, NULL, NULL,
&link_status, 0); &link_status, 0);
if (!status) if (!status)
...@@ -2545,11 +2453,14 @@ static int be_clear(struct be_adapter *adapter) ...@@ -2545,11 +2453,14 @@ static int be_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_mcc_queues_destroy(adapter); be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter); be_rx_cqs_destroy(adapter);
be_tx_queues_destroy(adapter); be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
/* tell fw we're done with firing cmds */ /* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter); be_cmd_fw_clean(adapter);
be_msix_disable(adapter);
return 0; return 0;
} }
...@@ -2639,24 +2550,29 @@ static int be_setup(struct be_adapter *adapter) ...@@ -2639,24 +2550,29 @@ static int be_setup(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags; u32 cap_flags, en_flags;
u32 tx_fc, rx_fc; u32 tx_fc, rx_fc;
int status, i; int status;
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
struct be_tx_obj *txo;
be_setup_init(adapter); be_setup_init(adapter);
be_cmd_req_native_mode(adapter); be_cmd_req_native_mode(adapter);
status = be_tx_queues_create(adapter); be_msix_enable(adapter);
if (status != 0)
status = be_evt_queues_create(adapter);
if (status)
goto err; goto err;
status = be_rx_queues_create(adapter); status = be_tx_cqs_create(adapter);
if (status != 0) if (status)
goto err;
status = be_rx_cqs_create(adapter);
if (status)
goto err; goto err;
status = be_mcc_queues_create(adapter); status = be_mcc_queues_create(adapter);
if (status != 0) if (status)
goto err; goto err;
memset(mac, 0, ETH_ALEN); memset(mac, 0, ETH_ALEN);
...@@ -2682,12 +2598,6 @@ static int be_setup(struct be_adapter *adapter) ...@@ -2682,12 +2598,6 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0) if (status != 0)
goto err; goto err;
for_all_tx_queues(adapter, txo, i) {
status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
if (status)
goto err;
}
/* The VF's permanent mac queried from card is incorrect. /* The VF's permanent mac queried from card is incorrect.
* For BEx: Query the mac configued by the PF using if_handle * For BEx: Query the mac configued by the PF using if_handle
* For Lancer: Get and use mac_list to obtain mac address. * For Lancer: Get and use mac_list to obtain mac address.
...@@ -2705,6 +2615,10 @@ static int be_setup(struct be_adapter *adapter) ...@@ -2705,6 +2615,10 @@ static int be_setup(struct be_adapter *adapter)
} }
} }
status = be_tx_qs_create(adapter);
if (status)
goto err;
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
status = be_vid_config(adapter, false, 0); status = be_vid_config(adapter, false, 0);
...@@ -2744,12 +2658,13 @@ static int be_setup(struct be_adapter *adapter) ...@@ -2744,12 +2658,13 @@ static int be_setup(struct be_adapter *adapter)
static void be_netpoll(struct net_device *netdev) static void be_netpoll(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo; struct be_eq_obj *eqo;
int i; int i;
event_handle(adapter, &adapter->tx_eq, false); for_all_evt_queues(adapter, eqo, i)
for_all_rx_queues(adapter, rxo, i) event_handle(eqo);
event_handle(adapter, &rxo->rx_eq, true);
return;
} }
#endif #endif
...@@ -3110,7 +3025,7 @@ static const struct net_device_ops be_netdev_ops = { ...@@ -3110,7 +3025,7 @@ static const struct net_device_ops be_netdev_ops = {
static void be_netdev_init(struct net_device *netdev) static void be_netdev_init(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo; struct be_eq_obj *eqo;
int i; int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
...@@ -3129,16 +3044,12 @@ static void be_netdev_init(struct net_device *netdev) ...@@ -3129,16 +3044,12 @@ static void be_netdev_init(struct net_device *netdev)
netif_set_gso_max_size(netdev, 65535); netif_set_gso_max_size(netdev, 65535);
BE_SET_NETDEV_OPS(netdev, &be_netdev_ops); netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
for_all_rx_queues(adapter, rxo, i) for_all_evt_queues(adapter, eqo, i)
netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx, netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
BE_NAPI_WEIGHT);
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
} }
static void be_unmap_pci_bars(struct be_adapter *adapter) static void be_unmap_pci_bars(struct be_adapter *adapter)
...@@ -3309,8 +3220,6 @@ static void __devexit be_remove(struct pci_dev *pdev) ...@@ -3309,8 +3220,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_sriov_disable(adapter); be_sriov_disable(adapter);
be_msix_disable(adapter);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
...@@ -3477,6 +3386,7 @@ static void be_worker(struct work_struct *work) ...@@ -3477,6 +3386,7 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter = struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work); container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo; struct be_rx_obj *rxo;
struct be_eq_obj *eqo;
int i; int i;
if (lancer_chip(adapter)) if (lancer_chip(adapter))
...@@ -3487,15 +3397,7 @@ static void be_worker(struct work_struct *work) ...@@ -3487,15 +3397,7 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending /* when interrupts are not yet enabled, just reap any pending
* mcc completions */ * mcc completions */
if (!netif_running(adapter->netdev)) { if (!netif_running(adapter->netdev)) {
int mcc_compl, status = 0; be_process_mcc(adapter);
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
goto reschedule; goto reschedule;
} }
...@@ -3508,14 +3410,15 @@ static void be_worker(struct work_struct *work) ...@@ -3508,14 +3410,15 @@ static void be_worker(struct work_struct *work)
} }
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
be_rx_eqd_update(adapter, rxo);
if (rxo->rx_post_starved) { if (rxo->rx_post_starved) {
rxo->rx_post_starved = false; rxo->rx_post_starved = false;
be_post_rx_frags(rxo, GFP_KERNEL); be_post_rx_frags(rxo, GFP_KERNEL);
} }
} }
for_all_evt_queues(adapter, eqo, i)
be_eqd_update(adapter, eqo);
reschedule: reschedule:
adapter->work_counter++; adapter->work_counter++;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
...@@ -3601,6 +3504,12 @@ static int __devinit be_probe(struct pci_dev *pdev, ...@@ -3601,6 +3504,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status) if (status)
goto ctrl_clean; goto ctrl_clean;
/* The INTR bit may be set in the card when probed by a kdump kernel
* after a crash.
*/
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
status = be_stats_init(adapter); status = be_stats_init(adapter);
if (status) if (status)
goto ctrl_clean; goto ctrl_clean;
...@@ -3609,14 +3518,6 @@ static int __devinit be_probe(struct pci_dev *pdev, ...@@ -3609,14 +3518,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status) if (status)
goto stats_clean; goto stats_clean;
/* The INTR bit may be set in the card when probed by a kdump kernel
* after a crash.
*/
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
be_msix_enable(adapter);
INIT_DELAYED_WORK(&adapter->work, be_worker); INIT_DELAYED_WORK(&adapter->work, be_worker);
adapter->rx_fc = adapter->tx_fc = true; adapter->rx_fc = adapter->tx_fc = true;
...@@ -3629,7 +3530,8 @@ static int __devinit be_probe(struct pci_dev *pdev, ...@@ -3629,7 +3530,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0) if (status != 0)
goto unsetup; goto unsetup;
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0; return 0;
...@@ -3673,7 +3575,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) ...@@ -3673,7 +3575,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
} }
be_clear(adapter); be_clear(adapter);
be_msix_disable(adapter);
pci_save_state(pdev); pci_save_state(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state)); pci_set_power_state(pdev, pci_choose_state(pdev, state));
...@@ -3695,7 +3596,6 @@ static int be_resume(struct pci_dev *pdev) ...@@ -3695,7 +3596,6 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0); pci_set_power_state(pdev, 0);
pci_restore_state(pdev); pci_restore_state(pdev);
be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */ /* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter); status = be_cmd_fw_init(adapter);
if (status) if (status)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment