Commit 482c9e79 authored by Sathya Perla's avatar Sathya Perla Committed by David S. Miller

be2net: create/destroy rx-queues on interface open/close

On some SKUs, the BE card sends pause frames (instead of dropping packets) if there are
no more posted buffers available for packet reception.  This behaviour has a
side effect: When an interface is disabled, buffers are no longer posted on the
corresponding RX rings. All broadcast and multicast traffic being received on
the port will quickly fill up the PMEM and cause pause push back. As the PMEM
is shared by both the ports, all traffic being received on the other (enabled)
port also gets stalled.
The fix is to destroy RX rings when the interface is disabled. If there is no
RX ring match in the RXF lookup, the packets are discarded and so don't hog the
PMEM.
The RXQ creation cmds must now use MCC instead of MBOX as they are called
post MCC queue creation.
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2b3f291b
...@@ -984,7 +984,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, ...@@ -984,7 +984,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
return status; return status;
} }
/* Uses mbox */ /* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter, int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, u16 frag_size, struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id) u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
...@@ -994,10 +994,13 @@ int be_cmd_rxq_create(struct be_adapter *adapter, ...@@ -994,10 +994,13 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem; struct be_dma_mem *q_mem = &rxq->dma_mem;
int status; int status;
if (mutex_lock_interruptible(&adapter->mbox_lock)) spin_lock_bh(&adapter->mcc_lock);
return -1;
wrb = wrb_from_mbox(adapter); wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb); req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
...@@ -1014,7 +1017,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, ...@@ -1014,7 +1017,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req->max_frame_size = cpu_to_le16(max_frame_size); req->max_frame_size = cpu_to_le16(max_frame_size);
req->rss_queue = cpu_to_le32(rss); req->rss_queue = cpu_to_le32(rss);
status = be_mbox_notify_wait(adapter); status = be_mcc_notify_wait(adapter);
if (!status) { if (!status) {
struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
rxq->id = le16_to_cpu(resp->id); rxq->id = le16_to_cpu(resp->id);
...@@ -1022,8 +1025,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter, ...@@ -1022,8 +1025,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
*rss_id = resp->rss_id; *rss_id = resp->rss_id;
} }
mutex_unlock(&adapter->mbox_lock); err:
spin_unlock_bh(&adapter->mcc_lock);
return status; return status;
} }
...@@ -1078,9 +1081,40 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, ...@@ -1078,9 +1081,40 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
req->id = cpu_to_le16(q->id); req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter); status = be_mbox_notify_wait(adapter);
if (!status)
q->created = false;
mutex_unlock(&adapter->mbox_lock); mutex_unlock(&adapter->mbox_lock);
return status;
}
/* Destroy an RX queue via an MCC (not MBOX) command, so it can be
 * issued after the MCC queue itself has been created.
 * Holds mcc_lock for the duration of the command.
 */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_cmd_req_q_destroy *req;
	struct be_mcc_wrb *wrb;
	int status = -EBUSY;	/* returned if no free WRB is available */

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (wrb) {
		req = embedded_payload(wrb);

		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
				OPCODE_ETH_RX_DESTROY);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
				OPCODE_ETH_RX_DESTROY, sizeof(*req));
		req->id = cpu_to_le16(q->id);

		status = be_mcc_notify_wait(adapter);
		if (!status)
			q->created = false;
	}

	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
......
...@@ -1482,6 +1482,8 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter, ...@@ -1482,6 +1482,8 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
u32 rss, u8 *rss_id); u32 rss, u8 *rss_id);
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type); int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
extern int be_cmd_link_status_query(struct be_adapter *adapter, extern int be_cmd_link_status_query(struct be_adapter *adapter,
bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom); bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter); extern int be_cmd_reset(struct be_adapter *adapter);
......
...@@ -1572,6 +1572,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) ...@@ -1572,6 +1572,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
memset(page_info, 0, sizeof(*page_info)); memset(page_info, 0, sizeof(*page_info));
} }
BUG_ON(atomic_read(&rxq->used)); BUG_ON(atomic_read(&rxq->used));
rxq->tail = rxq->head = 0;
} }
static void be_tx_compl_clean(struct be_adapter *adapter, static void be_tx_compl_clean(struct be_adapter *adapter,
...@@ -1752,29 +1753,16 @@ static void be_rx_queues_destroy(struct be_adapter *adapter) ...@@ -1752,29 +1753,16 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
int i; int i;
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q; be_queue_free(adapter, &rxo->q);
if (q->created) {
be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
/* After the rxq is invalidated, wait for a grace time
* of 1ms for all dma to end and the flush compl to
* arrive
*/
mdelay(1);
be_rx_q_clean(adapter, rxo);
}
be_queue_free(adapter, q);
q = &rxo->cq; q = &rxo->cq;
if (q->created) if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q); be_queue_free(adapter, q);
/* Clear any residual events */
q = &rxo->rx_eq.q; q = &rxo->rx_eq.q;
if (q->created) { if (q->created)
be_eq_clean(adapter, &rxo->rx_eq);
be_cmd_q_destroy(adapter, q, QTYPE_EQ); be_cmd_q_destroy(adapter, q, QTYPE_EQ);
}
be_queue_free(adapter, q); be_queue_free(adapter, q);
} }
} }
...@@ -1833,30 +1821,14 @@ static int be_rx_queues_create(struct be_adapter *adapter) ...@@ -1833,30 +1821,14 @@ static int be_rx_queues_create(struct be_adapter *adapter)
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
if (rc) if (rc)
goto err; goto err;
/* Rx Q */
/* Rx Q - will be created in be_open() */
q = &rxo->q; q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN, rc = be_queue_alloc(adapter, q, RX_Q_LEN,
sizeof(struct be_eth_rx_d)); sizeof(struct be_eth_rx_d));
if (rc) if (rc)
goto err; goto err;
rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
if (rc)
goto err;
}
if (be_multi_rxq(adapter)) {
u8 rsstable[MAX_RSS_QS];
for_all_rss_queues(adapter, rxo, i)
rsstable[i] = rxo->rss_id;
rc = be_cmd_rss_config(adapter, rsstable,
adapter->num_rx_qs - 1);
if (rc)
goto err;
} }
return 0; return 0;
...@@ -2302,6 +2274,31 @@ static void be_irq_unregister(struct be_adapter *adapter) ...@@ -2302,6 +2274,31 @@ static void be_irq_unregister(struct be_adapter *adapter)
adapter->isr_registered = false; adapter->isr_registered = false;
} }
/* Tear down the hardware RX rings (called on interface close) so the
 * RXF discards incoming frames instead of pausing the port, and drain
 * any residual completions/events left on the rings.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	struct be_queue_info *rxq;
	struct be_queue_info *eqq;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		rxq = &rxo->q;
		if (rxq->created) {
			be_cmd_rxq_destroy(adapter, rxq);
			/* After the rxq is invalidated, wait a 1ms grace
			 * time for all DMA to end and the flush completion
			 * to arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		eqq = &rxo->rx_eq.q;
		if (eqq->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
static int be_close(struct net_device *netdev) static int be_close(struct net_device *netdev)
{ {
struct be_adapter *adapter = netdev_priv(netdev); struct be_adapter *adapter = netdev_priv(netdev);
...@@ -2350,6 +2347,40 @@ static int be_close(struct net_device *netdev) ...@@ -2350,6 +2347,40 @@ static int be_close(struct net_device *netdev)
for_all_tx_queues(adapter, txo, i) for_all_tx_queues(adapter, txo, i)
be_tx_compl_clean(adapter, txo); be_tx_compl_clean(adapter, txo);
be_rx_queues_clear(adapter);
return 0;
}
/* Create the hardware RX rings (called on interface open), program the
 * RSS indirection table when multiple RX queues exist, then post the
 * initial receive buffers and enable NAPI on each ring.
 * Returns 0 on success or the firmware command's error status.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	u8 rsstable[MAX_RSS_QS];
	int status, i;

	for_all_rx_queues(adapter, rxo, i) {
		/* RSS is enabled on every ring except the first (default) */
		status = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				adapter->if_handle,
				(i > 0) ? 1 : 0, &rxo->rss_id);
		if (status)
			return status;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		status = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (status)
			return status;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
...@@ -2363,10 +2394,10 @@ static int be_open(struct net_device *netdev) ...@@ -2363,10 +2394,10 @@ static int be_open(struct net_device *netdev)
u8 mac_speed; u8 mac_speed;
u16 link_speed; u16 link_speed;
for_all_rx_queues(adapter, rxo, i) { status = be_rx_queues_setup(adapter);
be_post_rx_frags(rxo, GFP_KERNEL); if (status)
napi_enable(&rxo->rx_eq.napi); goto err;
}
napi_enable(&tx_eq->napi); napi_enable(&tx_eq->napi);
be_irq_register(adapter); be_irq_register(adapter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment