Commit 126fca64 authored by Hariprasad Shenai, committed by David S. Miller

cxgb4: prevent simultaneous execution of service_ofldq()

Change the mutual exclusion mechanism to prevent multiple threads of
execution from running in service_ofldq() at the same time. The old
mechanism used an implicit guard on the down-call path, none at all on
the restart path, and wasn't working. The new check makes the mechanism
explicit and, as a result, much easier to understand.
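
A minimal, self-contained sketch of the pattern the patch adopts (hypothetical
names; pthread locking stands in for the driver's sendq spinlock, and this is
not the driver code itself): a flag that is only read and written while
holding the queue lock guarantees a single servicing thread, which may then
safely drop the lock for the hardware-facing work.

    #include <pthread.h>
    #include <stdbool.h>

    struct txq {
            pthread_mutex_t lock;   /* stands in for q->sendq.lock */
            bool running;           /* stands in for service_ofldq_running */
            unsigned int qlen;      /* pending entries, stands in for sendq */
    };

    /* Called with q->lock held, mirroring service_ofldq()'s contract. */
    static void service_queue(struct txq *q)
    {
            /* The flag is only examined/modified under q->lock, so this
             * check-and-set cannot race: at most one servicer at a time.
             */
            if (q->running)
                    return;
            q->running = true;

            while (q->qlen != 0) {
                    /* Drop the lock for the slow hardware-facing step;
                     * the flag keeps other threads out of this loop.
                     */
                    pthread_mutex_unlock(&q->lock);
                    /* ... hand the head entry to the hardware here ... */
                    pthread_mutex_lock(&q->lock);
                    q->qlen--;      /* unlink the completed entry */
            }

            q->running = false;     /* allow the next servicer in */
    }

    /* Producer path, echoing ofld_xmit(): enqueue, then kick the servicer
     * only if our entry made the queue non-empty.
     */
    static void queue_and_service(struct txq *q)
    {
            pthread_mutex_lock(&q->lock);
            q->qlen++;
            if (q->qlen == 1)
                    service_queue(q);
            pthread_mutex_unlock(&q->lock);
    }

If a second thread enqueues while the servicer has dropped the lock, it
either sees qlen > 1 and does nothing, or calls service_queue() and bounces
off the flag; either way the running servicer picks the entry up on its next
pass. This explicit guard is what the old scheme lacked on the restart path.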

Based on original work by Casey Leedom <leedom@chelsio.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 632be194
@@ -618,6 +618,7 @@ struct sge_ofld_txq { /* state for an SGE offload Tx queue */
 	struct adapter *adap;
 	struct sk_buff_head sendq;          /* list of backpressured packets */
 	struct tasklet_struct qresume_tsk;  /* restarts the queue */
+	bool service_ofldq_running;         /* service_ofldq() is processing sendq */
 	u8 full;                            /* the Tx ring is full */
 	unsigned long mapping_err;          /* # of I/O MMU packet mapping errors */
 } ____cacheline_aligned_in_smp;

@@ -1542,11 +1542,22 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
 }
 
 /**
- *	service_ofldq - restart a suspended offload queue
+ *	service_ofldq - service/restart a suspended offload queue
 *	@q: the offload queue
 *
- *	Services an offload Tx queue by moving packets from its packet queue
- *	to the HW Tx ring.  The function starts and ends with the queue locked.
+ *	Services an offload Tx queue by moving packets from its Pending Send
+ *	Queue to the Hardware TX ring.  The function starts and ends with the
+ *	Send Queue locked, but drops the lock while putting the skb at the
+ *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
+ *	allows more skbs to be added to the Send Queue by other threads.
+ *	The packet being processed at the head of the Pending Send Queue is
+ *	left on the queue in case we experience DMA Mapping errors, etc.
+ *	and need to give up and restart later.
+ *
+ *	service_ofldq() can be thought of as a task which opportunistically
+ *	uses other threads' execution contexts.  We use the Offload Queue
+ *	boolean "service_ofldq_running" to make sure that only one instance
+ *	is ever running at a time ...
 */
 static void service_ofldq(struct sge_ofld_txq *q)
 {

@@ -1556,10 +1567,23 @@ static void service_ofldq(struct sge_ofld_txq *q)
 	unsigned int written = 0;
 	unsigned int flits, ndesc;
 
+	/* If another thread is currently in service_ofldq() processing the
+	 * Pending Send Queue then there's nothing to do. Otherwise, flag
+	 * that we're doing the work and continue.  Examining/modifying
+	 * the Offload Queue boolean "service_ofldq_running" must be done
+	 * while holding the Pending Send Queue Lock.
+	 */
+	if (q->service_ofldq_running)
+		return;
+	q->service_ofldq_running = true;
+
 	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
-		/*
-		 * We drop the lock but leave skb on sendq, thus retaining
-		 * exclusive access to the state of the queue.
+		/* We drop the lock while we're working with the skb at the
+		 * head of the Pending Send Queue.  This allows more skbs to
+		 * be added to the Pending Send Queue while we're working on
+		 * this one.  We don't need to lock to guard the TX Ring
+		 * updates because only one thread of execution is ever
+		 * allowed into service_ofldq() at a time.
 		 */
 		spin_unlock(&q->sendq.lock);

@@ -1604,6 +1628,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
 			written = 0;
 		}
 
+		/* Reacquire the Pending Send Queue Lock so we can unlink the
+		 * skb we've just successfully transferred to the TX Ring and
+		 * loop for the next skb which may be at the head of the
+		 * Pending Send Queue.
+		 */
 		spin_lock(&q->sendq.lock);
 		__skb_unlink(skb, &q->sendq);
 		if (is_ofld_imm(skb))

@@ -1611,6 +1640,11 @@ static void service_ofldq(struct sge_ofld_txq *q)
 	}
 
 	if (likely(written))
 		ring_tx_db(q->adap, &q->q, written);
+
+	/* Indicate that no thread is processing the Pending Send Queue
+	 * currently.
+	 */
+	q->service_ofldq_running = false;
 }
 
 /**

@@ -1624,9 +1658,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
 {
 	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
 	spin_lock(&q->sendq.lock);
+
+	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
+	 * that results in this new skb being the only one on the queue, start
+	 * servicing it.  If there are other skbs already on the list, then
+	 * either the queue is currently being processed or it's been stopped
+	 * for some reason and it'll be restarted at a later time.  Restart
+	 * paths are triggered by events like experiencing a DMA Mapping Error
+	 * or filling the Hardware TX Ring.
+	 */
 	__skb_queue_tail(&q->sendq, skb);
 	if (q->sendq.qlen == 1)
 		service_ofldq(q);
+
 	spin_unlock(&q->sendq.lock);
 	return NET_XMIT_SUCCESS;
 }