Commit 7755bad9 authored by Ben Greear, committed by John W. Linville

ath9k: Try more than one queue when scheduling new aggregate.

Try all xmit queues until the hardware buffers are full.
Signed-off-by: Ben Greear <greearb@candelatech.com>
Acked-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 55f6d0ff
...@@ -1222,49 +1222,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) ...@@ -1222,49 +1222,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
sc->tx.txqsetup &= ~(1<<txq->axq_qnum); sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
} }
/* For each axq_acq entry, for each tid, try to schedule packets
* for transmit until ampdu_depth has reached min Q depth.
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{ {
struct ath_atx_ac *ac; struct ath_atx_ac *ac, *ac_tmp, *last_ac;
struct ath_atx_tid *tid, *last; struct ath_atx_tid *tid, *last_tid;
if (list_empty(&txq->axq_acq) || if (list_empty(&txq->axq_acq) ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return; return;
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
list_del(&ac->list);
ac->sched = false;
do { list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
if (list_empty(&ac->tid_q)) last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
return; list_del(&ac->list);
ac->sched = false;
tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); while (!list_empty(&ac->tid_q)) {
list_del(&tid->list); tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
tid->sched = false; list);
list_del(&tid->list);
tid->sched = false;
if (tid->paused) if (tid->paused)
continue; continue;
ath_tx_sched_aggr(sc, txq, tid); ath_tx_sched_aggr(sc, txq, tid);
/* /*
* add tid to round-robin queue if more frames * add tid to round-robin queue if more frames
* are pending for the tid * are pending for the tid
*/ */
if (!list_empty(&tid->buf_q)) if (!list_empty(&tid->buf_q))
ath_tx_queue_tid(txq, tid); ath_tx_queue_tid(txq, tid);
if (tid == last || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) if (tid == last_tid ||
break; txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
} while (!list_empty(&ac->tid_q)); break;
}
if (!list_empty(&ac->tid_q)) { if (!list_empty(&ac->tid_q)) {
if (!ac->sched) { if (!ac->sched) {
ac->sched = true; ac->sched = true;
list_add_tail(&ac->list, &txq->axq_acq); list_add_tail(&ac->list, &txq->axq_acq);
}
} }
if (ac == last_ac ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment