Commit 925e0b06 authored by Bruno Randolf, committed by John W. Linville

ath5k: Use four hardware queues

Prepare ath5k for WME by using four hardware queues.

The queues are set up in the same order as mac80211's queue priorities (VO, VI,
BE, BK), so the mac80211 queue number maps 1:1 onto our hardware queue index and
no translation is needed.
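
As a rough illustration of that 1:1 mapping (a standalone sketch, not driver
code; the enum and struct names below are made up for the example), registering
the queues in mac80211's priority order means the queue number stamped on each
frame can be used directly as an array index:

#include <stdio.h>

/* mac80211 hands the driver a queue number in this priority order. */
enum ac { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3, AC_NUM = 4 };

struct txq_sketch {
	const char *name;
	int qnum;		/* equal to the mac80211 queue number */
};

int main(void)
{
	/* Register the queues in the same order mac80211 uses, so the
	 * frame's queue mapping is already the right array index and
	 * no translation table is needed. */
	struct txq_sketch txqs[AC_NUM] = {
		[AC_VO] = { "VO", AC_VO },
		[AC_VI] = { "VI", AC_VI },
		[AC_BE] = { "BE", AC_BE },
		[AC_BK] = { "BK", AC_BK },
	};

	int mapping = AC_BE;	/* e.g. what skb_get_queue_mapping() would give
				 * for a best-effort frame */
	struct txq_sketch *txq = &txqs[mapping];

	printf("mac80211 queue %d -> hw queue %s (qnum %d)\n",
	       mapping, txq->name, txq->qnum);
	return 0;
}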

Every queue may use up to 50 of the 200 available transmit buffers, so DMA
memory usage does not increase with this patch. The number of buffers per queue
may need fine-tuning later: depending on CPU speed and load, and on how fast
medium access is, 50 buffers per queue might not be enough.
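
A quick standalone check of that buffer accounting (ATH_TXBUF, ATH5K_TXQ_LEN_MAX
and ATH5K_TXQ_LEN_LOW are the limits introduced further down in this patch;
NUM_DATA_QUEUES is just an illustrative name for this sketch): four full queues
together never exceed the shared pool, and a queue is woken again once it drains
below half its cap.

#include <assert.h>
#include <stdio.h>

#define ATH_TXBUF		200			/* total TX buffers */
#define NUM_DATA_QUEUES		4			/* VO, VI, BE, BK */
#define ATH5K_TXQ_LEN_MAX	(ATH_TXBUF / 4)		/* bufs per queue */
#define ATH5K_TXQ_LEN_LOW	(ATH5K_TXQ_LEN_MAX / 2)	/* wake-up mark */

int main(void)
{
	/* Four full queues never exceed the shared buffer pool, so DMA
	 * memory usage stays the same as with a single data queue. */
	assert(NUM_DATA_QUEUES * ATH5K_TXQ_LEN_MAX <= ATH_TXBUF);

	printf("per-queue cap: %d buffers, wake again below %d\n",
	       ATH5K_TXQ_LEN_MAX, ATH5K_TXQ_LEN_LOW);
	return 0;
}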
Signed-off-by: Bruno Randolf <br1@einfach.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 8a63facc
@@ -733,6 +733,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 	spin_lock_bh(&txq->lock);
 	list_add_tail(&bf->list, &txq->q);
+	txq->txq_len++;
 	if (txq->link == NULL) /* is this first packet? */
 		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
 	else /* no, so only link it */
@@ -889,6 +890,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
 		INIT_LIST_HEAD(&txq->q);
 		spin_lock_init(&txq->lock);
 		txq->setup = true;
+		txq->txq_len = 0;
 	}
 	return &sc->txqs[qnum];
 }
@@ -983,6 +985,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 		spin_lock_bh(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
 		sc->txbuf_len++;
+		txq->txq_len--;
 		spin_unlock_bh(&sc->txbuflock);
 	}
 	txq->link = NULL;
@@ -1479,6 +1482,9 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
 		goto drop_packet;
 	}
 
+	if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
+		ieee80211_stop_queue(hw, txq->qnum);
+
 	spin_lock_irqsave(&sc->txbuflock, flags);
 	if (list_empty(&sc->txbuf)) {
 		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
@@ -1601,13 +1607,14 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 		spin_lock(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
 		sc->txbuf_len++;
+		txq->txq_len--;
 		spin_unlock(&sc->txbuflock);
 	}
 	if (likely(list_empty(&txq->q)))
 		txq->link = NULL;
 	spin_unlock(&txq->lock);
-	if (sc->txbuf_len > ATH_TXBUF / 5)
-		ieee80211_wake_queues(sc->hw);
+	if (txq->txq_len < ATH5K_TXQ_LEN_LOW)
+		ieee80211_wake_queue(sc->hw, txq->qnum);
 }
 
 static void
@@ -2391,6 +2398,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_hw *ah = sc->ah;
 	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
+	struct ath5k_txq *txq;
 	u8 mac[ETH_ALEN] = {};
 	int ret;
@@ -2456,12 +2464,33 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 		goto err_bhal;
 	}
 
-	sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
-	if (IS_ERR(sc->txq)) {
+	/* This order matches mac80211's queue priority, so we can
+	 * directly use the mac80211 queue number without any mapping */
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+	if (IS_ERR(txq)) {
 		ATH5K_ERR(sc, "can't setup xmit queue\n");
-		ret = PTR_ERR(sc->txq);
+		ret = PTR_ERR(txq);
 		goto err_queues;
 	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	hw->queues = 4;
 
 	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
 	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
@@ -2554,8 +2583,14 @@ static int
 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ath5k_softc *sc = hw->priv;
+	u16 qnum = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
 
-	return ath5k_tx_queue(hw, skb, sc->txq);
+	return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
 }
 
 static int ath5k_start(struct ieee80211_hw *hw)
@@ -60,6 +60,9 @@
 #define ATH_TXBUF	200		/* number of TX buffers */
 #define ATH_BCBUF	1		/* number of beacon buffers */
+#define ATH5K_TXQ_LEN_MAX	(ATH_TXBUF / 4)		/* bufs per queue */
+#define ATH5K_TXQ_LEN_LOW	(ATH5K_TXQ_LEN_MAX / 2)	/* low mark */
 
 struct ath5k_buf {
 	struct list_head list;
 	struct ath5k_desc *desc;	/* virtual addr of desc */
@@ -83,6 +86,7 @@ struct ath5k_txq {
 	struct list_head q;		/* transmit queue */
 	spinlock_t lock;		/* lock on q and link */
 	bool setup;
+	int txq_len;			/* number of queued buffers */
 };
 
 #define ATH5K_LED_MAX_NAME_LEN 31
@@ -204,7 +208,6 @@ struct ath5k_softc {
 	spinlock_t txbuflock;
 	unsigned int txbuf_len;		/* buf count in txbuf list */
 
 	struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES];	/* tx queues */
-	struct ath5k_txq *txq;		/* main tx queue */
 	struct tasklet_struct txtq;	/* tx intr tasklet */
 	struct ath5k_led tx_led;	/* tx led */