Commit 1440401e authored by Bruno Randolf, committed by John W. Linville

ath5k: Move tx frame completion into separate function

Clearer separation between queue handling and what we do with completed frames.
Signed-off-by: Bruno Randolf <br1@einfach.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 651d9375
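The change is purely structural: the queue walk (locking, descriptor polling, DMA unmapping) stays in ath5k_tx_processq(), while everything done with a finished frame (rate and status reporting, statistics) moves into the new ath5k_tx_frame_completed(), which receives the skb and the tx status by pointer. As a rough illustration of that split, here is a minimal stand-alone C sketch; the types and names (struct frame, struct tx_status, process_queue, frame_completed) are hypothetical toy stand-ins, not the real ath5k/mac80211 structures:

```c
#include <stdio.h>

/* Toy stand-ins for the driver's real structures (hypothetical). */
struct tx_status { int ok; int retries; };
struct frame     { int id; struct tx_status ts; int done; };

/* "What we do with a completed frame": status reporting only.
 * Plays the role ath5k_tx_frame_completed() plays in the patch. */
static void frame_completed(struct frame *f, const struct tx_status *ts)
{
	printf("frame %d: %s after %d retries\n",
	       f->id, ts->ok ? "acked" : "failed", ts->retries);
}

/* "Queue handling": walk the pending entries, stop at the first one the
 * hardware has not finished, and hand each completed one to the helper.
 * Plays the role ath5k_tx_processq() plays in the patch. */
static void process_queue(struct frame *q, int n)
{
	for (int i = 0; i < n; i++) {
		if (!q[i].done)
			break;                    /* not finished yet, stop here */
		frame_completed(&q[i], &q[i].ts); /* report; queue bookkeeping stays here */
	}
}

int main(void)
{
	struct frame q[] = {
		{ .id = 0, .ts = { .ok = 1, .retries = 0 }, .done = 1 },
		{ .id = 1, .ts = { .ok = 0, .retries = 3 }, .done = 1 },
		{ .id = 2, .done = 0 },
	};
	process_queue(q, 3);
	return 0;
}
```

The point mirrored from the patch is that the queue walker only decides which entries are finished, and the completion helper only reports them, so either side can change without touching the other.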
@@ -1516,57 +1516,24 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 static void
-ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
+			 struct ath5k_tx_status *ts)
 {
-	struct ath5k_tx_status ts = {};
-	struct ath5k_buf *bf, *bf0;
-	struct ath5k_desc *ds;
-	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
-	int i, ret;
-	spin_lock(&txq->lock);
-	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
-		ds = bf->desc;
-		/*
-		 * It's possible that the hardware can say the buffer is
-		 * completed when it hasn't yet loaded the ds_link from
-		 * host memory and moved on. If there are more TX
-		 * descriptors in the queue, wait for TXDP to change
-		 * before processing this one.
-		 */
-		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
-		    !list_is_last(&bf->list, &txq->q))
-			break;
-		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
-		if (unlikely(ret == -EINPROGRESS))
-			break;
-		else if (unlikely(ret)) {
-			ATH5K_ERR(sc, "error %d while processing queue %u\n",
-				ret, txq->qnum);
-			break;
-		}
+	int i;
 	sc->stats.tx_all_count++;
-		skb = bf->skb;
 	info = IEEE80211_SKB_CB(skb);
-		bf->skb = NULL;
-		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
-				PCI_DMA_TODEVICE);
 	ieee80211_tx_info_clear_status(info);
 	for (i = 0; i < 4; i++) {
 		struct ieee80211_tx_rate *r =
 			&info->status.rates[i];
-			if (ts.ts_rate[i]) {
-				r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
-				r->count = ts.ts_retry[i];
+		if (ts->ts_rate[i]) {
+			r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
+			r->count = ts->ts_retry[i];
 		} else {
 			r->idx = -1;
 			r->count = 0;
@@ -1574,21 +1541,21 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 	}
 	/* count the successful attempt as well */
-		info->status.rates[ts.ts_final_idx].count++;
+	info->status.rates[ts->ts_final_idx].count++;
-		if (unlikely(ts.ts_status)) {
+	if (unlikely(ts->ts_status)) {
 		sc->stats.ack_fail++;
-			if (ts.ts_status & AR5K_TXERR_FILT) {
+		if (ts->ts_status & AR5K_TXERR_FILT) {
 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 			sc->stats.txerr_filt++;
 		}
-			if (ts.ts_status & AR5K_TXERR_XRETRY)
+		if (ts->ts_status & AR5K_TXERR_XRETRY)
 			sc->stats.txerr_retry++;
-			if (ts.ts_status & AR5K_TXERR_FIFO)
+		if (ts->ts_status & AR5K_TXERR_FIFO)
 			sc->stats.txerr_fifo++;
 	} else {
 		info->flags |= IEEE80211_TX_STAT_ACK;
-			info->status.ack_signal = ts.ts_rssi;
+		info->status.ack_signal = ts->ts_rssi;
 	}
 	/*
@@ -1597,12 +1564,52 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 	 */
 	ath5k_remove_padding(skb);
-		if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
-			sc->stats.antenna_tx[ts.ts_antenna]++;
+	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
+		sc->stats.antenna_tx[ts->ts_antenna]++;
 	else
 		sc->stats.antenna_tx[0]++; /* invalid */
 	ieee80211_tx_status(sc->hw, skb);
+}
+static void
+ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+{
+	struct ath5k_tx_status ts = {};
+	struct ath5k_buf *bf, *bf0;
+	struct ath5k_desc *ds;
+	struct sk_buff *skb;
+	int ret;
+	spin_lock(&txq->lock);
+	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+		ds = bf->desc;
+		/*
+		 * It's possible that the hardware can say the buffer is
+		 * completed when it hasn't yet loaded the ds_link from
+		 * host memory and moved on. If there are more TX
+		 * descriptors in the queue, wait for TXDP to change
+		 * before processing this one.
+		 */
+		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+		    !list_is_last(&bf->list, &txq->q))
+			break;
+		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
+		if (unlikely(ret == -EINPROGRESS))
+			break;
+		else if (unlikely(ret)) {
+			ATH5K_ERR(sc, "error %d while processing queue %u\n",
+				ret, txq->qnum);
+			break;
+		}
+		skb = bf->skb;
+		bf->skb = NULL;
+		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
+				PCI_DMA_TODEVICE);
+		ath5k_tx_frame_completed(sc, skb, &ts);
 		spin_lock(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
......