Commit 1440401e authored by Bruno Randolf, committed by John W. Linville

ath5k: Move tx frame completion into separate function

Clearer separation between queue handling and what we do with completed frames.
Signed-off-by: Bruno Randolf <br1@einfach.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 651d9375
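
The change is purely structural: ath5k_tx_processq keeps the queue lock and the DMA descriptor walk, while the new ath5k_tx_frame_completed receives an already-unmapped skb together with its ath5k_tx_status and does only the mac80211 status reporting. A minimal standalone sketch of the same queue-walk/completion-handler split, using simplified stand-in types rather than the real ath5k or mac80211 structures:

```c
/* Standalone illustration of the pattern in this commit: queue walking
 * stays in one function, per-frame completion reporting moves to another.
 * All names and types here are simplified stand-ins, not the real
 * ath5k/mac80211 API. */
#include <stdio.h>

struct tx_status {              /* stand-in for struct ath5k_tx_status */
        int ok;
        int retries;
};

struct tx_buf {                 /* stand-in for struct ath5k_buf */
        int frame_id;
        int completed;          /* has the "hardware" finished this one? */
        struct tx_status ts;
};

/* What to do with one finished frame (cf. ath5k_tx_frame_completed):
 * no queue or DMA knowledge, just status reporting. */
static void tx_frame_completed(int frame_id, const struct tx_status *ts)
{
        printf("frame %d: %s after %d retries\n",
               frame_id, ts->ok ? "acked" : "failed", ts->retries);
}

/* Queue handling (cf. ath5k_tx_processq): walk the buffers, stop at the
 * first one still owned by the hardware, hand finished ones to the helper. */
static void tx_processq(struct tx_buf *q, int n)
{
        for (int i = 0; i < n; i++) {
                if (!q[i].completed)
                        break;  /* cf. the -EINPROGRESS early exit */
                tx_frame_completed(q[i].frame_id, &q[i].ts);
        }
}

int main(void)
{
        struct tx_buf q[] = {
                { .frame_id = 1, .completed = 1, .ts = { .ok = 1, .retries = 0 } },
                { .frame_id = 2, .completed = 1, .ts = { .ok = 0, .retries = 3 } },
                { .frame_id = 3, .completed = 0 },  /* still owned by "hardware" */
        };

        tx_processq(q, 3);
        return 0;
}
```

In the actual diff below, the code that used to be inlined in the loop collapses into the single call ath5k_tx_frame_completed(sc, skb, &ts) at the end of each iteration of ath5k_tx_processq.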
```diff
@@ -1516,57 +1516,24 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
 static void
-ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
+		       struct ath5k_tx_status *ts)
 {
-	struct ath5k_tx_status ts = {};
-	struct ath5k_buf *bf, *bf0;
-	struct ath5k_desc *ds;
-	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
-	int i, ret;
+	int i;
 
-	spin_lock(&txq->lock);
-	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
-		ds = bf->desc;
-
-		/*
-		 * It's possible that the hardware can say the buffer is
-		 * completed when it hasn't yet loaded the ds_link from
-		 * host memory and moved on. If there are more TX
-		 * descriptors in the queue, wait for TXDP to change
-		 * before processing this one.
-		 */
-		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
-		    !list_is_last(&bf->list, &txq->q))
-			break;
-
-		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
-		if (unlikely(ret == -EINPROGRESS))
-			break;
-		else if (unlikely(ret)) {
-			ATH5K_ERR(sc, "error %d while processing queue %u\n",
-				ret, txq->qnum);
-			break;
-		}
-
-		sc->stats.tx_all_count++;
-		skb = bf->skb;
-		info = IEEE80211_SKB_CB(skb);
-		bf->skb = NULL;
-
-		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
-				PCI_DMA_TODEVICE);
+	sc->stats.tx_all_count++;
+	info = IEEE80211_SKB_CB(skb);
 
-		ieee80211_tx_info_clear_status(info);
-		for (i = 0; i < 4; i++) {
-			struct ieee80211_tx_rate *r =
-				&info->status.rates[i];
+	ieee80211_tx_info_clear_status(info);
+	for (i = 0; i < 4; i++) {
+		struct ieee80211_tx_rate *r =
+			&info->status.rates[i];
 
-			if (ts.ts_rate[i]) {
-				r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
-				r->count = ts.ts_retry[i];
-			} else {
-				r->idx = -1;
-				r->count = 0;
+		if (ts->ts_rate[i]) {
+			r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
+			r->count = ts->ts_retry[i];
+		} else {
+			r->idx = -1;
+			r->count = 0;
@@ -1574,21 +1541,21 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
-		}
+		}
+	}
 
-		/* count the successful attempt as well */
-		info->status.rates[ts.ts_final_idx].count++;
+	/* count the successful attempt as well */
+	info->status.rates[ts->ts_final_idx].count++;
 
-		if (unlikely(ts.ts_status)) {
-			sc->stats.ack_fail++;
-			if (ts.ts_status & AR5K_TXERR_FILT) {
-				info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
-				sc->stats.txerr_filt++;
-			}
-			if (ts.ts_status & AR5K_TXERR_XRETRY)
-				sc->stats.txerr_retry++;
-			if (ts.ts_status & AR5K_TXERR_FIFO)
-				sc->stats.txerr_fifo++;
-		} else {
-			info->flags |= IEEE80211_TX_STAT_ACK;
-			info->status.ack_signal = ts.ts_rssi;
-		}
+	if (unlikely(ts->ts_status)) {
+		sc->stats.ack_fail++;
+		if (ts->ts_status & AR5K_TXERR_FILT) {
+			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+			sc->stats.txerr_filt++;
+		}
+		if (ts->ts_status & AR5K_TXERR_XRETRY)
+			sc->stats.txerr_retry++;
+		if (ts->ts_status & AR5K_TXERR_FIFO)
+			sc->stats.txerr_fifo++;
+	} else {
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		info->status.ack_signal = ts->ts_rssi;
+	}
 
-		/*
+	/*
@@ -1597,12 +1564,52 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
-		 */
-		ath5k_remove_padding(skb);
+	 */
+	ath5k_remove_padding(skb);
 
-		if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
-			sc->stats.antenna_tx[ts.ts_antenna]++;
-		else
-			sc->stats.antenna_tx[0]++; /* invalid */
+	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
+		sc->stats.antenna_tx[ts->ts_antenna]++;
+	else
+		sc->stats.antenna_tx[0]++; /* invalid */
 
-		ieee80211_tx_status(sc->hw, skb);
+	ieee80211_tx_status(sc->hw, skb);
+}
+
+static void
+ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+{
+	struct ath5k_tx_status ts = {};
+	struct ath5k_buf *bf, *bf0;
+	struct ath5k_desc *ds;
+	struct sk_buff *skb;
+	int ret;
+
+	spin_lock(&txq->lock);
+	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+		ds = bf->desc;
+
+		/*
+		 * It's possible that the hardware can say the buffer is
+		 * completed when it hasn't yet loaded the ds_link from
+		 * host memory and moved on. If there are more TX
+		 * descriptors in the queue, wait for TXDP to change
+		 * before processing this one.
+		 */
+		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+		    !list_is_last(&bf->list, &txq->q))
+			break;
+
+		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
+		if (unlikely(ret == -EINPROGRESS))
+			break;
+		else if (unlikely(ret)) {
+			ATH5K_ERR(sc, "error %d while processing queue %u\n",
+				ret, txq->qnum);
+			break;
+		}
+
+		skb = bf->skb;
+		bf->skb = NULL;
+		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
+				PCI_DMA_TODEVICE);
+
+		ath5k_tx_frame_completed(sc, skb, &ts);
 
 		spin_lock(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
...
```