Commit 797fe5cb authored by Sujith's avatar Sujith Committed by John W. Linville

ath9k: Remove the useless do..while loops

These are unnecessary constructs in a function.
This patch removes these from both RX and TX init
routines.
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 4658b985
...@@ -340,7 +340,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); ...@@ -340,7 +340,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an); void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs); int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_tx_cleanup(struct ath_softc *sc); void ath_tx_cleanup(struct ath_softc *sc);
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb); struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
int ath_txq_update(struct ath_softc *sc, int qnum, int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q); struct ath9k_tx_queue_info *q);
......
...@@ -283,54 +283,51 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) ...@@ -283,54 +283,51 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
struct ath_buf *bf; struct ath_buf *bf;
int error = 0; int error = 0;
do { spin_lock_init(&sc->rx.rxflushlock);
spin_lock_init(&sc->rx.rxflushlock); sc->sc_flags &= ~SC_OP_RXFLUSH;
sc->sc_flags &= ~SC_OP_RXFLUSH; spin_lock_init(&sc->rx.rxbuflock);
spin_lock_init(&sc->rx.rxbuflock);
sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
min(sc->cachelsz,
(u16)64));
DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
sc->cachelsz, sc->rx.bufsize); min(sc->cachelsz, (u16)64));
/* Initialize rx descriptors */ DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
sc->cachelsz, sc->rx.bufsize);
error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, /* Initialize rx descriptors */
"rx", nbufs, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
"failed to allocate rx descriptors: %d\n", error);
break;
}
list_for_each_entry(bf, &sc->rx.rxbuf, list) { error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL); "rx", nbufs, 1);
if (skb == NULL) { if (error != 0) {
error = -ENOMEM; DPRINTF(sc, ATH_DBG_FATAL,
break; "failed to allocate rx descriptors: %d\n", error);
} goto err;
}
bf->bf_mpdu = skb; list_for_each_entry(bf, &sc->rx.rxbuf, list) {
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
sc->rx.bufsize, if (skb == NULL) {
DMA_FROM_DEVICE); error = -ENOMEM;
if (unlikely(dma_mapping_error(sc->dev, goto err;
bf->bf_buf_addr))) {
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
DPRINTF(sc, ATH_DBG_FATAL,
"dma_mapping_error() on RX init\n");
error = -ENOMEM;
break;
}
bf->bf_dmacontext = bf->bf_buf_addr;
} }
sc->rx.rxlink = NULL;
} while (0); bf->bf_mpdu = skb;
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
sc->rx.bufsize,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
DPRINTF(sc, ATH_DBG_FATAL,
"dma_mapping_error() on RX init\n");
error = -ENOMEM;
goto err;
}
bf->bf_dmacontext = bf->bf_buf_addr;
}
sc->rx.rxlink = NULL;
err:
if (error) if (error)
ath_rx_cleanup(sc); ath_rx_cleanup(sc);
...@@ -345,10 +342,8 @@ void ath_rx_cleanup(struct ath_softc *sc) ...@@ -345,10 +342,8 @@ void ath_rx_cleanup(struct ath_softc *sc)
list_for_each_entry(bf, &sc->rx.rxbuf, list) { list_for_each_entry(bf, &sc->rx.rxbuf, list) {
skb = bf->bf_mpdu; skb = bf->bf_mpdu;
if (skb) { if (skb) {
dma_unmap_single(sc->dev, dma_unmap_single(sc->dev, bf->bf_buf_addr,
bf->bf_buf_addr, sc->rx.bufsize, DMA_FROM_DEVICE);
sc->rx.bufsize,
DMA_FROM_DEVICE);
dev_kfree_skb(skb); dev_kfree_skb(skb);
} }
} }
......
...@@ -2047,44 +2047,38 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) ...@@ -2047,44 +2047,38 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
{ {
int error = 0; int error = 0;
do { spin_lock_init(&sc->tx.txbuflock);
spin_lock_init(&sc->tx.txbuflock);
error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
"tx", nbufs, 1); "tx", nbufs, 1);
if (error != 0) { if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL, DPRINTF(sc, ATH_DBG_FATAL,
"Failed to allocate tx descriptors: %d\n", "Failed to allocate tx descriptors: %d\n", error);
error); goto err;
break; }
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
"beacon", ATH_BCBUF, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
"Failed to allocate beacon descriptors: %d\n",
error);
break;
}
} while (0); error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
"beacon", ATH_BCBUF, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
"Failed to allocate beacon descriptors: %d\n", error);
goto err;
}
err:
if (error != 0) if (error != 0)
ath_tx_cleanup(sc); ath_tx_cleanup(sc);
return error; return error;
} }
int ath_tx_cleanup(struct ath_softc *sc) void ath_tx_cleanup(struct ath_softc *sc)
{ {
if (sc->beacon.bdma.dd_desc_len != 0) if (sc->beacon.bdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
if (sc->tx.txdma.dd_desc_len != 0) if (sc->tx.txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
return 0;
} }
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment