Commit b6ea0356 authored by Bob Copeland, committed by John W. Linville

ath5k: fix bf->skb==NULL panic in ath5k_tasklet_rx

Under memory pressure, we may not be able to allocate a new skb for
new packets.  If the allocation fails, ath5k_tasklet_rx will exit but
will leave a buffer in the list with a NULL skb, eventually triggering
a BUG_ON.

Extract the skb allocation from ath5k_rxbuf_setup() and change the
tasklet to allocate the next skb before accepting a packet.

Changes-licensed-under: 3-Clause-BSD
Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 138ab2e4
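
The heart of the fix is an ordering change in the RX path: a replacement buffer is allocated first, and the received frame is accepted only once that allocation has succeeded, so the buffer list never holds a NULL skb. Below is a minimal sketch of that pattern; rx_buf, alloc_rx_skb() and deliver() are hypothetical stand-ins, not the real driver types and helpers shown in the diff that follows.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Hypothetical stand-ins for ath5k_buf, ath5k_rx_skb_alloc() and the
 * mac80211 hand-off; not the real ath5k definitions. */
struct rx_buf {
	struct sk_buff *skb;
	dma_addr_t skbaddr;
};

struct sk_buff *alloc_rx_skb(dma_addr_t *addr);
void deliver(struct sk_buff *skb);

static void rx_one(struct rx_buf *bf)
{
	struct sk_buff *next_skb;
	dma_addr_t next_skb_addr;

	/* Allocate the replacement *before* accepting the frame. */
	next_skb = alloc_rx_skb(&next_skb_addr);
	if (!next_skb)
		return;	/* under memory pressure, drop the frame; bf->skb stays valid */

	deliver(bf->skb);	/* hand the received frame up the stack */

	/* Swap in the replacement only after it is known to exist. */
	bf->skb = next_skb;
	bf->skbaddr = next_skb_addr;
}

Contrast this with the old code, which set bf->skb = NULL on accept and relied on a later ath5k_rxbuf_setup() call to refill it; if that later allocation failed, the NULL skb stayed in the list and eventually tripped the BUG_ON.
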
@@ -1096,6 +1096,42 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
 * Buffers setup *
 \***************/
 
+static
+struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
+{
+	struct sk_buff *skb;
+	unsigned int off;
+
+	/*
+	 * Allocate buffer with headroom_needed space for the
+	 * fake physical layer header at the start.
+	 */
+	skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
+	if (!skb) {
+		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
+				sc->rxbufsize + sc->cachelsz - 1);
+		return NULL;
+	}
+
+	/*
+	 * Cache-line-align.  This is important (for the
+	 * 5210 at least) as not doing so causes bogus data
+	 * in rx'd frames.
+	 */
+	off = ((unsigned long)skb->data) % sc->cachelsz;
+	if (off != 0)
+		skb_reserve(skb, sc->cachelsz - off);
+
+	*skb_addr = pci_map_single(sc->pdev,
+		skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
+		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
+		dev_kfree_skb(skb);
+		return NULL;
+	}
+	return skb;
+}
+
 static int
 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
@@ -1103,37 +1139,11 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 	struct sk_buff *skb = bf->skb;
 	struct ath5k_desc *ds;
 
-	if (likely(skb == NULL)) {
-		unsigned int off;
-
-		/*
-		 * Allocate buffer with headroom_needed space for the
-		 * fake physical layer header at the start.
-		 */
-		skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
-		if (unlikely(skb == NULL)) {
-			ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
-				sc->rxbufsize + sc->cachelsz - 1);
+	if (!skb) {
+		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
+		if (!skb)
 			return -ENOMEM;
-		}
-
-		/*
-		 * Cache-line-align.  This is important (for the
-		 * 5210 at least) as not doing so causes bogus data
-		 * in rx'd frames.
-		 */
-		off = ((unsigned long)skb->data) % sc->cachelsz;
-		if (off != 0)
-			skb_reserve(skb, sc->cachelsz - off);
 
 		bf->skb = skb;
-		bf->skbaddr = pci_map_single(sc->pdev,
-			skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
-			ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
-			dev_kfree_skb(skb);
-			bf->skb = NULL;
-			return -ENOMEM;
-		}
 	}
 
 	/*
@@ -1662,7 +1672,8 @@ ath5k_tasklet_rx(unsigned long data)
 {
 	struct ieee80211_rx_status rxs = {};
 	struct ath5k_rx_status rs = {};
-	struct sk_buff *skb;
+	struct sk_buff *skb, *next_skb;
+	dma_addr_t next_skb_addr;
 	struct ath5k_softc *sc = (void *)data;
 	struct ath5k_buf *bf, *bf_last;
 	struct ath5k_desc *ds;
@@ -1747,10 +1758,17 @@ ath5k_tasklet_rx(unsigned long data)
 			goto next;
 		}
 accept:
+		next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+
+		/*
+		 * If we can't replace bf->skb with a new skb under memory
+		 * pressure, just skip this packet
+		 */
+		if (!next_skb)
+			goto next;
+
 		pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
 				PCI_DMA_FROMDEVICE);
-		bf->skb = NULL;
 		skb_put(skb, rs.rs_datalen);
 
 		/* The MAC header is padded to have 32-bit boundary if the
@@ -1823,6 +1841,9 @@ ath5k_tasklet_rx(unsigned long data)
 			ath5k_check_ibss_tsf(sc, skb, &rxs);
 
 		__ieee80211_rx(sc->hw, skb, &rxs);
 
+		bf->skb = next_skb;
+		bf->skbaddr = next_skb_addr;
+
 next:
 		list_move_tail(&bf->list, &sc->rxbuf);
 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
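
The cache-line alignment in ath5k_rx_skb_alloc() above over-allocates by cachelsz - 1 bytes and then advances skb->data to the next cache-line boundary, since a misaligned buffer produces bogus data in received frames on at least the 5210. A standalone userspace illustration of the same arithmetic, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cachelsz = 32;	/* example cache line size, as sc->cachelsz */
	uintptr_t data = 0x1004;	/* example buffer start, as skb->data */

	/* Same arithmetic as ath5k_rx_skb_alloc(): distance past the
	 * previous boundary, then advance to the next one. */
	unsigned int off = data % cachelsz;	/* 0x1004 % 32 = 4 */
	if (off != 0)
		data += cachelsz - off;		/* advance 28 bytes, like skb_reserve() */

	printf("aligned address: 0x%lx\n", (unsigned long)data);	/* prints 0x1020 */
	return 0;
}

Over-allocating by cachelsz - 1 guarantees the advance (at most cachelsz - 1 bytes) always fits inside the buffer.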