Commit 238a0f7c authored by Brett Creeley, committed by David S. Miller

ionic: Cleanups in the Tx hotpath code

Buffer DMA mapping happens in ionic_tx_map_skb(), which is called
from both ionic_tx() and ionic_tx_tso(). If ionic_tx_map_skb()
succeeds but a failure is encountered later in ionic_tx() or
ionic_tx_tso(), the buffers are never unmapped. In ionic_tx() this
is fixed by changing the functions it calls to return void, since
they always return 0 and so cannot actually fail. In ionic_tx_tso()
the buffers really can be left mapped, so fix this by introducing
the helper function ionic_tx_desc_unmap_bufs() and calling it on
the error path. The helper is also reused in ionic_tx_clean().

Fixes: 0f3154e6 ("ionic: Add Tx and Rx handling")
Signed-off-by: Brett Creeley <brett@pensando.io>
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 584fb767
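
For context, a minimal self-contained sketch of the DMA rule this fix enforces: a buffer mapped for DMA must be unmapped on every error path taken after the mapping succeeds. This is not the driver code; example_tx() and example_post_descriptor() are hypothetical stand-ins, and only the dma_* calls are the real kernel API.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical post-mapping step that can fail, e.g. filling a Tx descriptor. */
static int example_post_descriptor(struct device *dev, dma_addr_t addr, size_t len);

static int example_tx(struct device *dev, void *data, size_t len)
{
        dma_addr_t addr;
        int err;

        addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -EIO;

        err = example_post_descriptor(dev, addr, len);
        if (err) {
                /* the class of bug fixed here: skipping this unmap leaks the mapping */
                dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
                return err;
        }

        return 0;
}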
@@ -669,27 +669,37 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
         return -EIO;
 }
 
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+                                     struct ionic_desc_info *desc_info)
+{
+        struct ionic_buf_info *buf_info = desc_info->bufs;
+        struct device *dev = q->dev;
+        unsigned int i;
+
+        if (!desc_info->nbufs)
+                return;
+
+        dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+                         buf_info->len, DMA_TO_DEVICE);
+        buf_info++;
+        for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+                dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+                               buf_info->len, DMA_TO_DEVICE);
+
+        desc_info->nbufs = 0;
+}
+
 static void ionic_tx_clean(struct ionic_queue *q,
                            struct ionic_desc_info *desc_info,
                            struct ionic_cq_info *cq_info,
                            void *cb_arg)
 {
-        struct ionic_buf_info *buf_info = desc_info->bufs;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
         struct ionic_qcq *qcq = q_to_qcq(q);
         struct sk_buff *skb = cb_arg;
-        struct device *dev = q->dev;
-        unsigned int i;
         u16 qi;
 
-        if (desc_info->nbufs) {
-                dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
-                                 buf_info->len, DMA_TO_DEVICE);
-                buf_info++;
-                for (i = 1; i < desc_info->nbufs; i++, buf_info++)
-                        dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
-                                       buf_info->len, DMA_TO_DEVICE);
-        }
+        ionic_tx_desc_unmap_bufs(q, desc_info);
 
         if (!skb)
                 return;
@@ -931,8 +941,11 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
                 err = ionic_tx_tcp_inner_pseudo_csum(skb);
         else
                 err = ionic_tx_tcp_pseudo_csum(skb);
-        if (err)
+        if (err) {
+                /* clean up mapping from ionic_tx_map_skb */
+                ionic_tx_desc_unmap_bufs(q, desc_info);
                 return err;
+        }
 
         if (encap)
                 hdrlen = skb_inner_transport_header(skb) - skb->data +
@@ -1003,8 +1016,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
         return 0;
 }
 
-static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
-                              struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
+                               struct ionic_desc_info *desc_info)
 {
         struct ionic_txq_desc *desc = desc_info->txq_desc;
         struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1038,12 +1051,10 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
                 stats->crc32_csum++;
         else
                 stats->csum++;
-
-        return 0;
 }
 
-static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
-                                 struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
+                                  struct ionic_desc_info *desc_info)
 {
         struct ionic_txq_desc *desc = desc_info->txq_desc;
         struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1074,12 +1085,10 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
         desc->csum_offset = 0;
 
         stats->csum_none++;
-
-        return 0;
 }
 
-static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
-                              struct ionic_desc_info *desc_info)
+static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
+                               struct ionic_desc_info *desc_info)
 {
         struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
         struct ionic_buf_info *buf_info = &desc_info->bufs[1];
@@ -1093,31 +1102,24 @@ static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
         }
 
         stats->frags += skb_shinfo(skb)->nr_frags;
-
-        return 0;
 }
 
 static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
 {
         struct ionic_desc_info *desc_info = &q->info[q->head_idx];
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        int err;
 
         if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
                 return -EIO;
 
         /* set up the initial descriptor */
         if (skb->ip_summed == CHECKSUM_PARTIAL)
-                err = ionic_tx_calc_csum(q, skb, desc_info);
+                ionic_tx_calc_csum(q, skb, desc_info);
         else
-                err = ionic_tx_calc_no_csum(q, skb, desc_info);
-        if (err)
-                return err;
+                ionic_tx_calc_no_csum(q, skb, desc_info);
 
         /* add frags */
-        err = ionic_tx_skb_frags(q, skb, desc_info);
-        if (err)
-                return err;
+        ionic_tx_skb_frags(q, skb, desc_info);
 
         skb_tx_timestamp(skb);
         stats->pkts++;
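
One observation on the design, drawn from the diff above: ionic_tx_desc_unmap_bufs() zeroes desc_info->nbufs once the buffers are unmapped and returns early when nbufs is already 0, so the TSO error path and the later ionic_tx_clean() completion path can both call it without risking a double unmap.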