Commit 2854242d authored by Shannon Nelson, committed by David S. Miller

ionic: keep stats struct local to error handling

When possible, keep the stats struct references strictly
in the error handling blocks and out of the fastpath.
Reviewed-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 56e41ee1
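For context on the diff below: at every site touched, the q_to_rx_stats()/q_to_tx_stats() lookup moves from function scope into the unlikely() error branch that consumes it. A minimal standalone sketch of the before/after pattern, using simplified stand-in types (queue, rx_stats, malloc) rather than the driver's real struct ionic_queue and accessors:

/*
 * Sketch only: rx_stats and queue are hypothetical stand-ins for the
 * driver's stats structs; malloc() stands in for alloc_pages().
 */
#include <stdlib.h>
#include <errno.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

struct rx_stats {
	unsigned long alloc_err;
};

struct queue {
	struct rx_stats stats;
};

/* Before: the stats reference is prepared on every call, even though
 * it is only consumed when the allocation fails. */
static int buf_alloc_before(struct queue *q, void **buf)
{
	struct rx_stats *stats = &q->stats;

	*buf = malloc(4096);
	if (unlikely(!*buf)) {
		stats->alloc_err++;
		return -ENOMEM;
	}
	return 0;
}

/* After: the lookup sits inside the unlikely() error block, so the
 * successful (fast) path never references the stats struct at all. */
static int buf_alloc_after(struct queue *q, void **buf)
{
	*buf = malloc(4096);
	if (unlikely(!*buf)) {
		q->stats.alloc_err++;
		return -ENOMEM;
	}
	return 0;
}

A compiler can often sink such a load into the cold branch on its own; writing the lookup inside the error block makes the fastpath's independence from the stats struct explicit and drops one local from each hot function.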
@@ -128,19 +128,15 @@ static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
 static int ionic_rx_page_alloc(struct ionic_queue *q,
 			       struct ionic_buf_info *buf_info)
 {
-	struct ionic_rx_stats *stats;
+	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
-	struct device *dev;
 	struct page *page;
 
-	dev = q->dev;
-	stats = q_to_rx_stats(q);
-
 	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
 	if (unlikely(!page)) {
 		net_err_ratelimited("%s: %s page alloc failed\n",
 				    dev_name(dev), q->name);
-		stats->alloc_err++;
+		q_to_rx_stats(q)->alloc_err++;
 		return -ENOMEM;
 	}
@@ -150,7 +146,7 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
 		__free_pages(page, 0);
 		net_err_ratelimited("%s: %s dma map failed\n",
 				    dev_name(dev), q->name);
-		stats->dma_map_err++;
+		q_to_rx_stats(q)->dma_map_err++;
 		return -EIO;
 	}
@@ -233,13 +229,10 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
 					  bool synced)
 {
 	struct ionic_buf_info *buf_info;
-	struct ionic_rx_stats *stats;
 	struct sk_buff *skb;
 	unsigned int i;
 	u16 frag_len;
 
-	stats = q_to_rx_stats(q);
-
 	buf_info = &desc_info->bufs[0];
 	prefetchw(buf_info->page);
@@ -247,7 +240,7 @@ static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
 	if (unlikely(!skb)) {
 		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
 				     dev_name(q->dev), q->name);
-		stats->alloc_err++;
+		q_to_rx_stats(q)->alloc_err++;
 		return NULL;
 	}
@@ -286,19 +279,16 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
 					  bool synced)
 {
 	struct ionic_buf_info *buf_info;
-	struct ionic_rx_stats *stats;
 	struct device *dev = q->dev;
 	struct sk_buff *skb;
 
-	stats = q_to_rx_stats(q);
-
 	buf_info = &desc_info->bufs[0];
 
 	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
 	if (unlikely(!skb)) {
 		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
 				     dev_name(dev), q->name);
-		stats->alloc_err++;
+		q_to_rx_stats(q)->alloc_err++;
 		return NULL;
 	}
@@ -1064,7 +1054,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 				      void *data, size_t len)
 {
-	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
@@ -1072,7 +1061,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 	if (dma_mapping_error(dev, dma_addr)) {
 		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
 				     dev_name(dev), q->name);
-		stats->dma_map_err++;
+		q_to_tx_stats(q)->dma_map_err++;
 		return 0;
 	}
 	return dma_addr;
@@ -1082,7 +1071,6 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 				    const skb_frag_t *frag,
 				    size_t offset, size_t len)
 {
-	struct ionic_tx_stats *stats = q_to_tx_stats(q);
 	struct device *dev = q->dev;
 	dma_addr_t dma_addr;
@@ -1090,7 +1078,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 	if (dma_mapping_error(dev, dma_addr)) {
 		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
 				     dev_name(dev), q->name);
-		stats->dma_map_err++;
+		q_to_tx_stats(q)->dma_map_err++;
 		return 0;
 	}
 	return dma_addr;
@@ -1742,12 +1730,10 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 linearize:
 	if (too_many_frags) {
-		struct ionic_tx_stats *stats = q_to_tx_stats(q);
-
 		err = skb_linearize(skb);
 		if (err)
 			return err;
-		stats->linearize++;
+		q_to_tx_stats(q)->linearize++;
 	}
 
 	return ndescs;