Commit e75ccac1 authored by Shannon Nelson, committed by David S. Miller

ionic: use fewer inits on the buf_info struct

Based on Alex's review notes on [1], we don't need to write
to the buf_info elements as often, and can tighten up how they
are used.  Also, use prefetchw() to warm up the page struct
for a later get_page().

[1] https://lore.kernel.org/netdev/CAKgT0UfyjoAN7LTnq0NMZfXRv4v7iTCPyAb9pVr3qWMhop_BVw@mail.gmail.com/

Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e7f52aa4
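Before the diff, a minimal userspace sketch of the pattern the commit moves to: do the fallible work (page allocation, DMA mapping) through locals and commit to the buf_info struct only once everything has succeeded, so error paths never need a reset helper. The struct layout mirrors the driver's, but the fake_* helpers are hypothetical stand-ins for alloc_pages()/dma_map_page(), not kernel APIs.

#include <stdlib.h>

struct buf_info {
	void *page;
	unsigned int page_offset;
	unsigned long dma_addr;
};

/* hypothetical stand-ins for alloc_pages() and dma_map_page() */
static void *fake_page_alloc(void) { return malloc(4096); }
static unsigned long fake_dma_map(void *page) { return (unsigned long)page; }

static int page_alloc_commit_on_success(struct buf_info *buf_info)
{
	void *page;
	unsigned long dma_addr;

	page = fake_page_alloc();
	if (!page)
		return -1;	/* buf_info untouched: nothing to undo */

	dma_addr = fake_dma_map(page);
	if (!dma_addr) {
		free(page);	/* still no writes to buf_info */
		return -1;
	}

	/* single commit point: fewer writes, no reset needed on error */
	buf_info->page = page;
	buf_info->page_offset = 0;
	buf_info->dma_addr = dma_addr;
	return 0;
}

The diff below applies the same idea inside ionic_rx_page_alloc(), which lets ionic_rx_buf_reset() be deleted outright.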
@@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
 	return netdev_get_tx_queue(q->lif->netdev, q->index);
 }
 
-static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
-{
-	buf_info->page = NULL;
-	buf_info->page_offset = 0;
-	buf_info->dma_addr = 0;
-}
-
 static int ionic_rx_page_alloc(struct ionic_queue *q,
 			       struct ionic_buf_info *buf_info)
 {
 	struct net_device *netdev = q->lif->netdev;
 	struct ionic_rx_stats *stats;
 	struct device *dev;
+	struct page *page;
 
 	dev = q->dev;
 	stats = q_to_rx_stats(q);
@@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
 		return -EINVAL;
 	}
 
-	buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
-	if (unlikely(!buf_info->page)) {
+	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+	if (unlikely(!page)) {
 		net_err_ratelimited("%s: %s page alloc failed\n",
 				    netdev->name, q->name);
 		stats->alloc_err++;
 		return -ENOMEM;
 	}
 
-	buf_info->page_offset = 0;
-	buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+	buf_info->dma_addr = dma_map_page(dev, page, 0,
 					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
-		__free_pages(buf_info->page, 0);
-		ionic_rx_buf_reset(buf_info);
+		__free_pages(page, 0);
 		net_err_ratelimited("%s: %s dma map failed\n",
 				    netdev->name, q->name);
 		stats->dma_map_err++;
 		return -EIO;
 	}
 
+	buf_info->page = page;
+	buf_info->page_offset = 0;
+
 	return 0;
 }
 
@@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
 
 	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 	__free_pages(buf_info->page, 0);
-	ionic_rx_buf_reset(buf_info);
+	buf_info->page = NULL;
 }
 
 static bool ionic_rx_buf_recycle(struct ionic_queue *q,
@@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 	buf_info = &desc_info->bufs[0];
 	len = le16_to_cpu(comp->len);
 
-	prefetch(buf_info->page);
+	prefetchw(buf_info->page);
 
 	skb = napi_get_frags(&q_to_qcq(q)->napi);
 	if (unlikely(!skb)) {
@@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 			dma_unmap_page(dev, buf_info->dma_addr,
 				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-			ionic_rx_buf_reset(buf_info);
+			buf_info->page = NULL;
 		}
 		buf_info++;
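On the prefetch() to prefetchw() change: when the buffer is recycled, the RX path calls get_page() on this page, and get_page() writes to struct page (a refcount increment). prefetchw() requests the cacheline in a writable state, so the later atomic increment avoids a shared-to-exclusive upgrade stall; plain prefetch() only warms it for reading. A sketch of the idiom follows, with a simplified, hypothetical helper name (the real flow runs through ionic_rx_frags() and ionic_rx_buf_recycle()):

#include <linux/prefetch.h>
#include <linux/mm.h>

static void rx_warm_and_recycle(struct page *page)
{
	/* get_page() below will write to struct page (refcount bump),
	 * so fetch the cacheline for ownership now rather than read-only.
	 */
	prefetchw(page);

	/* ... build skb frags, decide to reuse the page ... */

	get_page(page);		/* cacheline already owned for write */
}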