Commit 4cce66cd authored by Thadeu Lima de Souza Cascardo, committed by David S. Miller

mlx4_en: map entire pages to increase throughput

In its receive path, the mlx4_en driver maps each page chunk that it pushes
to the hardware and unmaps it when pushing it up the stack. This limits
throughput to about 3Gbps on a Power7 8-core machine.
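
For context, the per-packet cost being removed looks roughly like this: the old
code (the removed side of the diff below) paid a dma_map_single() for every
fragment it posted and a dma_unmap_single() for every fragment it completed.
This is a condensed excerpt of that code, not compilable on its own:

	/* old scheme: map each small chunk when posting it to the ring ... */
	dma = dma_map_single(priv->ddev,
			     page_address(skb_frags[i].page) + skb_frags[i].offset,
			     frag_info->frag_size, PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);

	/* ... and unmap it again for every completed fragment */
	dma = be64_to_cpu(rx_desc->data[nr].addr);
	dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
			 PCI_DMA_FROMDEVICE);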

One solution is to map the entire allocated page at once. However, this
requires that we keep track of every page fragment we give to a
descriptor. We also need to work with the discipline that all fragments will
be released (in the sense that they will not be reused by the driver
anymore) in the order they are allocated to the driver.
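
Concretely, struct mlx4_en_rx_alloc grows a dma field (see the mlx4_en.h hunk
at the end of the diff), the per-ring allocator maps the whole page once, and
posting a fragment to a descriptor becomes pure offset arithmetic on that
single mapping. The snippet below is condensed from the patch; the allocation
line is paraphrased from context that the truncated diff does not show:

	struct mlx4_en_rx_alloc {
		struct page	*page;
		dma_addr_t	dma;	/* mapping of the entire page, done once */
		u16		offset;
	};

	/* allocator setup: one dma_map_page() per page, not one map per chunk */
	page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
	page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
				       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

	/* posting a fragment: just an offset into the already-mapped page */
	rx_desc->data[i].addr = cpu_to_be64(ring_alloc[i].dma + ring_alloc[i].offset);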

This requires that we don't reuse any fragments: every single one of
them must be reallocated. We do that by releasing all the fragments that
have been processed and starting the refill only after we have finished
processing the descriptors.
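
In the patch this becomes an unconditional release of every fragment of a
completed descriptor at the end of the completion loop (the 'next:' label),
plus a new helper that performs the refill once the completion queue has been
drained. The helper is copied from the diff below; the first two code lines
are the per-completion release:

	/* at the 'next:' label in mlx4_en_process_rx_cq(): release every
	 * fragment of the completed descriptor, reuse nothing */
	for (nr = 0; nr < priv->num_frags; nr++)
		mlx4_en_free_frag(priv, frags, nr);

	/* after the CQ is drained: post fresh fragments for the consumed slots */
	static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
					      struct mlx4_en_rx_ring *ring)
	{
		int index = ring->prod & ring->size_mask;

		while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
			if (mlx4_en_prepare_rx_desc(priv, ring, index))
				break;
			ring->prod++;
			index = ring->prod & ring->size_mask;
		}
	}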

We must also guarantee that we either refill all fragments in a
descriptor or none at all, without resorting to giving up a page
fragment that we have already handed out. Otherwise, we would break the
discipline of releasing fragments only in the order they were
allocated.
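
This guarantee is what gives the new mlx4_en_alloc_frags() its two-pass shape:
replacements for all of a descriptor's fragments are first staged in a local
array, and only when every one of them has been obtained are the current
fragments handed to the descriptor and the ring allocator advanced. The sketch
below abbreviates the real function; stage_replacement() and unwind_staged()
are stand-in names for the inline allocate/map (or get_page) and unmap/put_page
code in the actual patch:

	static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
				       struct mlx4_en_rx_desc *rx_desc,
				       struct mlx4_en_rx_alloc *frags,
				       struct mlx4_en_rx_alloc *ring_alloc)
	{
		struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
		int i;

		/* pass 1: stage a replacement for every fragment; any failure
		 * unwinds the staged ones and leaves ring_alloc untouched */
		for (i = 0; i < priv->num_frags; i++)
			if (stage_replacement(priv, ring_alloc, page_alloc, i))
				goto out;

		/* pass 2: commit - give the current fragments to the descriptor
		 * and advance the ring allocator to the staged replacements */
		for (i = 0; i < priv->num_frags; i++) {
			frags[i] = ring_alloc[i];
			rx_desc->data[i].addr = cpu_to_be64(ring_alloc[i].dma +
							    ring_alloc[i].offset);
			ring_alloc[i] = page_alloc[i];
		}
		return 0;

	out:
		while (i--)
			unwind_staged(priv, page_alloc, i);
		return -ENOMEM;
	}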

This has survived page allocation fault injection (restricted to the
driver by using required-start and required-end) and device hotplug
while 16 TCP streams were delivering more than 9Gbps.
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a9ec6bd1
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -41,41 +41,75 @@
 #include "mlx4_en.h"
 
-static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
-			      struct mlx4_en_rx_desc *rx_desc,
-			      struct page_frag *skb_frags,
-			      struct mlx4_en_rx_alloc *ring_alloc,
-			      int i)
+static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+			       struct mlx4_en_rx_desc *rx_desc,
+			       struct mlx4_en_rx_alloc *frags,
+			       struct mlx4_en_rx_alloc *ring_alloc)
 {
-	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+	struct mlx4_en_frag_info *frag_info;
 	struct page *page;
 	dma_addr_t dma;
+	int i;
 
-	if (page_alloc->offset == frag_info->last_offset) {
-		/* Allocate new page */
-		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
-		if (!page)
-			return -ENOMEM;
-
-		skb_frags[i].page = page_alloc->page;
-		skb_frags[i].offset = page_alloc->offset;
-		page_alloc->page = page;
-		page_alloc->offset = frag_info->frag_align;
-	} else {
-		page = page_alloc->page;
-		get_page(page);
-
-		skb_frags[i].page = page;
-		skb_frags[i].offset = page_alloc->offset;
-		page_alloc->offset += frag_info->frag_stride;
+	for (i = 0; i < priv->num_frags; i++) {
+		frag_info = &priv->frag_info[i];
+		if (ring_alloc[i].offset == frag_info->last_offset) {
+			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+					   MLX4_EN_ALLOC_ORDER);
+			if (!page)
+				goto out;
+			dma = dma_map_page(priv->ddev, page, 0,
+					   MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(priv->ddev, dma)) {
+				put_page(page);
+				goto out;
+			}
+			page_alloc[i].page = page;
+			page_alloc[i].dma = dma;
+			page_alloc[i].offset = frag_info->frag_align;
+		} else {
+			page_alloc[i].page = ring_alloc[i].page;
+			get_page(ring_alloc[i].page);
+			page_alloc[i].dma = ring_alloc[i].dma;
+			page_alloc[i].offset = ring_alloc[i].offset +
+					       frag_info->frag_stride;
+		}
 	}
-	dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
-			     skb_frags[i].offset, frag_info->frag_size,
-			     PCI_DMA_FROMDEVICE);
-	rx_desc->data[i].addr = cpu_to_be64(dma);
+
+	for (i = 0; i < priv->num_frags; i++) {
+		frags[i] = ring_alloc[i];
+		dma = ring_alloc[i].dma + ring_alloc[i].offset;
+		ring_alloc[i] = page_alloc[i];
+		rx_desc->data[i].addr = cpu_to_be64(dma);
+	}
+
 	return 0;
+
+out:
+	while (i--) {
+		frag_info = &priv->frag_info[i];
+		if (ring_alloc[i].offset == frag_info->last_offset)
+			dma_unmap_page(priv->ddev, page_alloc[i].dma,
+				       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+		put_page(page_alloc[i].page);
+	}
+	return -ENOMEM;
+}
+
+static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
+			      struct mlx4_en_rx_alloc *frags,
+			      int i)
+{
+	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+
+	if (frags[i].offset == frag_info->last_offset) {
+		dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	}
+
+	if (frags[i].page)
+		put_page(frags[i].page);
 }
 
 static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
@@ -91,6 +125,13 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 		if (!page_alloc->page)
 			goto out;
 
+		page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
+					       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
+			put_page(page_alloc->page);
+			page_alloc->page = NULL;
+			goto out;
+		}
 		page_alloc->offset = priv->frag_info[i].frag_align;
 		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
 		       i, page_alloc->page);
@@ -100,6 +141,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 out:
 	while (i--) {
 		page_alloc = &ring->page_alloc[i];
+		dma_unmap_page(priv->ddev, page_alloc->dma,
+			       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
 	}
@@ -117,24 +160,22 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
 		       i, page_count(page_alloc->page));
+		dma_unmap_page(priv->ddev, page_alloc->dma,
+			       MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
 	}
 }
 
 static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rx_ring *ring, int index)
 {
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
-	struct skb_frag_struct *skb_frags = ring->rx_info +
-					    (index << priv->log_rx_info);
 	int possible_frags;
 	int i;
 
 	/* Set size and memtype fields */
 	for (i = 0; i < priv->num_frags; i++) {
-		skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
 		rx_desc->data[i].byte_count =
 			cpu_to_be32(priv->frag_info[i].frag_size);
 		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -151,29 +192,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 	}
 }
 
 static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 				   struct mlx4_en_rx_ring *ring, int index)
 {
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
-	struct page_frag *skb_frags = ring->rx_info +
-				      (index << priv->log_rx_info);
-	int i;
+	struct mlx4_en_rx_alloc *frags = ring->rx_info +
+					 (index << priv->log_rx_info);
 
-	for (i = 0; i < priv->num_frags; i++)
-		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
-			goto err;
-
-	return 0;
-
-err:
-	while (i--) {
-		dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
-		pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
-				 PCI_DMA_FROMDEVICE);
-		put_page(skb_frags[i].page);
-	}
-	return -ENOMEM;
+	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
 }
 
 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -185,20 +211,13 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rx_ring *ring,
 				 int index)
 {
-	struct page_frag *skb_frags;
-	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
-	dma_addr_t dma;
+	struct mlx4_en_rx_alloc *frags;
 	int nr;
 
-	skb_frags = ring->rx_info + (index << priv->log_rx_info);
+	frags = ring->rx_info + (index << priv->log_rx_info);
 	for (nr = 0; nr < priv->num_frags; nr++) {
 		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-		dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-		en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
-		dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
-				 PCI_DMA_FROMDEVICE);
-		put_page(skb_frags[nr].page);
+		mlx4_en_free_frag(priv, frags, nr);
 	}
 }
 
@@ -268,10 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = -ENOMEM;
 	int tmp;
 
 	ring->prod = 0;
 	ring->cons = 0;
 	ring->size = size;
@@ -281,7 +299,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
-					sizeof(struct skb_frag_struct));
+					sizeof(struct mlx4_en_rx_alloc));
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info)
 		return -ENOMEM;
@@ -338,7 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		memset(ring->buf, 0, ring->buf_size);
 		mlx4_en_update_rx_prod_db(ring);
 
-		/* Initailize all descriptors */
+		/* Initialize all descriptors */
 		for (i = 0; i < ring->size; i++)
 			mlx4_en_init_rx_desc(priv, ring, i);
@@ -404,12 +422,10 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 }
 
-/* Unmap a completed descriptor and free unused pages */
 static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 				    struct mlx4_en_rx_desc *rx_desc,
-				    struct page_frag *skb_frags,
+				    struct mlx4_en_rx_alloc *frags,
 				    struct sk_buff *skb,
-				    struct mlx4_en_rx_alloc *page_alloc,
 				    int length)
 {
 	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
@@ -417,26 +433,24 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 	int nr;
 	dma_addr_t dma;
 
-	/* Collect used fragments while replacing them in the HW descirptors */
+	/* Collect used fragments while replacing them in the HW descriptors */
 	for (nr = 0; nr < priv->num_frags; nr++) {
 		frag_info = &priv->frag_info[nr];
 		if (length <= frag_info->frag_prefix_size)
 			break;
+		if (!frags[nr].page)
+			goto fail;
 
-		/* Save page reference in skb */
-		__skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
-		skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
-		skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
-		skb->truesize += frag_info->frag_stride;
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
+		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+					DMA_FROM_DEVICE);
 
-		/* Allocate a replacement page */
-		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
-			goto fail;
-
-		/* Unmap buffer */
-		dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
-				 PCI_DMA_FROMDEVICE);
+		/* Save page reference in skb */
+		get_page(frags[nr].page);
+		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
+		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
+		skb_frags_rx[nr].page_offset = frags[nr].offset;
+		skb->truesize += frag_info->frag_stride;
 	}
 	/* Adjust size of last fragment to match actual length */
 	if (nr > 0)
@@ -445,8 +459,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 	return nr;
 
 fail:
-	/* Drop all accumulated fragments (which have already been replaced in
-	 * the descriptor) of this packet; remaining fragments are reused... */
 	while (nr > 0) {
 		nr--;
 		__skb_frag_unref(&skb_frags_rx[nr]);
@@ -457,8 +469,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 
 static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 				      struct mlx4_en_rx_desc *rx_desc,
-				      struct page_frag *skb_frags,
-				      struct mlx4_en_rx_alloc *page_alloc,
+				      struct mlx4_en_rx_alloc *frags,
 				      unsigned int length)
 {
 	struct sk_buff *skb;
@@ -476,23 +487,20 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	/* Get pointer to first fragment so we could copy the headers into the
 	 * (linear part of the) skb */
-	va = page_address(skb_frags[0].page) + skb_frags[0].offset;
+	va = page_address(frags[0].page) + frags[0].offset;
 
 	if (length <= SMALL_PACKET_SIZE) {
 		/* We are copying all relevant data to the skb - temporarily
-		 * synch buffers for the copy */
+		 * sync buffers for the copy */
 		dma = be64_to_cpu(rx_desc->data[0].addr);
 		dma_sync_single_for_cpu(priv->ddev, dma, length,
 					DMA_FROM_DEVICE);
 		skb_copy_to_linear_data(skb, va, length);
-		dma_sync_single_for_device(priv->ddev, dma, length,
-					   DMA_FROM_DEVICE);
 		skb->tail += length;
 	} else {
 		/* Move relevant fragments to skb */
-		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-						      skb, page_alloc, length);
+		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
 		if (unlikely(!used_frags)) {
 			kfree_skb(skb);
 			return NULL;
@@ -529,12 +537,25 @@ static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
 	dev_kfree_skb_any(skb);
 }
 
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+				      struct mlx4_en_rx_ring *ring)
+{
+	int index = ring->prod & ring->size_mask;
+
+	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+		if (mlx4_en_prepare_rx_desc(priv, ring, index))
+			break;
+		ring->prod++;
+		index = ring->prod & ring->size_mask;
+	}
+}
+
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
-	struct page_frag *skb_frags;
+	struct mlx4_en_rx_alloc *frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
 	int index;
@@ -543,6 +564,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	int polled = 0;
 	int ip_summed;
 	struct ethhdr *ethh;
+	dma_addr_t dma;
 	u64 s_mac;
 
 	if (!priv->port_up)
@@ -558,7 +580,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
 		    cq->mcq.cons_index & cq->size)) {
 
-		skb_frags = ring->rx_info + (index << priv->log_rx_info);
+		frags = ring->rx_info + (index << priv->log_rx_info);
 		rx_desc = ring->buf + (index << ring->log_stride);
 
 		/*
@@ -582,8 +604,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 		/* Get pointer to first fragment since we haven't skb yet and
 		 * cast it to ethhdr struct */
-		ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
-					 skb_frags[0].offset);
+		dma = be64_to_cpu(rx_desc->data[0].addr);
+		dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+					DMA_FROM_DEVICE);
+		ethh = (struct ethhdr *)(page_address(frags[0].page) +
+					 frags[0].offset);
 		s_mac = mlx4_en_mac_to_u64(ethh->h_source);
 
 		/* If source MAC is equal to our own MAC and not performing
@@ -615,10 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 				if (!gro_skb)
 					goto next;
 
-				nr = mlx4_en_complete_rx_desc(
-					priv, rx_desc,
-					skb_frags, gro_skb,
-					ring->page_alloc, length);
+				nr = mlx4_en_complete_rx_desc(priv,
+					rx_desc, frags, gro_skb,
+					length);
 				if (!nr)
 					goto next;
@@ -654,8 +678,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 					ring->csum_none++;
 				}
 
-			skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
-					     ring->page_alloc, length);
+			skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
 			if (!skb) {
 				priv->stats.rx_dropped++;
 				goto next;
@@ -681,6 +704,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 		netif_receive_skb(skb);
 
 next:
+		for (nr = 0; nr < priv->num_frags; nr++)
+			mlx4_en_free_frag(priv, frags, nr);
+
 		++cq->mcq.cons_index;
 		index = (cq->mcq.cons_index) & ring->size_mask;
 		cqe = &cq->buf[index];
@@ -696,7 +722,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	mlx4_cq_set_ci(&cq->mcq);
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
-	ring->prod += polled; /* Polled descriptors were realocated in place */
+	mlx4_en_refill_rx_buffers(priv, ring);
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }
@@ -785,7 +811,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->num_frags = i;
 	priv->rx_skb_size = eff_mtu;
-	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
 
 	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
 		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
@@ -987,8 +1013,3 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
 	}
 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
 }
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -111,7 +111,7 @@ enum {
 #define MLX4_EN_MAX_TX_SIZE	8192
 #define MLX4_EN_MAX_RX_SIZE	8192
 
-/* Minimum ring size for our page-allocation sceme to work */
+/* Minimum ring size for our page-allocation scheme to work */
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
@@ -232,6 +232,7 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
 	struct page	*page;
+	dma_addr_t	dma;
 	u16		offset;
 };