Commit 070e1f01 authored by Christophe Leroy, committed by David S. Miller

net: fs_enet: don't unmap DMA when packet len is below copybreak

When the length of the packet is below the defined copybreak limit,
the received packet is copied into a newly allocated skb so that the
original skb can be reused. This is only beneficial if it allows us
to avoid a new DMA mapping, so we no longer DMA-unmap and remap
skb->data. Instead, we invalidate the cache with
dma_sync_single_for_cpu() once the received data has been copied into
the new skb.
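
As a rough illustration, the copybreak path now behaves like the sketch
below. The helper rx_copybreak_copy() is hypothetical, written only for
this explanation (it is not the driver's actual structure), and it
assumes a non-coherent platform where dma_sync_single_for_cpu() with
DMA_FROM_DEVICE performs the cache invalidation:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/dma-mapping.h>
    #include <linux/cache.h>

    /* Copy a short packet into a fresh skb; the original RX buffer
     * keeps its DMA mapping and is handed straight back to the
     * controller. Hypothetical helper, for illustration only. */
    static struct sk_buff *rx_copybreak_copy(struct device *dmadev,
                                             struct net_device *ndev,
                                             struct sk_buff *skb,
                                             dma_addr_t buf, int pkt_len)
    {
        struct sk_buff *skbn = netdev_alloc_skb(ndev, pkt_len + 2);

        if (!skbn)
            return NULL;

        skb_reserve(skbn, 2);        /* align IP header */
        skb_copy_from_linear_data(skb, skbn->data, pkt_len);
        /* Invalidate the cache lines the copy just pulled in, so no
         * dma_unmap_single()/dma_map_single() round trip is needed
         * before the buffer is reused for the next reception. */
        dma_sync_single_for_cpu(dmadev, buf, L1_CACHE_ALIGN(pkt_len),
                                DMA_FROM_DEVICE);
        return skbn;    /* goes up the stack; skb stays on the ring */
    }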

The following measurements were obtained on an mpc885 running at 132 MHz.
Measurement is done using the timebase, with packets sent to the target
with 'ping -s 1' (packet length is 60):
* Without this patch: 182 TB ticks
* With this patch: 143 TB ticks
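
(For context, numbers of this kind can be obtained by bracketing the
receive path with reads of the PowerPC timebase. The snippet below is
only a sketch of that method, not instrumentation from the driver; the
wrapper fs_enet_napi_timed() is made up for illustration, and mftb() is
the powerpc helper that reads the timebase lower register:)

    #include <linux/netdevice.h>
    #include <asm/reg.h>    /* mftb(): read the timebase lower register */

    static unsigned long rx_tb_ticks;    /* accumulated RX cost */

    /* Hypothetical wrapper around the driver's (static) NAPI poll
     * function, counting timebase ticks spent per call. */
    static int fs_enet_napi_timed(struct napi_struct *napi, int budget)
    {
        unsigned long start = mftb();
        int work = fs_enet_napi(napi, budget);

        rx_tb_ticks += mftb() - start;    /* e.g. 143 vs 182 ticks */
        return work;
    }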

As a comparison, setting the copybreak limit to 0 (never copy; always
hand the skb up and allocate a fresh one) gives 148 TB ticks. In other
words, without this patch, copying the received data into a new skb is
even slower than allocating a new skb for the next packet to be
received.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8572763a
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -226,21 +226,10 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
             if (sc & BD_ENET_RX_OV)
                 fep->stats.rx_crc_errors++;
 
-            skb = fep->rx_skbuff[curidx];
-
-            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                DMA_FROM_DEVICE);
-
-            skbn = skb;
-
+            skbn = fep->rx_skbuff[curidx];
         } else {
             skb = fep->rx_skbuff[curidx];
 
-            dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                DMA_FROM_DEVICE);
-
             /*
              * Process the incoming frame.
              */
@@ -256,12 +245,30 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
                     skb_copy_from_linear_data(skb,
                                   skbn->data, pkt_len);
                     swap(skb, skbn);
+                    dma_sync_single_for_cpu(fep->dev,
+                        CBDR_BUFADDR(bdp),
+                        L1_CACHE_ALIGN(pkt_len),
+                        DMA_FROM_DEVICE);
                 }
             } else {
                 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
 
-                if (skbn)
+                if (skbn) {
+                    dma_addr_t dma;
+
                     skb_align(skbn, ENET_RX_ALIGN);
+
+                    dma_unmap_single(fep->dev,
+                        CBDR_BUFADDR(bdp),
+                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+                        DMA_FROM_DEVICE);
+
+                    dma = dma_map_single(fep->dev,
+                        skbn->data,
+                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+                        DMA_FROM_DEVICE);
+                    CBDW_BUFADDR(bdp, dma);
+                }
             }
 
             if (skbn != NULL) {
@@ -276,9 +283,6 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
         }
 
         fep->rx_skbuff[curidx] = skbn;
-        CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
-                 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-                 DMA_FROM_DEVICE));
         CBDW_DATLEN(bdp, 0);
         CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
 