Commit 7d9ca345 authored by Tom Lendacky, committed by David S. Miller

amd-xgbe: Rework the Rx path SKB allocation

Rework the SKB allocation so that all of the buffers of the first
descriptor are handled in the SKB allocation routine. After copying the
data in the header buffer (which can be just the header if split header
processing succeeded or header plus data if split header processing did
not succeed) into the SKB, check for remaining data in the receive
buffer. If there is data remaining in the receive buffer, add that as a
frag to the SKB. Once an SKB has been allocated, all other descriptors
are added as frags to the SKB.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 34bf65df
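
Before the diff itself, a minimal userspace sketch of the first-descriptor handling the message describes. The struct and its field names are illustrative stand-ins, not the driver's types; only the copy-versus-frag decision mirrors the patch.

#include <stdio.h>

/* Illustrative stand-ins for the descriptor fields the patch consults. */
struct rx_first_desc {
	unsigned int hdr_len;     /* bytes split into the header buffer; 0 if split failed */
	unsigned int hdr_buf_len; /* size of the header DMA buffer */
	unsigned int desc_len;    /* total data carried by this descriptor */
};

/* Mirrors the reworked xgbe_create_skb(): copy the header buffer contents
 * into the SKB linear area (header only, or header plus data when the
 * split failed), then attach any remainder in the receive buffer as a frag.
 */
static void plan_first_descriptor(const struct rx_first_desc *d)
{
	unsigned int copy_len = d->hdr_len ? d->hdr_len : d->desc_len;

	if (copy_len > d->hdr_buf_len)
		copy_len = d->hdr_buf_len;
	printf("linear: %u bytes, frag: %u bytes\n",
	       copy_len, d->desc_len - copy_len);
}

int main(void)
{
	struct rx_first_desc split_ok  = { 54, 256, 1514 };
	struct rx_first_desc split_bad = { 0, 256, 128 };

	plan_first_descriptor(&split_ok);  /* header in linear area, payload as frag */
	plan_first_descriptor(&split_bad); /* everything copied into the linear area */
	return 0;
}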
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -481,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
 	if (rdata->state_saved) {
 		rdata->state_saved = 0;
-		rdata->state.incomplete = 0;
-		rdata->state.context_next = 0;
 		rdata->state.skb = NULL;
 		rdata->state.len = 0;
 		rdata->state.error = 0;
@@ -1822,9 +1822,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 			  lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct napi_struct *napi,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int *len)
+				       unsigned int len)
 {
 	struct sk_buff *skb;
 	u8 *packet;
@@ -1834,14 +1835,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
 	if (!skb)
 		return NULL;
 
+	/* Start with the header buffer which may contain just the header
+	 * or the header plus data
+	 */
+	dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+				rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
 	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	*len -= copy_len;
+	len -= copy_len;
+	if (len) {
+		/* Add the remaining data as a frag */
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+					rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx.buf.pa.pages,
+				rdata->rx.buf.pa.pages_offset,
+				len, rdata->rx.buf.dma_len);
+		rdata->rx.buf.pa.pages = NULL;
+	}
 
 	return skb;
 }
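
The two dma_sync_single_for_cpu() calls above centralize an ownership rule the caller previously applied to the header buffer only: with streaming DMA, the CPU must sync a device-written buffer before reading it. A kernel-style sketch of that pattern; dev, dma, cpu_addr, len and dst are placeholder parameters, not driver fields.

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Sketch of the rule xgbe_create_skb() now applies to both Rx buffers:
 * hand the streaming-DMA buffer back to the CPU before reading it.
 */
static void cpu_read_rx_buffer(struct device *dev, dma_addr_t dma,
			       const void *cpu_addr, size_t len, void *dst)
{
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	memcpy(dst, cpu_addr, len);	/* safe: CPU owns the buffer now */
}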
@@ -1923,7 +1941,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
-	unsigned int len, put_len, max_len;
+	unsigned int len, rdesc_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
@@ -1933,6 +1951,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (!ring)
 		return 0;
 
+	incomplete = 0;
+	context_next = 0;
+
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1942,15 +1963,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 		/* First time in loop see if we need to restore state */
 		if (!received && rdata->state_saved) {
-			incomplete = rdata->state.incomplete;
-			context_next = rdata->state.context_next;
 			skb = rdata->state.skb;
 			error = rdata->state.error;
 			len = rdata->state.len;
 		} else {
 			memset(packet, 0, sizeof(*packet));
-			incomplete = 0;
-			context_next = 0;
 			skb = NULL;
 			error = 0;
 			len = 0;
@@ -1991,23 +2008,16 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 		}
 
 		if (!context) {
-			put_len = rdata->rx.len - len;
-			len += put_len;
-
-			if (!skb) {
-				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx.hdr.dma,
-							rdata->rx.hdr.dma_len,
-							DMA_FROM_DEVICE);
-
-				skb = xgbe_create_skb(napi, rdata, &put_len);
-				if (!skb) {
+			/* Length is cumulative, get this descriptor's length */
+			rdesc_len = rdata->rx.len - len;
+			len += rdesc_len;
+
+			if (rdesc_len && !skb) {
+				skb = xgbe_create_skb(pdata, napi, rdata,
+						      rdesc_len);
+				if (!skb)
 					error = 1;
-					goto skip_data;
-				}
-			}
-
-			if (put_len) {
+			} else if (rdesc_len) {
 				dma_sync_single_for_cpu(pdata->dev,
 							rdata->rx.buf.dma,
 							rdata->rx.buf.dma_len,
@@ -2016,12 +2026,12 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						put_len, rdata->rx.buf.dma_len);
+						rdesc_len,
+						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-skip_data:
 		if (incomplete || context_next)
 			goto read_again;
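
The "Length is cumulative" comment is the key to the put_len-to-rdesc_len rename: hardware reports rdata->rx.len as a running total for the packet, so each descriptor's own payload is the delta from what has already been accounted for. A toy model of that arithmetic; the byte counts are illustrative, not taken from the commit.

#include <assert.h>

int main(void)
{
	/* Cumulative lengths reported as two descriptors of one packet
	 * complete; values are made up for illustration. */
	unsigned int rx_len[] = { 2048, 3000 };
	unsigned int len = 0;	/* bytes accounted for so far */

	for (int i = 0; i < 2; i++) {
		unsigned int rdesc_len = rx_len[i] - len;	/* this descriptor's share */
		len += rdesc_len;
	}
	assert(len == 3000);	/* total equals the final cumulative value */
	return 0;
}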
@@ -2084,8 +2094,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (received && (incomplete || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
-		rdata->state.incomplete = incomplete;
-		rdata->state.context_next = context_next;
 		rdata->state.skb = skb;
 		rdata->state.len = len;
 		rdata->state.error = error;
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -334,8 +334,6 @@ struct xgbe_ring_data {
 	 */
 	unsigned int state_saved;
 	struct {
-		unsigned int incomplete;
-		unsigned int context_next;
 		struct sk_buff *skb;
 		unsigned int len;
 		unsigned int error;
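
Taken together with the xgbe-drv.c hunks, the state saved across xgbe_rx_poll() calls shrinks to the in-progress SKB and its accounting; incomplete and context_next become ordinary locals initialized once per poll. The fields as they read after this hunk:

	unsigned int state_saved;
	struct {
		struct sk_buff *skb;
		unsigned int len;
		unsigned int error;
	} state;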