Commit a0006a86 authored by Ian Campbell, committed by David S. Miller

cxgb4vf: convert to SKB paged frag API.

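The skb_frag_t structure is being made opaque to drivers, so every place
that used to poke at its fields directly (page, page_offset, size) has to
go through the frag accessor API instead, allowing the underlying
representation to change without touching every driver. As a rough sketch
of the mapping change (not code from this patch; map_frag, dev and frag
are illustrative names, kernel context with linux/skbuff.h and
linux/dma-mapping.h assumed):

	/* Old form: dma_map_page(dev, frag->page, frag->page_offset,
	 *                        skb_frag_size(frag), DMA_TO_DEVICE);
	 * New form: let the accessor resolve the page and offset.
	 */
	static dma_addr_t map_frag(struct device *dev, const skb_frag_t *frag)
	{
		return skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					DMA_TO_DEVICE);
	}

The driver-private gather list (struct pkt_gl) never feeds an skb
directly, so it switches to struct page_frag, whose fields stay
open-coded.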
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Casey Leedom <leedom@chelsio.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e91b0f24
@@ -144,7 +144,7 @@ struct sge_fl {
  * An ingress packet gather list.
  */
 struct pkt_gl {
-	skb_frag_t frags[MAX_SKB_FRAGS];
+	struct page_frag frags[MAX_SKB_FRAGS];
 	void *va;			/* virtual address of first byte */
 	unsigned int nfrags;		/* # of fragments */
 	unsigned int tot_len;		/* total length of fragments */
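For reference, struct page_frag is an open struct along these lines (a
sketch; the exact field widths depend on the kernel configuration), which
is why the gather-list code in the hunks below can keep writing
page/offset/size directly while all skb_frag_t access goes through
accessors:

	struct page_frag {
		struct page	*page;		/* backing page */
		__u32		offset;		/* start offset within the page */
		__u32		size;		/* bytes of data in this frag */
	};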
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
 	si = skb_shinfo(skb);
 	end = &si->frags[si->nr_frags];
 	for (fp = si->frags; fp < end; fp++) {
-		*++addr = dma_map_page(dev, fp->page, fp->page_offset,
-				       skb_frag_size(fp), DMA_TO_DEVICE);
+		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+					   DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, *addr))
 			goto unwind;
 	}
@@ -1356,6 +1356,35 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+/**
+ *	copy_frags - copy fragments from gather list into skb_shared_info
+ *	@skb: destination skb
+ *	@gl: source internal packet gather list
+ *	@offset: packet start offset in first page
+ *
+ *	Copy an internal packet gather list into a Linux skb_shared_info
+ *	structure.
+ */
+static inline void copy_frags(struct sk_buff *skb,
+			      const struct pkt_gl *gl,
+			      unsigned int offset)
+{
+	int i;
+
+	/* usually there's just one frag */
+	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
+			     gl->frags[0].offset + offset,
+			     gl->frags[0].size - offset);
+	skb_shinfo(skb)->nr_frags = gl->nfrags;
+	for (i = 1; i < gl->nfrags; i++)
+		__skb_fill_page_desc(skb, i, gl->frags[i].page,
+				     gl->frags[i].offset,
+				     gl->frags[i].size);
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[gl->nfrags - 1].page);
+}
+
 /**
  *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
  *	@gl: the gather list
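The new copy_frags() above builds the skb geometry through
__skb_fill_page_desc(), which records page/offset/size for one frag but
takes no page reference of its own; refcounting stays with the caller. A
minimal sketch of that contract (attach_page and its arguments are
hypothetical, not part of this patch):

	/* Attach one page as frag 0 of an skb.  __skb_fill_page_desc()
	 * does not call get_page(), so take here the reference that the
	 * skb will drop when it is freed.
	 */
	static void attach_page(struct sk_buff *skb, struct page *page,
				unsigned int off, unsigned int len)
	{
		get_page(page);
		__skb_fill_page_desc(skb, 0, page, off, len);
		skb_shinfo(skb)->nr_frags = 1;
	}

This is also why copy_frags() takes a reference only on the last page:
per its comment the driver does not own that page, while references to
the earlier pages are handed over outright.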
@@ -1369,7 +1398,6 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
 			  unsigned int skb_len, unsigned int pull_len)
 {
 	struct sk_buff *skb;
-	struct skb_shared_info *ssi;
 
 	/*
 	 * If the ingress packet is small enough, allocate an skb large enough
@@ -1396,21 +1424,10 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
 		__skb_put(skb, pull_len);
 		skb_copy_to_linear_data(skb, gl->va, pull_len);
 
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
-		skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len);
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-
+		copy_frags(skb, gl, pull_len);
 		skb->len = gl->tot_len;
 		skb->data_len = skb->len - pull_len;
 		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
 	}
 
 out:
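To make the byte accounting above concrete, with assumed numbers: for
gl->tot_len = 1500 and pull_len = 128, the pull leaves 128 bytes in the
skb's linear area, copy_frags() advances frag 0's offset by those 128
bytes, and the skb ends up with len = 1500, data_len = 1372, and
truesize grown by the same 1372 fragment bytes.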
@@ -1433,35 +1450,6 @@ void t4vf_pktgl_free(const struct pkt_gl *gl)
 		put_page(gl->frags[frag].page);
 }
 
-/**
- *	copy_frags - copy fragments from gather list into skb_shared_info
- *	@si: destination skb shared info structure
- *	@gl: source internal packet gather list
- *	@offset: packet start offset in first page
- *
- *	Copy an internal packet gather list into a Linux skb_shared_info
- *	structure.
- */
-static inline void copy_frags(struct skb_shared_info *si,
-			      const struct pkt_gl *gl,
-			      unsigned int offset)
-{
-	unsigned int n;
-
-	/* usually there's just one frag */
-	si->frags[0].page = gl->frags[0].page;
-	si->frags[0].page_offset = gl->frags[0].page_offset + offset;
-	skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset);
-	si->nr_frags = gl->nfrags;
-
-	n = gl->nfrags - 1;
-	if (n)
-		memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
-
-	/* get a reference to the last page, we don't own it */
-	get_page(gl->frags[n].page);
-}
-
 /**
  *	do_gro - perform Generic Receive Offload ingress packet processing
  *	@rxq: ingress RX Ethernet Queue
@@ -1484,7 +1472,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+	copy_frags(skb, gl, PKTSHIFT);
 	skb->len = gl->tot_len - PKTSHIFT;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
@@ -1667,7 +1655,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 		rmb();
 		rsp_type = RSPD_TYPE(rc->type_gen);
 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
-			skb_frag_t *fp;
+			struct page_frag *fp;
 			struct pkt_gl gl;
 			const struct rx_sw_desc *sdesc;
 			u32 bufsz, frag;
@@ -1701,9 +1689,9 @@ int process_responses(struct sge_rspq *rspq, int budget)
 				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
 				bufsz = get_buf_size(sdesc);
 				fp->page = sdesc->page;
-				fp->page_offset = rspq->offset;
-				skb_frag_size_set(fp, min(bufsz, len));
-				len -= skb_frag_size(fp);
+				fp->offset = rspq->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
 				if (!len)
 					break;
 				unmap_rx_buf(rspq->adapter, &rxq->fl);
@@ -1717,9 +1705,9 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
						get_buf_addr(sdesc),
-						skb_frag_size(fp), DMA_FROM_DEVICE);
+						fp->size, DMA_FROM_DEVICE);
 			gl.va = (page_address(gl.frags[0].page) +
-				 gl.frags[0].page_offset);
+				 gl.frags[0].offset);
 			prefetch(gl.va);
 
 			/*
@@ -1728,7 +1716,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, FL_ALIGN);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {