Commit 3e2234b3 authored by Zoltan Kiss, committed by David S. Miller

xen-netback: Handle foreign mapped pages on the guest RX path

The RX path needs to know whether the SKB fragments are stored on pages from another
domain.

Logically this patch should come after the one introducing the grant mapping itself, as
it only makes sense after that, but I moved it here to keep the series bisectable. It
doesn't change any functionality yet: xenvif_zerocopy_callback and ubuf_to_vif are just
stubs here and will be introduced properly later on.
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 121fa4b7
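The core of the change: when a guest-to-guest packet is forwarded, the backend's RX path copies from pages that were grant-mapped from the sending domain, so the grant copy must name that domain's grant reference instead of a local frame. A minimal sketch of the test the patch performs inline in xenvif_gop_skb() (the helper name here is hypothetical, not part of the patch):

static bool xenvif_skb_has_foreign_frags(struct sk_buff *skb)
{
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

	/* Foreign, grant-mapped frags are signalled by a device-zerocopy
	 * skb whose completion callback is xen-netback's own. */
	return (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
	       ubuf && ubuf->callback == &xenvif_zerocopy_callback;
}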
drivers/net/xen-netback/common.h
@@ -247,6 +247,9 @@ static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
 		< MAX_PENDING_REQS;
 }
 
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
 extern bool separate_tx_rx_irq;
 
 #endif /* __XEN_NETBACK__COMMON_H__ */
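The declaration matches the callback member of struct ubuf_info, so the network stack can invoke it when a zerocopy skb is released. A hedged sketch of how the TX path is expected to wire it up once grant mapping lands (ubuf and pending_idx assumed in scope; this wiring is an assumption, not part of this patch):

ubuf->callback = xenvif_zerocopy_callback;
ubuf->desc = pending_idx;	/* pending ring index, read back in the callback */
ubuf->ctx = NULL;		/* later chains one ubuf_info per extra frag */
skb_shinfo(skb)->destructor_arg = ubuf;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;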
drivers/net/xen-netback/netback.c
@@ -101,6 +101,10 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+	return NULL;
+}
+
 /* This is a miniumum size for the linear area to avoid lots of
  * calls to __pskb_pull_tail() as we set up checksum offsets. The
  * value 128 was chosen as it covers all IPv4 and most likely
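The NULL body above is only a placeholder to keep this patch self-contained. One plausible shape for the real lookup, assuming the later series embeds a ubuf_info (here called callback_struct, an assumed field name) in each pending_tx_info slot:

static inline struct xenvif *ubuf_to_vif(struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *info =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	/* Step back to slot 0 of the array, then out to the enclosing vif. */
	return container_of(info - pending_idx, struct xenvif,
			    pending_tx_info[0]);
}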
@@ -221,7 +225,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 				 struct netrx_pending_operations *npo,
 				 struct page *page, unsigned long size,
-				 unsigned long offset, int *head)
+				 unsigned long offset, int *head,
+				 struct xenvif *foreign_vif,
+				 grant_ref_t foreign_gref)
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
@@ -263,8 +269,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 		copy_gop->flags = GNTCOPY_dest_gref;
 		copy_gop->len = bytes;
 
-		copy_gop->source.domid = DOMID_SELF;
-		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+		if (foreign_vif) {
+			copy_gop->source.domid = foreign_vif->domid;
+			copy_gop->source.u.ref = foreign_gref;
+			copy_gop->flags |= GNTCOPY_source_gref;
+		} else {
+			copy_gop->source.domid = DOMID_SELF;
+			copy_gop->source.u.gmfn =
+				virt_to_mfn(page_address(page));
+		}
 		copy_gop->source.offset = offset;
 
 		copy_gop->dest.domid = vif->domid;
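For comparison, the two source encodings the branch above selects between, written out as designated initializers (variable names borrowed from the surrounding function; dest fields elided):

/* Foreign frag: name the owning domain and its grant reference. */
struct gnttab_copy foreign = {
	.source.domid	= foreign_vif->domid,
	.source.u.ref	= foreign_gref,
	.flags		= GNTCOPY_dest_gref | GNTCOPY_source_gref,
};

/* Local page: DOMID_SELF plus the machine frame number. */
struct gnttab_copy local = {
	.source.domid	= DOMID_SELF,
	.source.u.gmfn	= virt_to_mfn(page_address(page)),
	.flags		= GNTCOPY_dest_gref,
};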
@@ -325,6 +338,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int old_meta_prod;
 	int gso_type;
 	int gso_size;
+	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+	struct xenvif *foreign_vif = NULL;
 
 	old_meta_prod = npo->meta_prod;
@@ -365,6 +381,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	npo->copy_off = 0;
 	npo->copy_gref = req->gref;
 
+	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+	    (ubuf->callback == &xenvif_zerocopy_callback)) {
+		int i = 0;
+		foreign_vif = ubuf_to_vif(ubuf);
+
+		do {
+			u16 pending_idx = ubuf->desc;
+			foreign_grefs[i++] =
+				foreign_vif->pending_tx_info[pending_idx].req.gref;
+			ubuf = (struct ubuf_info *) ubuf->ctx;
+		} while (ubuf);
+	}
+
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
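The do/while above walks a chain of ubuf_info structures, one per foreign frag, linked through ->ctx; each ->desc is a pending ring index whose pending_tx_info entry holds the grant ref the sending guest used. The same walk as a standalone sketch (the pr_debug is purely for illustration):

struct ubuf_info *u = skb_shinfo(skb)->destructor_arg;

while (u) {
	u16 pending_idx = u->desc;

	pr_debug("frag pending_idx %u -> gref %u\n", pending_idx,
		 foreign_vif->pending_tx_info[pending_idx].req.gref);
	u = (struct ubuf_info *)u->ctx;
}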
@@ -374,7 +403,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 			len = skb_tail_pointer(skb) - data;
 
 		xenvif_gop_frag_copy(vif, skb, npo,
-				     virt_to_page(data), len, offset, &head);
+				     virt_to_page(data), len, offset, &head,
+				     NULL,
+				     0);
 
 		data += len;
 	}
@@ -383,7 +414,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
-				     &head);
+				     &head,
+				     foreign_vif,
+				     foreign_grefs[i]);
 	}
 
 	return npo->meta_prod - old_meta_prod;
@@ -1351,6 +1384,11 @@ static int xenvif_tx_submit(struct xenvif *vif)
 	return work_done;
 }
 
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+	return;
+}
+
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif *vif, int budget)
 {
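As the commit message says, the empty body above is a stub. Presumably the grant-mapping patch fills it with a walk over the chained ubufs that returns their pending slots; a hedged sketch (xenvif_release_pending_slot is a hypothetical helper, not a real function in this driver):

void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	struct xenvif *vif = ubuf_to_vif(ubuf);

	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *) ubuf->ctx;
		/* hypothetical: hand the slot back to the pending ring */
		xenvif_release_pending_slot(vif, pending_idx, zerocopy_success);
	} while (ubuf);
}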