Commit 4051bd81 authored by Yunsheng Lin, committed by Paolo Abeni

vhost/net: remove vhost_net_page_frag_refill()

The page frag in vhost_net_page_frag_refill() uses the
'struct page_frag' from skb_page_frag_refill(), but its
implementation is now similar to page_frag_alloc_align().

This patch removes vhost_net_page_frag_refill() by using
'struct page_frag_cache' instead of 'struct page_frag', and by
allocating the frag with page_frag_alloc_align().

The added benefit is that this not only unifies the page frag
implementation a little, but also gives about a 0.5% performance
boost when tested with the vhost_net_test introduced in the
last patch.
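
For reference, the page_frag_cache lifecycle this patch converts to
looks roughly like the sketch below. It is only an illustration built
from the calls used in this patch (page_frag_alloc_align(),
page_frag_free(), page_frag_cache_drain()); the helper name and its
surroundings are made up for the example and are not part of the
driver:

    /* Hypothetical helper, for illustration only. */
    static int pf_cache_example(struct page_frag_cache *cache,
                                unsigned int buflen)
    {
            void *buf;

            /* Allocate a cache-line aligned fragment; the cache refills
             * itself from the page allocator when the current page runs
             * out of space.
             */
            buf = page_frag_alloc_align(cache, buflen, GFP_KERNEL,
                                        SMP_CACHE_BYTES);
            if (unlikely(!buf))
                    return -ENOMEM;

            /* ... use the buffer; on an error path, hand it back. */
            page_frag_free(buf);
            return 0;
    }

    /* Lifetime: cache->va is set to NULL once at open time, and
     * page_frag_cache_drain(cache) releases any held page at
     * release time, as the hunks below do in vhost_net_open() and
     * vhost_net_release().
     */
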
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent a0727489
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
 	unsigned tx_zcopy_err;
 	/* Flush in progress. Protected by tx vq lock. */
 	bool tx_flush;
-	/* Private page frag */
-	struct page_frag page_frag;
-	/* Refcount bias of page frag */
-	int refcnt_bias;
+	/* Private page frag cache */
+	struct page_frag_cache pf_cache;
 };
 
 static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
 	       !vhost_vq_avail_empty(vq->dev, vq);
 }
 
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
-				       struct page_frag *pfrag, gfp_t gfp)
-{
-	if (pfrag->page) {
-		if (pfrag->offset + sz <= pfrag->size)
-			return true;
-		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
-	}
-
-	pfrag->offset = 0;
-	net->refcnt_bias = 0;
-	if (SKB_FRAG_PAGE_ORDER) {
-		/* Avoid direct reclaim but allow kswapd to wake */
-		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
-					  __GFP_COMP | __GFP_NOWARN |
-					  __GFP_NORETRY | __GFP_NOMEMALLOC,
-					  SKB_FRAG_PAGE_ORDER);
-		if (likely(pfrag->page)) {
-			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
-			goto done;
-		}
-	}
-	pfrag->page = alloc_page(gfp);
-	if (likely(pfrag->page)) {
-		pfrag->size = PAGE_SIZE;
-		goto done;
-	}
-	return false;
-
-done:
-	net->refcnt_bias = USHRT_MAX;
-	page_ref_add(pfrag->page, USHRT_MAX - 1);
-	return true;
-}
-
 #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 
 static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
 					     dev);
 	struct socket *sock = vhost_vq_get_backend(vq);
-	struct page_frag *alloc_frag = &net->page_frag;
 	struct virtio_net_hdr *gso;
 	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
 	struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 	int sock_hlen = nvq->sock_hlen;
 	void *buf;
 	int copied;
+	int ret;
 
 	if (unlikely(len < nvq->sock_hlen))
 		return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 		return -ENOSPC;
 
 	buflen += SKB_DATA_ALIGN(len + pad);
-	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
-	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
-						 alloc_frag, GFP_KERNEL)))
+	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+				    SMP_CACHE_BYTES);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
-	copied = copy_page_from_iter(alloc_frag->page,
-				     alloc_frag->offset +
-				     offsetof(struct tun_xdp_hdr, gso),
-				     sock_hlen, from);
-	if (copied != sock_hlen)
-		return -EFAULT;
+	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+				sock_hlen, from);
+	if (copied != sock_hlen) {
+		ret = -EFAULT;
+		goto err;
+	}
 
 	hdr = buf;
 	gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
 			       vhost16_to_cpu(vq, gso->csum_start) +
 			       vhost16_to_cpu(vq, gso->csum_offset) + 2);
 
-		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
-			return -EINVAL;
+		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	len -= sock_hlen;
-	copied = copy_page_from_iter(alloc_frag->page,
-				     alloc_frag->offset + pad,
-				     len, from);
-	if (copied != len)
-		return -EFAULT;
+	copied = copy_from_iter(buf + pad, len, from);
+	if (copied != len) {
+		ret = -EFAULT;
+		goto err;
+	}
 
 	xdp_init_buff(xdp, buflen, NULL);
 	xdp_prepare_buff(xdp, buf, pad, len, true);
 	hdr->buflen = buflen;
 
-	--net->refcnt_bias;
-	alloc_frag->offset += buflen;
-
 	++nvq->batched_xdp;
 
 	return 0;
+
+err:
+	page_frag_free(buf);
+	return ret;
 }
 
 static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 			vqs[VHOST_NET_VQ_RX]);
 
 	f->private_data = n;
-	n->page_frag.page = NULL;
-	n->refcnt_bias = 0;
+	n->pf_cache.va = NULL;
 
 	return 0;
 }
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
 	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
 	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
 	kfree(n->dev.vqs);
-	if (n->page_frag.page)
-		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+	page_frag_cache_drain(&n->pf_cache);
 	kvfree(n);
 	return 0;
 }