Commit 6702d60d authored by Paolo Abeni

Merge branch 'remove-page-frag-implementation-in-vhost_net'

Yunsheng Lin says:

====================
remove page frag implementation in vhost_net

Currently there are three implementations for page frag:

1. mm/page_alloc.c: net stack seems to be using it in the
   rx part with 'struct page_frag_cache' and the main API
   being page_frag_alloc_align().
2. net/core/sock.c: net stack seems to be using it in the
   tx part with 'struct page_frag' and the main API being
   skb_page_frag_refill().
3. drivers/vhost/net.c: vhost seems to be using it to build the
   xdp frame, and its implementation seems to be a mix of the
   above two.

This patchset tries to unify the page frag implementation a
little bit by unifying the gfp bits used for the order-3 page
allocation and replacing the page frag implementation in
vhost_net with the one in page_alloc.c.
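
To illustrate the direction (this is not part of the patches
themselves), the consumer-side pattern the series converges on
looks roughly like the sketch below; the my_* names are made up
for illustration, while struct page_frag_cache,
page_frag_alloc_align() and page_frag_cache_drain() are the
interfaces this series touches:

  #include <linux/gfp.h>

  struct my_ctx {
          /* replaces open-coded page/offset/refcount-bias state */
          struct page_frag_cache pf_cache;
  };

  static void my_init(struct my_ctx *ctx)
  {
          /* empty cache; the first allocation refills it */
          ctx->pf_cache.va = NULL;
  }

  static void *my_alloc(struct my_ctx *ctx, unsigned int len)
  {
          /* carve a cacheline-aligned fragment out of the cached page */
          return page_frag_alloc_align(&ctx->pf_cache, len, GFP_KERNEL,
                                       SMP_CACHE_BYTES);
  }

  static void my_teardown(struct my_ctx *ctx)
  {
          /* drop the bias reference held on the current page, if any */
          page_frag_cache_drain(&ctx->pf_cache);
  }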

After this patchset, the page frag implementation is not only a
little more unified, but also shows about a 0.5% performance
boost when tested with the vhost_net_test introduced in the
last patch.

Before this patchset:
Performance counter stats for './vhost_net_test' (10 runs):

     305325.78 msec task-clock                       #    1.738 CPUs utilized               ( +-  0.12% )
       1048668      context-switches                 #    3.435 K/sec                       ( +-  0.00% )
            11      cpu-migrations                   #    0.036 /sec                        ( +- 17.64% )
            33      page-faults                      #    0.108 /sec                        ( +-  0.49% )
  244651819491      cycles                           #    0.801 GHz                         ( +-  0.43% )  (64)
   64714638024      stalled-cycles-frontend          #   26.45% frontend cycles idle        ( +-  2.19% )  (67)
   30774313491      stalled-cycles-backend           #   12.58% backend cycles idle         ( +-  7.68% )  (70)
  201749748680      instructions                     #    0.82  insn per cycle
                                              #    0.32  stalled cycles per insn     ( +-  0.41% )  (66.76%)
   65494787909      branches                         #  214.508 M/sec                       ( +-  0.35% )  (64)
    4284111313      branch-misses                    #    6.54% of all branches             ( +-  0.45% )  (66)

       175.699 +- 0.189 seconds time elapsed  ( +-  0.11% )

After this patchset:
Performance counter stats for './vhost_net_test' (10 runs):

     303974.38 msec task-clock                       #    1.739 CPUs utilized               ( +-  0.14% )
       1048807      context-switches                 #    3.450 K/sec                       ( +-  0.00% )
            14      cpu-migrations                   #    0.046 /sec                        ( +- 12.86% )
            33      page-faults                      #    0.109 /sec                        ( +-  0.46% )
  251289376347      cycles                           #    0.827 GHz                         ( +-  0.32% )  (60)
   67885175415      stalled-cycles-frontend          #   27.01% frontend cycles idle        ( +-  0.48% )  (63)
   27809282600      stalled-cycles-backend           #   11.07% backend cycles idle         ( +-  0.36% )  (71)
  195543234672      instructions                     #    0.78  insn per cycle
                                              #    0.35  stalled cycles per insn     ( +-  0.29% )  (69.04%)
   62423183552      branches                         #  205.357 M/sec                       ( +-  0.48% )  (67)
    4135666632      branch-misses                    #    6.63% of all branches             ( +-  0.63% )  (67)

       174.764 +- 0.214 seconds time elapsed  ( +-  0.12% )

Changelog:
V6: Add timeout for poll() and simplify some logic as suggested
    by Jason.

V5: Address the comment from Jason in vhost_net_test.c and the
    comment about leaving out the gfp change for the page frag
    in sock.c, as suggested by Paolo.

V4: Resend based on latest net-next branch.

V3:
1. Add __page_frag_alloc_align(), which is passed the align mask
   the original function expected, as suggested by Alexander.
2. Drop patch 3 in v2, as suggested by Alexander.
3. Reorder patches 4 & 5 in v2, as suggested by Alexander.

Note that placing this gfp flags handling for the order-3 page
allocation in an inline function is not considered, as we may be
able to unify the page_frag and page_frag_cache handling.
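
For reference, the unified gfp handling ends up roughly as below,
abridged from __page_frag_cache_refill() in mm/page_alloc.c after
this series (see the corresponding hunk further down):
__GFP_DIRECT_RECLAIM is masked off for the order-3 attempt so
kswapd may still be woken, but the large allocation never direct
reclaims; on failure it falls back to an order-0 page with the
caller's original gfp mask.

  static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
                                               gfp_t gfp_mask)
  {
          struct page *page = NULL;
          gfp_t gfp = gfp_mask;

  #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
          /* avoid direct reclaim for the order-3 attempt, allow kswapd to wake */
          gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
                     __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
          page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
                                  PAGE_FRAG_CACHE_MAX_ORDER);
          nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
  #endif
          if (unlikely(!page))
                  page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

          nc->va = page ? page_address(page) : NULL;

          return page;
  }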

V2: Change 'xor'd' to 'masked off', add vhost tx testing for
    vhost_net_test.

V1: Fix some typos, drop the RFC tag and rebase on latest net-next.
====================

Link: https://lore.kernel.org/r/20240228093013.8263-1-linyunsheng@huawei.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 0e71862a c5d3705c
@@ -1276,17 +1276,10 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 
 static void gve_drain_page_cache(struct gve_priv *priv)
 {
-        struct page_frag_cache *nc;
         int i;
 
-        for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-                nc = &priv->rx[i].page_cache;
-                if (nc->va) {
-                        __page_frag_cache_drain(virt_to_page(nc->va),
-                                                nc->pagecnt_bias);
-                        nc->va = NULL;
-                }
-        }
+        for (i = 0; i < priv->rx_cfg.num_queues; i++)
+                page_frag_cache_drain(&priv->rx[i].page_cache);
 }
 
 static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
...
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 static void
 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-        struct page *page;
         int i;
 
         for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
                 entry->buf = NULL;
         }
 
-        if (!q->cache.va)
-                return;
-
-        page = virt_to_page(q->cache.va);
-        __page_frag_cache_drain(page, q->cache.pagecnt_bias);
-        memset(&q->cache, 0, sizeof(q->cache));
+        page_frag_cache_drain(&q->cache);
 }
 
 static void
 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-        struct page *page;
-
         for (;;) {
                 void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
                 skb_free_frag(buf);
         }
 
-        if (!q->cache.va)
-                return;
-
-        page = virt_to_page(q->cache.va);
-        __page_frag_cache_drain(page, q->cache.pagecnt_bias);
-        memset(&q->cache, 0, sizeof(q->cache));
+        page_frag_cache_drain(&q->cache);
 }
 
 static void
...
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
-        struct page *page;
         struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
         struct nvme_tcp_queue *queue = &ctrl->queues[qid];
         unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
         if (queue->hdr_digest || queue->data_digest)
                 nvme_tcp_free_crypto(queue);
 
-        if (queue->pf_cache.va) {
-                page = virt_to_head_page(queue->pf_cache.va);
-                __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
-                queue->pf_cache.va = NULL;
-        }
+        page_frag_cache_drain(&queue->pf_cache);
 
         noreclaim_flag = memalloc_noreclaim_save();
         /* ->sock will be released by fput() */
...
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
-        struct page *page;
         struct nvmet_tcp_queue *queue =
                 container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
         if (queue->hdr_digest || queue->data_digest)
                 nvmet_tcp_free_crypto(queue);
         ida_free(&nvmet_tcp_queue_ida, queue->idx);
-        page = virt_to_head_page(queue->pf_cache.va);
-        __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+        page_frag_cache_drain(&queue->pf_cache);
         kfree(queue);
 }
 
...
@@ -141,10 +141,8 @@ struct vhost_net {
         unsigned tx_zcopy_err;
         /* Flush in progress. Protected by tx vq lock. */
         bool tx_flush;
-        /* Private page frag */
-        struct page_frag page_frag;
-        /* Refcount bias of page frag */
-        int refcnt_bias;
+        /* Private page frag cache */
+        struct page_frag_cache pf_cache;
 };
 
 static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
                !vhost_vq_avail_empty(vq->dev, vq);
 }
 
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
-                                       struct page_frag *pfrag, gfp_t gfp)
-{
-        if (pfrag->page) {
-                if (pfrag->offset + sz <= pfrag->size)
-                        return true;
-                __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
-        }
-
-        pfrag->offset = 0;
-        net->refcnt_bias = 0;
-        if (SKB_FRAG_PAGE_ORDER) {
-                /* Avoid direct reclaim but allow kswapd to wake */
-                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
-                                          __GFP_COMP | __GFP_NOWARN |
-                                          __GFP_NORETRY,
-                                          SKB_FRAG_PAGE_ORDER);
-                if (likely(pfrag->page)) {
-                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
-                        goto done;
-                }
-        }
-        pfrag->page = alloc_page(gfp);
-        if (likely(pfrag->page)) {
-                pfrag->size = PAGE_SIZE;
-                goto done;
-        }
-        return false;
-
-done:
-        net->refcnt_bias = USHRT_MAX;
-        page_ref_add(pfrag->page, USHRT_MAX - 1);
-        return true;
-}
-
 #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 
 static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
         struct vhost_net *net = container_of(vq->dev, struct vhost_net,
                                              dev);
         struct socket *sock = vhost_vq_get_backend(vq);
-        struct page_frag *alloc_frag = &net->page_frag;
         struct virtio_net_hdr *gso;
         struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
         struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
         int sock_hlen = nvq->sock_hlen;
         void *buf;
         int copied;
+        int ret;
 
         if (unlikely(len < nvq->sock_hlen))
                 return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
                 return -ENOSPC;
 
         buflen += SKB_DATA_ALIGN(len + pad);
-        alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
-        if (unlikely(!vhost_net_page_frag_refill(net, buflen,
-                                                 alloc_frag, GFP_KERNEL)))
+        buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+                                    SMP_CACHE_BYTES);
+        if (unlikely(!buf))
                 return -ENOMEM;
 
-        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
-        copied = copy_page_from_iter(alloc_frag->page,
-                                     alloc_frag->offset +
-                                     offsetof(struct tun_xdp_hdr, gso),
-                                     sock_hlen, from);
-        if (copied != sock_hlen)
-                return -EFAULT;
+        copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+                                sock_hlen, from);
+        if (copied != sock_hlen) {
+                ret = -EFAULT;
+                goto err;
+        }
 
         hdr = buf;
         gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
                                vhost16_to_cpu(vq, gso->csum_start) +
                                vhost16_to_cpu(vq, gso->csum_offset) + 2);
 
-                if (vhost16_to_cpu(vq, gso->hdr_len) > len)
-                        return -EINVAL;
+                if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+                        ret = -EINVAL;
+                        goto err;
+                }
         }
 
         len -= sock_hlen;
-        copied = copy_page_from_iter(alloc_frag->page,
-                                     alloc_frag->offset + pad,
-                                     len, from);
-        if (copied != len)
-                return -EFAULT;
+        copied = copy_from_iter(buf + pad, len, from);
+        if (copied != len) {
+                ret = -EFAULT;
+                goto err;
+        }
 
         xdp_init_buff(xdp, buflen, NULL);
         xdp_prepare_buff(xdp, buf, pad, len, true);
         hdr->buflen = buflen;
 
-        --net->refcnt_bias;
-        alloc_frag->offset += buflen;
-
         ++nvq->batched_xdp;
 
         return 0;
+
+err:
+        page_frag_free(buf);
+        return ret;
 }
 
 static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                         vqs[VHOST_NET_VQ_RX]);
 
         f->private_data = n;
-        n->page_frag.page = NULL;
-        n->refcnt_bias = 0;
+        n->pf_cache.va = NULL;
 
         return 0;
 }
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
         kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
         kfree(n->dev.vqs);
-        if (n->page_frag.page)
-                __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+        page_frag_cache_drain(&n->pf_cache);
         kvfree(n);
         return 0;
 }
...
@@ -311,15 +311,23 @@ extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
 
 struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc_align(struct page_frag_cache *nc,
-                                   unsigned int fragsz, gfp_t gfp_mask,
-                                   unsigned int align_mask);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+                              gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+                                          unsigned int fragsz, gfp_t gfp_mask,
+                                          unsigned int align)
+{
+        WARN_ON_ONCE(!is_power_of_2(align));
+        return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
 
 static inline void *page_frag_alloc(struct page_frag_cache *nc,
                              unsigned int fragsz, gfp_t gfp_mask)
 {
-        return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+        return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
 }
 
 extern void page_frag_free(void *addr);
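
The new inline wrapper above relies on the usual power-of-two trick:
for a power-of-two align, -align equals ~(align - 1), i.e. exactly the
mask that clears the low bits, so __page_frag_alloc_align() can keep
taking a mask and apply it directly to the offset. A tiny standalone
check of that arithmetic (illustration only, not kernel code):

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int offset = 1000;
          unsigned int align = 64;        /* e.g. a 64-byte cacheline */

          /* two's complement negation of a power of two is the align mask */
          assert(-align == ~(align - 1));

          printf("mask   = 0x%08x\n", -align);                 /* 0xffffffc0 */
          printf("offset = %u -> %u\n", offset, offset & -align);  /* 1000 -> 960 */
          return 0;
  }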
@@ -4685,8 +4685,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
         gfp_t gfp = gfp_mask;
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-        gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-                    __GFP_NOMEMALLOC;
+        gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
+                   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
                                 PAGE_FRAG_CACHE_MAX_ORDER);
         nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
@@ -4699,6 +4699,16 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
         return page;
 }
 
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+        if (!nc->va)
+                return;
+
+        __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+        nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
 void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
@@ -4708,7 +4718,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *page_frag_alloc_align(struct page_frag_cache *nc,
-                      unsigned int fragsz, gfp_t gfp_mask,
-                      unsigned int align_mask)
+void *__page_frag_alloc_align(struct page_frag_cache *nc,
+                              unsigned int fragsz, gfp_t gfp_mask,
+                              unsigned int align_mask)
 {
@@ -4779,7 +4789,7 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
 
         return nc->va + offset;
 }
-EXPORT_SYMBOL(page_frag_alloc_align);
+EXPORT_SYMBOL(__page_frag_alloc_align);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
...
@@ -315,7 +315,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 
         fragsz = SKB_DATA_ALIGN(fragsz);
 
-        return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+        return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+                                       align_mask);
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
@@ -327,13 +328,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
         if (in_hardirq() || irqs_disabled()) {
                 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
 
-                data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+                data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+                                               align_mask);
         } else {
                 struct napi_alloc_cache *nc;
 
                 local_bh_disable();
                 nc = this_cpu_ptr(&napi_alloc_cache);
-                data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+                data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+                                               align_mask);
                 local_bh_enable();
         }
         return data;
...
 # SPDX-License-Identifier: GPL-2.0-only
 *.d
 virtio_test
+vhost_net_test
 vringh_test
 virtio-trace/trace-agent
...
 # SPDX-License-Identifier: GPL-2.0
 all: test mod
-test: virtio_test vringh_test
+test: virtio_test vringh_test vhost_net_test
 virtio_test: virtio_ring.o virtio_test.o
 vringh_test: vringh_test.o vringh.o virtio_ring.o
+vhost_net_test: virtio_ring.o vhost_net_test.o
 
 try-run = $(shell set -e;       \
         if ($(1)) >/dev/null 2>&1;      \
@@ -49,6 +50,7 @@ oot-clean: OOT_BUILD+=clean
 .PHONY: all test mod clean vhost oot oot-clean oot-build
 clean:
-       ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
-             vhost_test/Module.symvers vhost_test/modules.order *.d
+       ${RM} *.o vringh_test virtio_test vhost_net_test vhost_test/*.o \
+             vhost_test/.*.cmd vhost_test/Module.symvers \
+             vhost_test/modules.order *.d
 -include *.d
...
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VIRTIO_CONFIG_H
+#define LINUX_VIRTIO_CONFIG_H
 #include <linux/virtio_byteorder.h>
 #include <linux/virtio.h>
 #include <uapi/linux/virtio_config.h>
@@ -95,3 +97,5 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 {
         return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
+
+#endif