Commit e4f1b820 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "A small pull request this time around, mostly because the vduse
  network got postponed to next relase so we can be sure we got the
  security store right"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_ring: fix avail_wrap_counter in virtqueue_add_packed
  virtio_vdpa: build affinity masks conditionally
  virtio_net: merge dma operations when filling mergeable buffers
  virtio_ring: introduce dma sync api for virtqueue
  virtio_ring: introduce dma map api for virtqueue
  virtio_ring: introduce virtqueue_reset()
  virtio_ring: separate the logic of reset/enable from virtqueue_resize
  virtio_ring: correct the expression of the description of virtqueue_resize()
  virtio_ring: skip unmap for premapped
  virtio_ring: introduce virtqueue_dma_dev()
  virtio_ring: support add premapped buf
  virtio_ring: introduce virtqueue_set_dma_premapped()
  virtio_ring: put mapping error check in vring_map_one_sg
  virtio_ring: check use_dma_api before unmap desc for indirect
  vdpa_sim: offer VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK
  vdpa: add get_backend_features vdpa operation
  vdpa: accept VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK backend feature
  vdpa: add VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK flag
  vdpa/mlx5: Remove unused function declarations
parents 5c5e0e81 1acfe2c1
@@ -132,6 +132,14 @@ struct virtnet_interrupt_coalesce {
u32 max_usecs;
};
/* The dma information of pages allocated at a time. */
struct virtnet_rq_dma {
dma_addr_t addr;
u32 ref;
u16 len;
u16 need_sync;
};
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -185,6 +193,12 @@ struct receive_queue {
char name[16];
struct xdp_rxq_info xdp_rxq;
/* Record the last dma info to free after new pages is allocated. */
struct virtnet_rq_dma *last_dma;
/* Do dma by self */
bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -580,6 +594,156 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
struct page *page = virt_to_head_page(buf);
struct virtnet_rq_dma *dma;
void *head;
int offset;
head = page_address(page);
dma = head;
--dma->ref;
if (dma->ref) {
if (dma->need_sync && len) {
offset = buf - (head + sizeof(*dma));
virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
len, DMA_FROM_DEVICE);
}
return;
}
virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
put_page(page);
}
static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
if (buf && rq->do_dma)
virtnet_rq_unmap(rq, buf, *len);
return buf;
}
static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
{
void *buf;
buf = virtqueue_detach_unused_buf(rq->vq);
if (buf && rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
return buf;
}
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
struct virtnet_rq_dma *dma;
dma_addr_t addr;
u32 offset;
void *head;
if (!rq->do_dma) {
sg_init_one(rq->sg, buf, len);
return;
}
head = page_address(rq->alloc_frag.page);
offset = buf - head;
dma = head;
addr = dma->addr - sizeof(*dma) + offset;
sg_init_table(rq->sg, 1);
rq->sg[0].dma_address = addr;
rq->sg[0].length = len;
}
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
struct virtnet_rq_dma *dma;
void *buf, *head;
dma_addr_t addr;
if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
return NULL;
head = page_address(alloc_frag->page);
if (rq->do_dma) {
dma = head;
/* new pages */
if (!alloc_frag->offset) {
if (rq->last_dma) {
/* Now, the new page is allocated, the last dma
* will not be used. So the dma can be unmapped
* if the ref is 0.
*/
virtnet_rq_unmap(rq, rq->last_dma, 0);
rq->last_dma = NULL;
}
dma->len = alloc_frag->size - sizeof(*dma);
addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
dma->len, DMA_FROM_DEVICE, 0);
if (virtqueue_dma_mapping_error(rq->vq, addr))
return NULL;
dma->addr = addr;
dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
/* Add a reference to dma to prevent the entire dma from
* being released during error handling. This reference
* will be freed after the pages are no longer used.
*/
get_page(alloc_frag->page);
dma->ref = 1;
alloc_frag->offset = sizeof(*dma);
rq->last_dma = dma;
}
++dma->ref;
}
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
alloc_frag->offset += size;
return buf;
}
static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
int i;
/* disable for big mode */
if (!vi->mergeable_rx_bufs && vi->big_packets)
return;
for (i = 0; i < vi->max_queue_pairs; i++) {
if (virtqueue_set_dma_premapped(vi->rq[i].vq))
continue;
vi->rq[i].do_dma = true;
}
}
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
@@ -935,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
void *buf;
int off;
buf = virtqueue_get_buf(rq->vq, &buflen);
buf = virtnet_rq_get_buf(rq, &buflen, NULL);
if (unlikely(!buf))
goto err_buf;
@@ -1155,7 +1319,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
int len;
while (num_buf-- > 1) {
buf = virtqueue_get_buf(rq->vq, &len);
buf = virtnet_rq_get_buf(rq, &len, NULL);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
@@ -1263,7 +1427,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
return -EINVAL;
while (--*num_buf > 0) {
buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
@@ -1492,7 +1656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
while (--num_buf) {
int num_skb_frags;
buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
@@ -1651,7 +1815,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
char *buf;
unsigned int xdp_headroom = virtnet_get_headroom(vi);
void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1660,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
buf = virtnet_rq_alloc(rq, len, gfp);
if (unlikely(!buf))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
get_page(alloc_frag->page);
alloc_frag->offset += len;
sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
vi->hdr_len + GOOD_PACKET_LEN);
virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
if (err < 0) {
if (rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
return err;
}
@@ -1747,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
unsigned int headroom = virtnet_get_headroom(vi);
unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
char *buf;
unsigned int len, hole;
void *ctx;
char *buf;
int err;
unsigned int len, hole;
/* Extra tailroom is needed to satisfy XDP's assumption. This
* means rx frags coalescing won't work, but consider we've
* disabled GSO for XDP, it won't be a big issue.
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
buf = virtnet_rq_alloc(rq, len + room, gfp);
if (unlikely(!buf))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
get_page(alloc_frag->page);
alloc_frag->offset += len + room;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + room) {
/* To avoid internal fragmentation, if there is very likely not
@@ -1777,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
virtnet_rq_init_one_sg(rq, buf, len);
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
if (err < 0) {
if (rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
return err;
}
@@ -1902,13 +2072,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
void *ctx;
while (stats.packets < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
stats.packets++;
}
} else {
while (stats.packets < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
(buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
stats.packets++;
}
@@ -3808,8 +3978,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
{
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page)
if (vi->rq[i].alloc_frag.page) {
if (vi->rq[i].do_dma && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
}
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3846,9 +4019,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
virtnet_rq_free_unused_buf(vq, buf);
struct receive_queue *rq = &vi->rq[i];
while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
virtnet_rq_free_unused_buf(rq->vq, buf);
cond_resched();
}
}
@@ -4022,6 +4196,8 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
virtnet_rq_set_premapped(vi);
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
......
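For context, here is a minimal, illustrative sketch (not taken from the series) of the premapped receive pattern the virtio_net changes above implement: switch the queue to premapped mode while it is empty, map a buffer once with the new virtqueue DMA helpers, and hand the DMA address to the ring via the scatterlist. The my_rq structure, my_enable_premapped(), my_fill_one() and MY_BUF_LEN are made-up names; a kmalloc'd buffer is assumed for simplicity.

/* Illustrative only: names prefixed my_/MY_ are not part of the patches above. */
#include <linux/virtio.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#define MY_BUF_LEN 2048

struct my_rq {
	struct virtqueue *vq;
	bool premapped;
};

static int my_enable_premapped(struct my_rq *rq)
{
	/* Allowed only while no buffers are queued, e.g. right after probe. */
	int err = virtqueue_set_dma_premapped(rq->vq);

	rq->premapped = !err;
	return err;
}

static int my_fill_one(struct my_rq *rq, gfp_t gfp)
{
	struct scatterlist sg;
	dma_addr_t addr;
	void *buf;
	int err;

	buf = kmalloc(MY_BUF_LEN, gfp);
	if (!buf)
		return -ENOMEM;

	/* The driver maps the buffer itself; the virtio core will not remap it. */
	addr = virtqueue_dma_map_single_attrs(rq->vq, buf, MY_BUF_LEN,
					      DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(rq->vq, addr)) {
		kfree(buf);
		return -ENOMEM;
	}

	sg_init_table(&sg, 1);
	sg.dma_address = addr;	/* premapped: pass the DMA address directly */
	sg.length = MY_BUF_LEN;

	err = virtqueue_add_inbuf(rq->vq, &sg, 1, buf, gfp);
	if (err < 0) {
		virtqueue_dma_unmap_single_attrs(rq->vq, addr, MY_BUF_LEN,
						 DMA_FROM_DEVICE, 0);
		kfree(buf);
	}
	return err;
}

virtio_net goes one step further and shares a single mapping across a whole page frag (struct virtnet_rq_dma above) so that refill only maps when a new page is allocated; the sketch keeps one mapping per buffer for clarity.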
@@ -100,9 +100,6 @@ struct mlx5_vdpa_dev {
bool suspended;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
......
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>
#include "vdpa_sim.h"
@@ -410,6 +411,11 @@ static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
return vdpasim->dev_attr.supported_features;
}
static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -733,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
@@ -770,6 +777,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
......
@@ -403,6 +403,17 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
return 0;
}
static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
if (!ops->get_backend_features)
return 0;
else
return ops->get_backend_features(vdpa);
}
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -680,7 +691,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
BIT_ULL(VHOST_BACKEND_F_RESUME)))
BIT_ULL(VHOST_BACKEND_F_RESUME) |
BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
@@ -741,6 +753,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
......
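From userspace, the new bit is visible through the existing backend-features ioctls on a vhost-vdpa fd. The sketch below is illustrative only (not from the series): the device path is an example, and a real client would keep any other backend-feature bits it relies on (e.g. IOTLB message v2) when acking.

/* Illustrative userspace sketch; /dev/vhost-vdpa-0 is an example path. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>		/* VHOST_{GET,SET}_BACKEND_FEATURES */
#include <linux/vhost_types.h>	/* VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK */

int main(void)
{
	uint64_t features = 0;
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) == 0 &&
	    (features & (1ULL << VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK))) {
		/* Ack the bit so vrings may be enabled after DRIVER_OK. */
		uint64_t ack = 1ULL << VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK;

		if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &ack))
			perror("VHOST_SET_BACKEND_FEATURES");
	}

	close(fd);
	return 0;
}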
@@ -366,11 +366,14 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct irq_affinity default_affd = { 0 };
struct cpumask *masks;
struct vdpa_callback cb;
bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
if (!masks)
return -ENOMEM;
if (has_affinity) {
masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
if (!masks)
return -ENOMEM;
}
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
@@ -386,20 +389,22 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
goto err_setup_vq;
}
if (ops->set_vq_affinity)
if (has_affinity)
ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
kfree(masks);
if (has_affinity)
kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
kfree(masks);
if (has_affinity)
kfree(masks);
return err;
}
......
@@ -208,6 +208,9 @@ struct vdpa_map_file {
* @vdev: vdpa device
* Returns the virtio features support by the
* device
* @get_backend_features: Get parent-specific backend features (optional)
* Returns the vdpa features supported by the
* device.
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: feature support by the driver
@@ -358,6 +361,7 @@ struct vdpa_config_ops {
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
......
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -61,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
struct device *virtqueue_dma_dev(struct virtqueue *vq);
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -78,6 +81,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
int virtqueue_set_dma_premapped(struct virtqueue *_vq);
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -95,6 +100,8 @@ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
int virtqueue_reset(struct virtqueue *vq,
void (*recycle)(struct virtqueue *vq, void *buf));
/**
* struct virtio_device - representation of a device using virtio
@@ -206,4 +213,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */
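A small, hedged sketch of how a driver might use the sync helpers declared above on a premapped receive buffer: sync for the CPU only when the mapping actually needs it, as virtnet_rq_unmap() does in the virtio_net diff. The my_rx_sync_for_cpu name is illustrative, and addr/offset/len are assumed to come from the driver's own bookkeeping.

#include <linux/virtio.h>
#include <linux/dma-mapping.h>

static void my_rx_sync_for_cpu(struct virtqueue *vq, dma_addr_t addr,
			       unsigned long offset, size_t len)
{
	/* Skip the sync entirely when the DMA layer says it is not required. */
	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(vq, addr, offset, len,
							DMA_FROM_DEVICE);
}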
@@ -181,5 +181,9 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_SUSPEND 0x4
/* Device can be resumed */
#define VHOST_BACKEND_F_RESUME 0x5
/* Device supports the driver enabling virtqueues both before and after
* DRIVER_OK
*/
#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
#endif