Commit f0691533 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost updates from Michael Tsirkin:
 "New features, performance improvements, cleanups:

   - basic polling support for vhost
   - rework virtio to optionally use DMA API, fixing it on Xen
   - balloon stats gained a new entry
   - using the new napi_alloc_skb speeds up virtio net
   - virtio balloon stats can now be read while another VCPU is busy
     inflating or deflating the balloon

  plus misc cleanups in various places"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_net: replace netdev_alloc_skb_ip_align() with napi_alloc_skb()
  vhost_net: basic polling support
  vhost: introduce vhost_vq_avail_empty()
  vhost: introduce vhost_has_work()
  virtio_balloon: Allow to resize and update the balloon stats in parallel
  virtio_balloon: Use a workqueue instead of "vballoon" kthread
  virtio/s390: size of SET_IND payload
  virtio/s390: use dev_to_virtio
  vhost: rename vhost_init_used()
  vhost: rename cross-endian helpers
  virtio_blk: VIRTIO_BLK_F_WCE->VIRTIO_BLK_F_FLUSH
  vring: Use the DMA API on Xen
  virtio_pci: Use the DMA API if enabled
  virtio_mmio: Use the DMA API if enabled
  virtio: Add improved queue allocation API
  virtio_ring: Support DMA APIs
  vring: Introduce vring_use_dma_api()
  s390/dma: Allow per device dma ops
  alpha/dma: use common noop dma ops
  dma: Provide simple noop dma ops
parents 2b2f72d8 c67f5db8
@@ -123,44 +123,6 @@ static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
return ret;
}
static void alpha_noop_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return page_to_pa(page) + offset;
}
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
void *va;
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
sg_dma_len(sg) = sg->length;
}
return nents;
}
static int alpha_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static int alpha_noop_supported(struct device *dev, u64 mask)
{
return mask < 0x00ffffffUL ? 0 : 1;
@@ -168,10 +130,10 @@ static int alpha_noop_supported(struct device *dev, u64 mask)
struct dma_map_ops alpha_noop_ops = {
.alloc = alpha_noop_alloc_coherent,
.free = alpha_noop_free_coherent,
.map_page = alpha_noop_map_page,
.map_sg = alpha_noop_map_sg,
.mapping_error = alpha_noop_mapping_error,
.free = dma_noop_free_coherent,
.map_page = dma_noop_map_page,
.map_sg = dma_noop_map_sg,
.mapping_error = dma_noop_mapping_error,
.dma_supported = alpha_noop_supported,
};
@@ -124,6 +124,7 @@ config S390
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FTRACE_MCOUNT_RECORD
@@ -617,10 +618,6 @@ config HAS_IOMEM
config IOMMU_HELPER
def_bool PCI
config HAS_DMA
def_bool PCI
select HAVE_DMA_API_DEBUG
config NEED_SG_DMA_LENGTH
def_bool PCI
@@ -3,5 +3,9 @@
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>
struct dev_archdata {
struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
};
@@ -11,11 +11,13 @@
#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
extern struct dma_map_ops s390_dma_ops;
extern struct dma_map_ops s390_pci_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &s390_dma_ops;
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &dma_noop_ops;
}
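/*
 * Illustrative sketch only (not the kernel's actual dma_map_single):
 * every mapping helper dispatches through get_dma_ops(dev), which is
 * what lets the zPCI probe code below install s390_pci_dma_ops per
 * device while everything else falls back to dma_noop_ops.
 */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}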
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -641,6 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev)
int i;
pdev->dev.groups = zpci_attr_groups;
pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
@@ -547,7 +547,7 @@ static int __init dma_debug_do_init(void)
}
fs_initcall(dma_debug_do_init);
struct dma_map_ops s390_dma_ops = {
struct dma_map_ops s390_pci_dma_ops = {
.alloc = s390_dma_alloc,
.free = s390_dma_free,
.map_sg = s390_dma_map_sg,
@@ -558,7 +558,7 @@ struct dma_map_ops s390_dma_ops = {
.is_phys = 0,
/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
static int __init s390_iommu_setup(char *str)
{
@@ -477,8 +477,13 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
struct virtio_blk_config, wce,
&writeback);
/*
* If WCE is not configurable and flush is not available,
* assume no writeback cache is in use.
*/
if (err)
writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
return writeback;
}
@@ -833,14 +838,14 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
@@ -260,7 +260,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
if (unlikely(!skb))
return NULL;
@@ -342,13 +342,14 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
/* payload is the address of the indicators */
indicatorp = kmalloc(sizeof(&vcdev->indicators),
GFP_DMA | GFP_KERNEL);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->count = sizeof(vcdev->indicators);
ccw->count = sizeof(&vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
@@ -656,7 +657,10 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
}
ret = -ENOMEM;
/* We need a data area under 2G to communicate. */
/*
* We need a data area under 2G to communicate. Our payload is
* the address of the indicators.
*/
indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
if (!indicatorp)
goto out;
@@ -672,7 +676,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vcdev->indicators = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
ccw->count = sizeof(vcdev->indicators);
ccw->count = sizeof(&vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
@@ -683,7 +687,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vcdev->indicators2 = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
ccw->count = sizeof(vcdev->indicators2);
ccw->count = sizeof(&vcdev->indicators2);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
@@ -945,8 +949,7 @@ static struct virtio_config_ops virtio_ccw_config_ops = {
static void virtio_ccw_release_dev(struct device *_d)
{
struct virtio_device *dev = container_of(_d, struct virtio_device,
dev);
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
kfree(vcdev->status);
@@ -287,6 +287,43 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
rcu_read_unlock_bh();
}
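/* Scale local_clock()'s nanoseconds down to roughly microseconds. */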
static inline unsigned long busy_clock(void)
{
return local_clock() >> 10;
}
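/*
 * Polling stays worthwhile only while it is cheap: no reschedule or
 * signal pending, the time budget not yet exhausted, and no other
 * vhost work queued.
 */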
static bool vhost_can_busy_poll(struct vhost_dev *dev,
unsigned long endtime)
{
return likely(!need_resched()) &&
likely(!time_after(busy_clock(), endtime)) &&
likely(!signal_pending(current)) &&
!vhost_has_work(dev);
}
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num)
{
unsigned long uninitialized_var(endtime);
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
preempt_disable();
endtime = busy_clock() + vq->busyloop_timeout;
while (vhost_can_busy_poll(vq->dev, endtime) &&
vhost_vq_avail_empty(vq->dev, vq))
cpu_relax_lowlatency();
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
}
return r;
}
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -331,10 +368,9 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV == nvq->done_idx))
break;
head = vhost_get_vq_desc(vq, vq->iov,
head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
&out, &in);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -435,6 +471,38 @@ static int peek_head_len(struct sock *sk)
return len;
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned long uninitialized_var(endtime);
int len = peek_head_len(sk);
if (!len && vq->busyloop_timeout) {
/* Both tx vq and rx socket were polled here */
mutex_lock(&vq->mutex);
vhost_disable_notify(&net->dev, vq);
preempt_disable();
endtime = busy_clock() + vq->busyloop_timeout;
while (vhost_can_busy_poll(&net->dev, endtime) &&
skb_queue_empty(&sk->sk_receive_queue) &&
vhost_vq_avail_empty(&net->dev, vq))
cpu_relax_lowlatency();
preempt_enable();
if (vhost_enable_notify(&net->dev, vq))
vhost_poll_queue(&vq->poll);
mutex_unlock(&vq->mutex);
len = peek_head_len(sk);
}
return len;
}
/* This is a multi-buffer version of vhost_get_desc, that works if
* vq has read descriptors only.
* @vq - the relevant virtqueue
@@ -553,7 +621,7 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
while ((sock_len = peek_head_len(sock->sk))) {
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -917,7 +985,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
r = vhost_init_used(vq);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
@@ -1274,7 +1274,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->private_data = vs_tpg;
vhost_init_used(vq);
vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
@@ -196,7 +196,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
oldpriv = vq->private_data;
vq->private_data = priv;
r = vhost_init_used(&n->vqs[index]);
r = vhost_vq_init_access(&n->vqs[index]);
mutex_unlock(&vq->mutex);
@@ -43,11 +43,21 @@ enum {
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
vq->user_be = !virtio_legacy_is_little_endian();
}
static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
vq->user_be = true;
}
static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
vq->user_be = false;
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
struct vhost_vring_state s;
@@ -62,7 +72,10 @@ static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
s.num != VHOST_VRING_BIG_ENDIAN)
return -EINVAL;
vq->user_be = s.num;
if (s.num == VHOST_VRING_BIG_ENDIAN)
vhost_enable_cross_endian_big(vq);
else
vhost_enable_cross_endian_little(vq);
return 0;
}
@@ -91,7 +104,7 @@ static void vhost_init_is_le(struct vhost_virtqueue *vq)
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
@@ -113,6 +126,11 @@ static void vhost_init_is_le(struct vhost_virtqueue *vq)
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
vq->is_le = virtio_legacy_is_little_endian();
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -245,6 +263,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
return !list_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
void vhost_poll_queue(struct vhost_poll *poll)
{
vhost_work_queue(poll->dev, &poll->work);
@@ -276,8 +301,9 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call = NULL;
vq->log_ctx = NULL;
vq->memory = NULL;
vq->is_le = virtio_legacy_is_little_endian();
vhost_vq_reset_user_be(vq);
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
}
static int vhost_worker(void *data)
@@ -912,6 +938,19 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
case VHOST_GET_VRING_ENDIAN:
r = vhost_get_vring_endian(vq, idx, argp);
break;
case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
if (copy_from_user(&s, argp, sizeof(s))) {
r = -EFAULT;
break;
}
vq->busyloop_timeout = s.num;
break;
case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
s.index = idx;
s.num = vq->busyloop_timeout;
if (copy_to_user(argp, &s, sizeof(s)))
r = -EFAULT;
break;
default:
r = -ENOIOCTLCMD;
}
@@ -1152,14 +1191,14 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
return 0;
}
int vhost_init_used(struct vhost_virtqueue *vq)
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
bool is_le = vq->is_le;
if (!vq->private_data) {
vq->is_le = virtio_legacy_is_little_endian();
vhost_reset_is_le(vq);
return 0;
}
@@ -1182,7 +1221,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
vq->is_le = is_le;
return r;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
@@ -1633,6 +1672,20 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* Return true if we're sure that the available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__virtio16 avail_idx;
int r;
r = __get_user(avail_idx, &vq->avail->idx);
if (r)
return false;
return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
@@ -37,6 +37,7 @@ struct vhost_poll {
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
@@ -114,6 +115,7 @@ struct vhost_virtqueue {
/* Ring endianness requested by userspace for cross-endian support. */
bool user_be;
#endif
u32 busyloop_timeout;
};
struct vhost_dev {
@@ -148,7 +150,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
int vhost_init_used(struct vhost_virtqueue *);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
unsigned count);
@@ -158,6 +160,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
@@ -60,7 +60,7 @@ config VIRTIO_INPUT
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM
depends on HAS_IOMEM && HAS_DMA
select VIRTIO
---help---
This driver provides support for memory mapped virtio
@@ -22,8 +22,7 @@
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -50,11 +49,13 @@ struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
/* Where the ballooning thread waits for config to change. */
wait_queue_head_t config_change;
/* The balloon servicing is delegated to a freezable workqueue. */
struct work_struct update_balloon_stats_work;
struct work_struct update_balloon_size_work;
/* The thread servicing the balloon. */
struct task_struct *thread;
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
bool stop_update;
/* Waiting for host to ack the pages we released. */
wait_queue_head_t acked;
@@ -77,7 +78,6 @@ struct virtio_balloon {
u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
/* Memory statistics */
int need_stats_update;
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
/* To register callback in oom notifier call chain */
@@ -124,6 +124,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
/* When host has read buffer, this completes via balloon_ack */
wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
static void set_page_pfns(u32 pfns[], struct page *page)
@@ -136,9 +137,10 @@ static void set_page_pfns(u32 pfns[], struct page *page)
pfns[i] = page_to_balloon_pfn(page) + i;
}
static void fill_balloon(struct virtio_balloon *vb, size_t num)
static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
unsigned num_allocated_pages;
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -163,10 +165,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
adjust_managed_page_count(page, -1);
}
num_allocated_pages = vb->num_pfns;
/* Did we get any? */
if (vb->num_pfns != 0)
tell_host(vb, vb->inflate_vq);
mutex_unlock(&vb->balloon_lock);
return num_allocated_pages;
}
static void release_pages_balloon(struct virtio_balloon *vb)
@@ -257,14 +262,17 @@ static void update_balloon_stats(struct virtio_balloon *vb)
* with a single buffer. From that point forward, all conversations consist of
* a hypervisor request (a call to this function) which directs us to refill
* the virtqueue with a fresh stats buffer. Since stats collection can sleep,
* we notify our kthread which does the actual work via stats_handle_request().
* we delegate the job to a freezable workqueue that will do the actual work via
* stats_handle_request().
*/
static void stats_request(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
vb->need_stats_update = 1;
wake_up(&vb->config_change);
spin_lock(&vb->stop_update_lock);
if (!vb->stop_update)
queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
spin_unlock(&vb->stop_update_lock);
}
static void stats_handle_request(struct virtio_balloon *vb)
@@ -273,7 +281,6 @@ static void stats_handle_request(struct virtio_balloon *vb)
struct scatterlist sg;
unsigned int len;
vb->need_stats_update = 0;
update_balloon_stats(vb);
vq = vb->stats_vq;
@@ -287,8 +294,12 @@ static void stats_handle_request(struct virtio_balloon *vb)
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
unsigned long flags;
wake_up(&vb->config_change);
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update)
queue_work(system_freezable_wq, &vb->update_balloon_size_work);
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static inline s64 towards_target(struct virtio_balloon *vb)
@@ -351,43 +362,32 @@ static int virtballoon_oom_notify(struct notifier_block *self,
return NOTIFY_OK;
}
static int balloon(void *_vballoon)
static void update_balloon_stats_func(struct work_struct *work)
{
struct virtio_balloon *vb = _vballoon;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct virtio_balloon *vb;
set_freezable();
while (!kthread_should_stop()) {
s64 diff;
vb = container_of(work, struct virtio_balloon,
update_balloon_stats_work);
stats_handle_request(vb);
}
try_to_freeze();
static void update_balloon_size_func(struct work_struct *work)
{
struct virtio_balloon *vb;
s64 diff;
add_wait_queue(&vb->config_change, &wait);
for (;;) {
if ((diff = towards_target(vb)) != 0 ||
vb->need_stats_update ||
kthread_should_stop() ||
freezing(current))
break;
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&vb->config_change, &wait);
vb = container_of(work, struct virtio_balloon,
update_balloon_size_work);
diff = towards_target(vb);
if (vb->need_stats_update)
stats_handle_request(vb);
if (diff > 0)
fill_balloon(vb, diff);
diff -= fill_balloon(vb, diff);
else if (diff < 0)
leak_balloon(vb, -diff);
diff += leak_balloon(vb, -diff);
update_balloon_size(vb);
/*
* For large balloon changes, we could spend a lot of time
* and always have work to do. Be nice if preempt disabled.
*/
cond_resched();
}
return 0;
if (diff)
queue_work(system_freezable_wq, work);
}
static int init_vqs(struct virtio_balloon *vb)
@@ -505,12 +505,14 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out;
}
INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
spin_lock_init(&vb->stop_update_lock);
vb->stop_update = false;
vb->num_pages = 0;
mutex_init(&vb->balloon_lock);
init_waitqueue_head(&vb->config_change);
init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
vb->need_stats_update = 0;
balloon_devinfo_init(&vb->vb_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
@@ -529,16 +531,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
err = PTR_ERR(vb->thread);
goto out_del_vqs;
}
return 0;
out_del_vqs:
unregister_oom_notifier(&vb->nb);
out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
@@ -565,7 +559,13 @@ static void virtballoon_remove(struct virtio_device *vdev)
struct virtio_balloon *vb = vdev->priv;
unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
spin_lock_irq(&vb->stop_update_lock);
vb->stop_update = true;
spin_unlock_irq(&vb->stop_update_lock);
cancel_work_sync(&vb->update_balloon_size_work);
cancel_work_sync(&vb->update_balloon_stats_work);
remove_common(vb);
kfree(vb);
}
@@ -576,10 +576,9 @@ static int virtballoon_freeze(struct virtio_device *vdev)
struct virtio_balloon *vb = vdev->priv;
/*
* The kthread is already frozen by the PM core before this
* The workqueue is already frozen by the PM core before this
* function is called.
*/
remove_common(vb);
return 0;
}
@@ -595,7 +594,8 @@ static int virtballoon_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
fill_balloon(vb, towards_target(vb));
if (towards_target(vb))
virtballoon_changed(vdev);
update_balloon_size(vb);
return 0;
}
@@ -99,12 +99,6 @@ struct virtio_mmio_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
/* the number of entries in the queue */
unsigned int num;
/* the virtual address of the ring queue */
void *queue;
/* the list node for the virtqueues list */
struct list_head node;
};
@@ -322,15 +316,13 @@ static void vm_del_vq(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
unsigned long flags;
unsigned int index = vq->index;
spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
spin_unlock_irqrestore(&vm_dev->lock, flags);
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
if (vm_dev->version == 1) {
@@ -340,8 +332,8 @@ static void vm_del_vq(struct virtqueue *vq)
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
free_pages_exact(info->queue, size);
vring_del_virtqueue(vq);
kfree(info);
}
@@ -356,8 +348,6 @@ static void vm_del_vqs(struct virtio_device *vdev)
free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
@@ -365,7 +355,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct virtio_mmio_vq_info *info;
struct virtqueue *vq;
unsigned long flags, size;
unsigned long flags;
unsigned int num;
int err;
if (!name)
@@ -388,66 +379,40 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
goto error_kmalloc;
}
/* Allocate pages for the queue - start with a queue as big as
* possible (limited by maximum size allowed by device), drop down
* to a minimal size, just big enough to fit descriptor table
* and two rings (which makes it "alignment_size * 2")
*/
info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
/* If the device reports a 0 entry queue, we won't be able to
* use it to perform I/O, and vring_new_virtqueue() can't create
* empty queues anyway, so don't bother to set up the device.
*/
if (info->num == 0) {
num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
if (num == 0) {
err = -ENOENT;
goto error_alloc_pages;
}
while (1) {
size = PAGE_ALIGN(vring_size(info->num,
VIRTIO_MMIO_VRING_ALIGN));
/* Did the last iter shrink the queue below minimum size? */
if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
err = -ENOMEM;
goto error_alloc_pages;
}
info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
if (info->queue)
break;
info->num /= 2;
goto error_new_virtqueue;
}
/* Create the vring */
vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, info->queue, vm_notify, callback, name);
vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
true, true, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
}
/* Activate the queue */
writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
} else {
u64 addr;
addr = virt_to_phys(info->queue);
addr = virtqueue_get_desc_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
addr = virt_to_phys(virtqueue_get_avail(vq));
addr = virtqueue_get_avail_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
addr = virt_to_phys(virtqueue_get_used(vq));
addr = virtqueue_get_used_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
@@ -471,8 +436,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
free_pages_exact(info->queue, size);
error_alloc_pages:
kfree(info);
error_kmalloc:
error_available:
@@ -35,12 +35,6 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
/* the number of entries in the queue */
int num;
/* the virtual address of the ring queue */
void *queue;
/* the list node for the virtqueues list */
struct list_head node;
@@ -119,7 +119,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 msix_vec)
{
struct virtqueue *vq;
unsigned long size;
u16 num;
int err;
@@ -131,27 +130,19 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
return ERR_PTR(-ENOENT);
info->num = num;
info->msix_vector = msix_vec;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
if (info->queue == NULL)
/* create the vring */
vq = vring_create_virtqueue(index, num,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
true, false, vp_notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
/* activate the queue */
iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
/* create the vring */
vq = vring_new_virtqueue(index, info->num,
VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
true, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
}
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
@@ -159,17 +150,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_assign;
goto out_deactivate;
}
}
return vq;
out_assign:
vring_del_virtqueue(vq);
out_activate_queue:
out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
free_pages_exact(info->queue, size);
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -177,7 +166,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
unsigned long size;
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -188,13 +176,10 @@ static void del_vq(struct virtio_pci_vq_info *info)
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
}
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
free_pages_exact(info->queue, size);
vring_del_virtqueue(vq);
}
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -227,6 +212,13 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
if (rc)
rc = dma_set_mask_and_coherent(&pci_dev->dev,
DMA_BIT_MASK(32));
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
if (rc)
return rc;
@@ -287,31 +287,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
return vp_ioread16(&vp_dev->common->msix_config);
}
static size_t vring_pci_size(u16 num)
{
/* We only need a cacheline separation. */
return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
}
static void *alloc_virtqueue_pages(int *num)
{
void *pages;
/* TODO: allocate each queue chunk individually */
for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
pages = alloc_pages_exact(vring_pci_size(*num),
GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
if (pages)
return pages;
}
if (!*num)
return NULL;
/* Try to get a single page. You are my only hope! */
return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
unsigned index,
@@ -343,29 +318,22 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
/* get offset of notification word for this vq */
off = vp_ioread16(&cfg->queue_notify_off);
info->num = num;
info->msix_vector = msix_vec;
info->queue = alloc_virtqueue_pages(&info->num);
if (info->queue == NULL)
return ERR_PTR(-ENOMEM);
/* create the vring */
vq = vring_new_virtqueue(index, info->num,
vq = vring_create_virtqueue(index, num,
SMP_CACHE_BYTES, &vp_dev->vdev,
true, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto err_new_queue;
}
true, true, vp_notify, callback, name);
if (!vq)
return ERR_PTR(-ENOMEM);
/* activate the queue */
vp_iowrite16(num, &cfg->queue_size);
vp_iowrite64_twopart(virt_to_phys(info->queue),
vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
&cfg->queue_desc_lo, &cfg->queue_desc_hi);
vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
&cfg->queue_avail_lo, &cfg->queue_avail_hi);
vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
&cfg->queue_used_lo, &cfg->queue_used_hi);
if (vp_dev->notify_base) {
@@ -410,8 +378,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
vring_del_virtqueue(vq);
err_new_queue:
free_pages_exact(info->queue, vring_pci_size(info->num));
return ERR_PTR(err);
}
@@ -456,8 +422,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
vring_del_virtqueue(vq);
free_pages_exact(info->queue, vring_pci_size(info->num));
}
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
@@ -641,6 +605,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
return -EINVAL;
}
err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pci_dev->dev,
DMA_BIT_MASK(32));
if (err)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
/* Device capability is only mandatory for devices that have
* device-specific configuration.
*/
@@ -70,6 +70,8 @@ struct dma_map_ops {
int is_phys;
};
extern struct dma_map_ops dma_noop_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define DMA_MASK_NONE 0x0ULL
@@ -75,8 +75,27 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
bool virtqueue_is_broken(struct virtqueue *vq);
void *virtqueue_get_avail(struct virtqueue *vq);
void *virtqueue_get_used(struct virtqueue *vq);
const struct vring *virtqueue_get_vring(struct virtqueue *vq);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
/*
* Legacy accessors -- in almost all cases, these are the wrong functions
* to use.
*/
static inline void *virtqueue_get_desc(struct virtqueue *vq)
{
return virtqueue_get_vring(vq)->desc;
}
static inline void *virtqueue_get_avail(struct virtqueue *vq)
{
return virtqueue_get_vring(vq)->avail;
}
static inline void *virtqueue_get_used(struct virtqueue *vq)
{
return virtqueue_get_vring(vq)->used;
}
/**
* virtio_device - representation of a device using virtio
@@ -59,6 +59,35 @@ static inline void virtio_store_mb(bool weak_barriers,
struct virtio_device;
struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
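/*
 * Hedged usage sketch: my_notify and my_callback are assumed transport
 * hooks, not part of this API; the flags mirror the virtio_pci_modern
 * call in this series.
 */
	struct virtqueue *vq;
	unsigned int num;
	dma_addr_t desc;

	/* Ask for 256 entries; may_reduce_num == true lets the core fall
	 * back to a smaller ring if the allocation fails. */
	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
				    true, true, my_notify, my_callback,
				    "requestq");
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* The ring may be smaller than requested: query the actual size and
	 * the descriptor table's DMA address before programming the device. */
	num = virtqueue_get_vring_size(vq);
	desc = virtqueue_get_desc_addr(vq);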
/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring vring,
struct virtio_device *vdev,
bool weak_barriers,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name);
/*
* Creates a virtqueue with a standard layout but a caller-allocated
* ring.
*/
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -68,7 +97,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
/*
* Destroys a virtqueue. If created with vring_create_virtqueue, this
* also frees the ring.
*/
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
@@ -126,6 +126,12 @@ struct vhost_memory {
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
/* Set busy loop timeout (in us) */
#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
struct vhost_vring_state)
/* Get busy loop timeout (in us) */
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
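/*
 * Hedged userspace sketch: vhost_fd is an assumed, already-open vhost
 * device fd, and 50us is an arbitrary illustrative budget; num == 0
 * (the default) leaves busy polling disabled.
 */
	struct vhost_vring_state state = {
		.index = 0,	/* which virtqueue to tune */
		.num = 50,	/* busy-poll up to ~50us before blocking */
	};

	if (ioctl(vhost_fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &state) < 0)
		perror("VHOST_SET_VRING_BUSYLOOP_TIMEOUT");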
/* VHOST_NET specific defines */
@@ -43,11 +43,11 @@
#ifndef VIRTIO_BLK_NO_LEGACY
#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#ifndef __KERNEL__
/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
/* Old (deprecated) name for VIRTIO_BLK_F_FLUSH. */
#define VIRTIO_BLK_F_WCE VIRTIO_BLK_F_FLUSH
#endif
#endif /* !VIRTIO_BLK_NO_LEGACY */
@@ -18,6 +18,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-$(CONFIG_HAS_DMA) += dma-noop.o
lib-y += kobject.o klist.o
obj-y += lockref.o
/*
* lib/dma-noop.c
*
* Simple DMA noop-ops that map 1:1 with memory
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
static void *dma_noop_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret)
*dma_handle = virt_to_phys(ret);
return ret;
}
static void dma_noop_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
return page_to_phys(page) + offset;
}
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
void *va;
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
sg_dma_len(sg) = sg->length;
}
return nents;
}
static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
static int dma_noop_supported(struct device *dev, u64 mask)
{
return 1;
}
struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc,
.free = dma_noop_free,
.map_page = dma_noop_map_page,
.map_sg = dma_noop_map_sg,
.mapping_error = dma_noop_mapping_error,
.dma_supported = dma_noop_supported,
};
EXPORT_SYMBOL(dma_noop_ops);
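/*
 * Illustration only (dma_noop_map_page is static to this file): with
 * these ops in effect, mapping is the identity on physical addresses,
 * e.g.
 *
 *	dma_addr_t handle = dma_noop_map_page(dev, page, 0, PAGE_SIZE,
 *					      DMA_TO_DEVICE, NULL);
 *	handle == page_to_phys(page) holds by construction.
 */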
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
#ifdef CONFIG_HAS_DMA
# error Virtio userspace code does not support CONFIG_HAS_DMA
#endif
#define PCI_DMA_BUS_IS_PHYS 1
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
#endif