Commit 57b07793 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - IRQ bypass support for vdpa and IFC

 - MLX5 vdpa driver

 - Endianness fixes for virtio drivers

 - Misc other fixes

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (71 commits)
  vdpa/mlx5: fix up endian-ness for mtu
  vdpa: Fix pointer math bug in vdpasim_get_config()
  vdpa/mlx5: Fix pointer math in mlx5_vdpa_get_config()
  vdpa/mlx5: fix memory allocation failure checks
  vdpa/mlx5: Fix uninitialised variable in core/mr.c
  vdpa_sim: init iommu lock
  virtio_config: fix up warnings on parisc
  vdpa/mlx5: Add VDPA driver for supported mlx5 devices
  vdpa/mlx5: Add shared memory registration code
  vdpa/mlx5: Add support library for mlx5 VDPA implementation
  vdpa/mlx5: Add hardware descriptive header file
  vdpa: Modify get_vq_state() to return error code
  net/vdpa: Use struct for set/get vq state
  vdpa: remove hard coded virtq num
  vdpasim: support batch updating
  vhost-vdpa: support IOTLB batching hints
  vhost-vdpa: support get/set backend features
  vhost: generialize backend features setting/getting
  vhost-vdpa: refine ioctl pre-processing
  vDPA: dont change vq irq after DRIVER_OK
  ...
parents ce13266d 8a7c3213
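Most of the per-driver changes below follow one recurring pattern: config space accesses move from virtio_cread()/virtio_cwrite(), which apply the legacy native-endian conversion, to the new virtio_cread_le()/virtio_cwrite_le() helpers for fields the device always presents in little-endian form. A minimal sketch of the new accessor, assuming a hypothetical driver and config struct (only the virtio_cread_le() macro itself comes from this series):

/* Sketch only: struct foo_config and its queue_size field are made up. */
static void foo_read_queue_size(struct virtio_device *vdev, u32 *qsize)
{
	/* read the field and convert it from little-endian to CPU order */
	virtio_cread_le(vdev, struct foo_config, queue_size, qsize);
}

The same series adds __virtio_native_type(), which the virtio_scsi config macros below switch to for the same reason.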
@@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
 }
 break;
 case VHOST_USER_SLAVE_IOTLB_MSG:
-/* not supported - VIRTIO_F_IOMMU_PLATFORM */
+/* not supported - VIRTIO_F_ACCESS_PLATFORM */
 case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
 /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
 default:
...
@@ -10667,11 +10667,17 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 {
 struct kvm_kernel_irqfd *irqfd =
 container_of(cons, struct kvm_kernel_irqfd, consumer);
+int ret;
 irqfd->producer = prod;
+kvm_arch_start_assignment(irqfd->kvm);
-return kvm_x86_ops.update_pi_irte(irqfd->kvm,
+ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
 prod->irq, irqfd->gsi, 1);
+if (ret)
+kvm_arch_end_assignment(irqfd->kvm);
+return ret;
 }
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -10694,6 +10700,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 if (ret)
 printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
 " fails: %d\n", irqfd->consumer.token, ret);
+kvm_arch_end_assignment(irqfd->kvm);
 }
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
...
@@ -204,7 +204,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
 u32 status;
 int err;
-virtio_cread(vcrypto->vdev,
+virtio_cread_le(vcrypto->vdev,
 struct virtio_crypto_config, status, &status);
 /*
@@ -323,30 +323,30 @@ static int virtcrypto_probe(struct virtio_device *vdev)
 if (!vcrypto)
 return -ENOMEM;
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 max_dataqueues, &max_data_queues);
 if (max_data_queues < 1)
 max_data_queues = 1;
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 max_cipher_key_len, &max_cipher_key_len);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 max_auth_key_len, &max_auth_key_len);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 max_size, &max_size);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 crypto_services, &crypto_services);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 cipher_algo_l, &cipher_algo_l);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 cipher_algo_h, &cipher_algo_h);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 hash_algo, &hash_algo);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 mac_algo_l, &mac_algo_l);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 mac_algo_h, &mac_algo_h);
-virtio_cread(vdev, struct virtio_crypto_config,
+virtio_cread_le(vdev, struct virtio_crypto_config,
 aead_algo, &aead_algo);
 /* Add virtio crypto device to global table */
...
@@ -39,7 +39,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 u32 events_read, events_clear = 0;
 /* read the config space */
-virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
 events_read, &events_read);
 if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
 if (vgdev->has_edid)
@@ -49,7 +49,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 drm_helper_hpd_irq_event(vgdev->ddev);
 events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
 }
-virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
+virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
 events_clear, &events_clear);
 }
@@ -165,7 +165,7 @@ int virtio_gpu_init(struct drm_device *dev)
 }
 /* get display info */
-virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
 num_scanouts, &num_scanouts);
 vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
 VIRTIO_GPU_MAX_SCANOUTS);
@@ -176,7 +176,7 @@ int virtio_gpu_init(struct drm_device *dev)
 }
 DRM_INFO("number of scanouts: %d\n", num_scanouts);
-virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
 num_capsets, &num_capsets);
 DRM_INFO("number of cap sets: %d\n", num_capsets);
...
@@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_mem_entry **ents,
 unsigned int *nents)
 {
-bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 struct scatterlist *sg;
 int si, ret;
...
@@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 struct virtio_gpu_transfer_to_host_2d *cmd_p;
 struct virtio_gpu_vbuffer *vbuf;
-bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 if (use_dma_api)
@@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 struct virtio_gpu_transfer_host_3d *cmd_p;
 struct virtio_gpu_vbuffer *vbuf;
-bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 if (use_dma_api)
...
@@ -1010,7 +1010,7 @@ static int viommu_probe(struct virtio_device *vdev)
 if (ret)
 return ret;
-virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
+virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
 &viommu->pgsize_bitmap);
 if (!viommu->pgsize_bitmap) {
@@ -1022,23 +1022,23 @@ static int viommu_probe(struct virtio_device *vdev)
 viommu->last_domain = ~0U;
 /* Optional features */
-virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
 struct virtio_iommu_config, input_range.start,
 &input_start);
-virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
+virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
 struct virtio_iommu_config, input_range.end,
 &input_end);
-virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
 struct virtio_iommu_config, domain_range.start,
 &viommu->first_domain);
-virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
 struct virtio_iommu_config, domain_range.end,
 &viommu->last_domain);
-virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
+virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
 struct virtio_iommu_config, probe_size,
 &viommu->probe_size);
...
@@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi)
 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
 return;
-speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
-speed));
+virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
 if (ethtool_validate_speed(speed))
 vi->speed = speed;
-duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
-duplex));
+virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
 if (ethtool_validate_duplex(duplex))
 vi->duplex = duplex;
 }
...
@@ -58,9 +58,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
 goto out_err;
 }
-virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
 start, &vpmem->start);
-virtio_cread(vpmem->vdev, struct virtio_pmem_config,
+virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
 size, &vpmem->size);
 res.start = vpmem->start;
...
@@ -625,7 +625,10 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
 vdev_id = VIRTIO_ID_NET;
 hdr_len = sizeof(struct virtio_net_hdr);
 config = &fifo->vdev[vdev_id]->config.net;
-if (ntohs(hdr.len) > config->mtu +
+/* A legacy-only interface for now. */
+if (ntohs(hdr.len) >
+__virtio16_to_cpu(virtio_legacy_is_little_endian(),
+config->mtu) +
 MLXBF_TMFIFO_NET_L2_OVERHEAD)
 return;
 } else {
@@ -1231,8 +1234,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
 /* Create the network vdev. */
 memset(&net_config, 0, sizeof(net_config));
-net_config.mtu = ETH_DATA_LEN;
-net_config.status = VIRTIO_NET_S_LINK_UP;
+/* A legacy-only interface for now. */
+net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+ETH_DATA_LEN);
+net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
+VIRTIO_NET_S_LINK_UP);
 mlxbf_tmfifo_get_cfg_mac(net_config.mac);
 rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
 MLXBF_TMFIFO_NET_FEATURES, &net_config,
...
@@ -754,14 +754,14 @@ static struct scsi_host_template virtscsi_host_template = {
 #define virtscsi_config_get(vdev, fld) \
 ({ \
-typeof(((struct virtio_scsi_config *)0)->fld) __val; \
+__virtio_native_type(struct virtio_scsi_config, fld) __val; \
 virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
 __val; \
 })
 #define virtscsi_config_set(vdev, fld, val) \
 do { \
-typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
+__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
 virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
 } while(0)
...
@@ -29,4 +29,23 @@ config IFCVF
 To compile this driver as a module, choose M here: the module will
 be called ifcvf.
+config MLX5_VDPA
+bool "MLX5 VDPA support library for ConnectX devices"
+depends on MLX5_CORE
+default n
+help
+Support library for Mellanox VDPA drivers. Provides code that is
+common for all types of VDPA drivers. The following drivers are planned:
+net, block.
+config MLX5_VDPA_NET
+tristate "vDPA driver for ConnectX devices"
+depends on MLX5_VDPA
+default n
+help
+VDPA network driver for ConnectX6 and newer. Provides offloading
+of virtio net datapath such that descriptors put on the ring will
+be executed by the hardware. It also supports a variety of stateless
+offloads depending on the actual device used and firmware version.
 endif # VDPA
@@ -2,3 +2,4 @@
 obj-$(CONFIG_VDPA) += vdpa.o
 obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
 obj-$(CONFIG_IFCVF) += ifcvf/
+obj-$(CONFIG_MLX5_VDPA) += mlx5/
@@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw)
 return 0;
 }
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 {
 struct ifcvf_lm_cfg __iomem *ifcvf_lm;
 void __iomem *avail_idx_addr;
@@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
 return last_avail_idx;
 }
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
 {
 struct ifcvf_lm_cfg __iomem *ifcvf_lm;
 void __iomem *avail_idx_addr;
...
@@ -29,7 +29,7 @@
 (1ULL << VIRTIO_F_VERSION_1) | \
 (1ULL << VIRTIO_NET_F_STATUS) | \
 (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
-(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+(1ULL << VIRTIO_F_ACCESS_PLATFORM) | \
 (1ULL << VIRTIO_NET_F_MRG_RXBUF))
 /* Only one queue pair for now. */
@@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
 void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
 void ifcvf_reset(struct ifcvf_hw *hw);
 u64 ifcvf_get_features(struct ifcvf_hw *hw);
-u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
-int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
 #endif /* _IFCVF_H_ */
@@ -50,8 +50,10 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
 int i;
-for (i = 0; i < queues; i++)
+for (i = 0; i < queues; i++) {
 devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+vf->vring[i].irq = -EINVAL;
+}
 ifcvf_free_irq_vectors(pdev);
 }
@@ -235,19 +237,21 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
 return IFCVF_QUEUE_MAX;
 }
-static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+struct vdpa_vq_state *state)
 {
 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
-return ifcvf_get_vq_state(vf, qid);
+state->avail_index = ifcvf_get_vq_state(vf, qid);
+return 0;
 }
 static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
-u64 num)
+const struct vdpa_vq_state *state)
 {
 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
-return ifcvf_set_vq_state(vf, qid, num);
+return ifcvf_set_vq_state(vf, qid, state->avail_index);
 }
 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -352,6 +356,14 @@ static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
 vf->config_cb.private = cb->private;
 }
+static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
+u16 qid)
+{
+struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+return vf->vring[qid].irq;
+}
 /*
 * IFCVF currently does't have on-chip IOMMU, so not
 * implemented set_map()/dma_map()/dma_unmap()
@@ -369,6 +381,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
 .get_vq_ready = ifcvf_vdpa_get_vq_ready,
 .set_vq_num = ifcvf_vdpa_set_vq_num,
 .set_vq_address = ifcvf_vdpa_set_vq_address,
+.get_vq_irq = ifcvf_vdpa_get_vq_irq,
 .kick_vq = ifcvf_vdpa_kick_vq,
 .get_generation = ifcvf_vdpa_get_generation,
 .get_device_id = ifcvf_vdpa_get_device_id,
@@ -384,7 +397,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 struct device *dev = &pdev->dev;
 struct ifcvf_adapter *adapter;
 struct ifcvf_hw *vf;
-int ret;
+int ret, i;
 ret = pcim_enable_device(pdev);
 if (ret) {
@@ -420,7 +433,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
-dev, &ifc_vdpa_ops);
+dev, &ifc_vdpa_ops,
+IFCVF_MAX_QUEUE_PAIRS * 2);
 if (adapter == NULL) {
 IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
 return -ENOMEM;
@@ -441,6 +455,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 goto err;
 }
+for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+vf->vring[i].irq = -EINVAL;
 ret = vdpa_register_device(&adapter->vdpa);
 if (ret) {
 IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
...
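The vq state rework visible above replaces the bare u64 available index with struct vdpa_vq_state and lets .get_vq_state() report failure. For another parent driver the pair of callbacks would look roughly like this sketch (every foo_* identifier is hypothetical; only the signatures and the avail_index field come from the diff above):

/* Sketch only: foo_hw and its helpers are made-up names. */
static int foo_get_vq_state(struct vdpa_device *vdpa, u16 qid,
			    struct vdpa_vq_state *state)
{
	struct foo_hw *hw = container_of(vdpa, struct foo_hw, vdpa);

	/* report the last available index the device has consumed */
	state->avail_index = foo_hw_read_avail_idx(hw, qid);
	return 0;	/* drivers can now return an error here instead */
}

static int foo_set_vq_state(struct vdpa_device *vdpa, u16 qid,
			    const struct vdpa_vq_state *state)
{
	struct foo_hw *hw = container_of(vdpa, struct foo_hw, vdpa);

	/* restore the available index, e.g. after migration */
	return foo_hw_write_avail_idx(hw, qid, state->avail_index);
}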
subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core
obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o
mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#ifndef __MLX5_VDPA_H__
#define __MLX5_VDPA_H__
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
struct mlx5_vdpa_direct_mr {
u64 start;
u64 end;
u32 perm;
struct mlx5_core_mkey mr;
struct sg_table sg_head;
int log_size;
int nsg;
struct list_head list;
u64 offset;
};
struct mlx5_vdpa_mr {
struct mlx5_core_mkey mkey;
/* list of direct MRs descendants of this indirect mr */
struct list_head head;
unsigned long num_directs;
unsigned long num_klms;
bool initialized;
/* serialize mkey creation and destruction */
struct mutex mkey_mtx;
};
struct mlx5_vdpa_resources {
u32 pdn;
struct mlx5_uars_page *uar;
void __iomem *kick_addr;
u16 uid;
u32 null_mkey;
bool valid;
};
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
struct mlx5_vdpa_resources res;
u64 mlx_features;
u64 actual_features;
u8 status;
u32 max_vqs;
u32 generation;
struct mlx5_vdpa_mr mr;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn);
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn);
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn);
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey);
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map);
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
#define mlx5_vdpa_warn(__dev, format, ...) \
dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
current->pid, ##__VA_ARGS__)
#define mlx5_vdpa_info(__dev, format, ...) \
dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
current->pid, ##__VA_ARGS__)
#define mlx5_vdpa_dbg(__dev, format, ...) \
dev_debug((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \
current->pid, ##__VA_ARGS__)
#endif /* __MLX5_VDPA_H__ */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#ifndef __MLX5_VDPA_IFC_H_
#define __MLX5_VDPA_IFC_H_
#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
};
enum {
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps?
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
};
enum {
MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
};
struct mlx5_ifc_virtio_q_bits {
u8 virtio_q_type[0x8];
u8 reserved_at_8[0x5];
u8 event_mode[0x3];
u8 queue_index[0x10];
u8 full_emulation[0x1];
u8 virtio_version_1_0[0x1];
u8 reserved_at_22[0x2];
u8 offload_type[0x4];
u8 event_qpn_or_msix[0x18];
u8 doorbell_stride_index[0x10];
u8 queue_size[0x10];
u8 device_emulation_id[0x20];
u8 desc_addr[0x40];
u8 used_addr[0x40];
u8 available_addr[0x40];
u8 virtio_q_mkey[0x20];
u8 max_tunnel_desc[0x10];
u8 reserved_at_170[0x8];
u8 error_type[0x8];
u8 umem_1_id[0x20];
u8 umem_1_size[0x20];
u8 umem_1_offset[0x40];
u8 umem_2_id[0x20];
u8 umem_2_size[0x20];
u8 umem_2_offset[0x40];
u8 umem_3_id[0x20];
u8 umem_3_size[0x20];
u8 umem_3_offset[0x40];
u8 counter_set_id[0x20];
u8 reserved_at_320[0x8];
u8 pd[0x18];
u8 reserved_at_340[0xc0];
};
struct mlx5_ifc_virtio_net_q_object_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x20];
u8 vhca_id[0x10];
u8 reserved_at_70[0x10];
u8 queue_feature_bit_mask_12_3[0xa];
u8 dirty_bitmap_dump_enable[0x1];
u8 vhost_log_page[0x5];
u8 reserved_at_90[0xc];
u8 state[0x4];
u8 reserved_at_a0[0x5];
u8 queue_feature_bit_mask_2_0[0x3];
u8 tisn_or_qpn[0x18];
u8 dirty_bitmap_mkey[0x20];
u8 dirty_bitmap_size[0x20];
u8 dirty_bitmap_addr[0x40];
u8 hw_available_index[0x10];
u8 hw_used_index[0x10];
u8 reserved_at_160[0xa0];
struct mlx5_ifc_virtio_q_bits virtio_q_context;
};
struct mlx5_ifc_create_virtio_net_q_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_virtio_net_q_object_bits obj_context;
};
struct mlx5_ifc_create_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
struct mlx5_ifc_destroy_virtio_net_q_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
};
struct mlx5_ifc_destroy_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
struct mlx5_ifc_query_virtio_net_q_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
};
struct mlx5_ifc_query_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
struct mlx5_ifc_virtio_net_q_object_bits obj_context;
};
enum {
MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
};
enum {
MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
};
enum {
MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
};
struct mlx5_ifc_modify_virtio_net_q_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_virtio_net_q_object_bits obj_context;
};
struct mlx5_ifc_modify_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
#endif /* __MLX5_VDPA_IFC_H_ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
struct mlx5_core_dev *mdev = dev->mdev;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
int err;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
if (!err)
*pdn = MLX5_GET(alloc_pd_out, out, pd);
return err;
}
static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
struct mlx5_core_dev *mdev = dev->mdev;
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
struct mlx5_core_dev *mdev = dev->mdev;
int err;
MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
if (!err)
*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
return err;
}
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
int inlen;
void *in;
int err;
/* 0 means not supported */
if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
return -EOPNOTSUPP;
inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
kfree(in);
if (!err)
*uid = MLX5_GET(create_uctx_out, out, uid);
return err;
}
static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
MLX5_SET(destroy_uctx_in, in, uid, uid);
mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
return err;
}
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
return err;
}
void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}
int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
return err;
}
void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);
return err;
}
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
void *mkc;
int err;
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
if (err)
return err;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->key |= mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
return 0;
}
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
{
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
struct mlx5_vdpa_resources *res = &mvdev->res;
struct mlx5_core_dev *mdev = mvdev->mdev;
u64 kick_addr;
int err;
if (res->valid) {
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
mutex_init(&mvdev->mr.mkey_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
goto err_uars;
}
err = create_uctx(mvdev, &res->uid);
if (err)
goto err_uctx;
err = alloc_pd(mvdev, &res->pdn, res->uid);
if (err)
goto err_pd;
err = get_null_mkey(mvdev, &res->null_mkey);
if (err)
goto err_key;
kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
err = -ENOMEM;
goto err_key;
}
res->valid = true;
return 0;
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
destroy_uctx(mvdev, res->uid);
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
mutex_destroy(&mvdev->mr.mkey_mtx);
return err;
}
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_resources *res = &mvdev->res;
if (!res->valid)
return;
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
mutex_destroy(&mvdev->mr.mkey_mtx);
res->valid = false;
}
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include "mlx5_vdpa_ifc.h"
#include "mlx5_vnet.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox VDPA driver");
MODULE_LICENSE("Dual BSD/GPL");
static bool required_caps_supported(struct mlx5_core_dev *mdev)
{
u8 event_mode;
u64 got;
got = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
return false;
event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode);
if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
return false;
if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type))
return false;
return true;
}
static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev)
{
struct mlx5_vdpa_dev *vdev;
if (mlx5_core_is_pf(mdev))
return NULL;
if (!required_caps_supported(mdev)) {
dev_info(mdev->device, "virtio net emulation not supported\n");
return NULL;
}
vdev = mlx5_vdpa_add_dev(mdev);
if (IS_ERR(vdev))
return NULL;
return vdev;
}
static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context)
{
struct mlx5_vdpa_dev *vdev = context;
mlx5_vdpa_remove_dev(vdev);
}
static struct mlx5_interface mlx5_vdpa_interface = {
.add = mlx5_vdpa_add,
.remove = mlx5_vdpa_remove,
.protocol = MLX5_INTERFACE_PROTOCOL_VDPA,
};
static int __init mlx5_vdpa_init(void)
{
return mlx5_register_interface(&mlx5_vdpa_interface);
}
static void __exit mlx5_vdpa_exit(void)
{
mlx5_unregister_interface(&mlx5_vdpa_interface);
}
module_init(mlx5_vdpa_init);
module_exit(mlx5_vdpa_exit);
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#ifndef __MLX5_VNET_H_
#define __MLX5_VNET_H_
#include <linux/vdpa.h>
#include <linux/virtio_net.h>
#include <linux/vringh.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"
static inline u32 mlx5_vdpa_max_qps(int max_vqs)
{
return max_vqs / 2;
}
#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev)
void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev);
void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev);
#endif /* __MLX5_VNET_H_ */
@@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d)
 * initialized but before registered.
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
+* @nvqs: number of virtqueues supported by this device
 * @size: size of the parent structure that contains private data
 *
 * Driver should use vdpa_alloc_device() wrapper macro instead of
@@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d)
 */
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 const struct vdpa_config_ops *config,
+int nvqs,
 size_t size)
 {
 struct vdpa_device *vdev;
@@ -96,6 +98,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
 vdev->dev.release = vdpa_release_dev;
 vdev->index = err;
 vdev->config = config;
+vdev->features_valid = false;
+vdev->nvqs = nvqs;
 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
 if (err)
...
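Because __vdpa_alloc_device() now takes nvqs, the vdpa_alloc_device() wrapper gains a trailing virtqueue-count argument, as the ifcvf probe change above already shows. A sketch of the call for a hypothetical parent driver:

	/* foo_adapter, foo_vdpa_ops and FOO_MAX_QPS are made-up names. */
	adapter = vdpa_alloc_device(struct foo_adapter, vdpa, dev,
				    &foo_vdpa_ops, FOO_MAX_QPS * 2);
	if (adapter == NULL)
		return -ENOMEM;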
@@ -65,6 +65,7 @@ config VHOST_VDPA
 tristate "Vhost driver for vDPA-based backend"
 depends on EVENTFD
 select VHOST
+select IRQ_BYPASS_MANAGER
 depends on VDPA
 help
 This kernel module can be loaded in host kernel to accelerate
...
@@ -73,7 +73,7 @@ enum {
 VHOST_NET_FEATURES = VHOST_FEATURES |
 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
-(1ULL << VIRTIO_F_IOMMU_PLATFORM)
+(1ULL << VIRTIO_F_ACCESS_PLATFORM)
 };
 enum {
@@ -1615,21 +1615,6 @@ static long vhost_net_reset_owner(struct vhost_net *n)
 return err;
 }
-static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
-{
-int i;
-mutex_lock(&n->dev.mutex);
-for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
-mutex_lock(&n->vqs[i].vq.mutex);
-n->vqs[i].vq.acked_backend_features = features;
-mutex_unlock(&n->vqs[i].vq.mutex);
-}
-mutex_unlock(&n->dev.mutex);
-return 0;
-}
 static int vhost_net_set_features(struct vhost_net *n, u64 features)
 {
 size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1653,7 +1638,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
 !vhost_log_access_ok(&n->dev))
 goto out_unlock;
-if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
 if (vhost_init_device_iotlb(&n->dev, true))
 goto out_unlock;
 }
@@ -1730,7 +1715,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
 return -EFAULT;
 if (features & ~VHOST_NET_BACKEND_FEATURES)
 return -EOPNOTSUPP;
-return vhost_net_set_backend_features(n, features);
+vhost_set_backend_features(&n->dev, features);
+return 0;
 case VHOST_RESET_OWNER:
 return vhost_net_reset_owner(n);
 case VHOST_SET_OWNER:
...
@@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 __vhost_vq_meta_reset(d->vqs[i]);
 }
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+call_ctx->ctx = NULL;
+memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+spin_lock_init(&call_ctx->ctx_lock);
+}
 static void vhost_vq_reset(struct vhost_dev *dev,
 struct vhost_virtqueue *vq)
 {
@@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 vq->log_base = NULL;
 vq->error_ctx = NULL;
 vq->kick = NULL;
-vq->call_ctx = NULL;
 vq->log_ctx = NULL;
 vhost_reset_is_le(vq);
 vhost_disable_cross_endian(vq);
 vq->busyloop_timeout = 0;
 vq->umem = NULL;
 vq->iotlb = NULL;
+vhost_vring_call_reset(&vq->call_ctx);
 __vhost_vq_meta_reset(vq);
 }
@@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 eventfd_ctx_put(dev->vqs[i]->error_ctx);
 if (dev->vqs[i]->kick)
 fput(dev->vqs[i]->kick);
-if (dev->vqs[i]->call_ctx)
-eventfd_ctx_put(dev->vqs[i]->call_ctx);
+if (dev->vqs[i]->call_ctx.ctx)
+eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
 vhost_vq_reset(dev, dev->vqs[i]);
 }
 vhost_dev_free_iovecs(dev);
@@ -1405,7 +1412,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 memcpy(newmem, &mem, size);
 if (copy_from_user(newmem->regions, m->regions,
-mem.nregions * sizeof *m->regions)) {
+flex_array_size(newmem, regions, mem.nregions))) {
 kvfree(newmem);
 return -EFAULT;
 }
@@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 r = PTR_ERR(ctx);
 break;
 }
-swap(ctx, vq->call_ctx);
+spin_lock(&vq->call_ctx.ctx_lock);
+swap(ctx, vq->call_ctx.ctx);
+spin_unlock(&vq->call_ctx.ctx_lock);
 break;
 case VHOST_SET_VRING_ERR:
 if (copy_from_user(&f, argp, sizeof f)) {
@@ -2435,8 +2445,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 /* Signal the Guest tell them we used something up. */
-if (vq->call_ctx && vhost_notify(dev, vq))
-eventfd_signal(vq->call_ctx, 1);
+if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+eventfd_signal(vq->call_ctx.ctx, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_signal);
@@ -2576,6 +2586,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
 }
 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+struct vhost_virtqueue *vq;
+int i;
+mutex_lock(&dev->mutex);
+for (i = 0; i < dev->nvqs; ++i) {
+vq = dev->vqs[i];
+mutex_lock(&vq->mutex);
+vq->acked_backend_features = features;
+mutex_unlock(&vq->mutex);
+}
+mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
 static int __init vhost_init(void)
 {
...
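vhost_set_backend_features() lifts the per-virtqueue acked_backend_features loop that net.c used to open-code into the vhost core, so any vhost device can call it from its VHOST_SET_BACKEND_FEATURES handler. A sketch mirroring the net.c call site above (the wrapper struct and feature mask are hypothetical; only vhost_set_backend_features() is real):

static long foo_set_backend_features(struct foo_vhost *f, u64 __user *featurep)
{
	u64 features;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;
	if (features & ~FOO_BACKEND_FEATURES)	/* device-specific mask */
		return -EOPNOTSUPP;
	vhost_set_backend_features(&f->dev, features);	/* f->dev is a struct vhost_dev */
	return 0;
}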
@@ -13,6 +13,7 @@
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
 #include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -60,6 +61,12 @@ enum vhost_uaddr_type {
 VHOST_NUM_ADDRS = 3,
 };
+struct vhost_vring_call {
+struct eventfd_ctx *ctx;
+struct irq_bypass_producer producer;
+spinlock_t ctx_lock;
+};
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 struct vhost_dev *dev;
@@ -72,7 +79,7 @@ struct vhost_virtqueue {
 vring_used_t __user *used;
 const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
 struct file *kick;
-struct eventfd_ctx *call_ctx;
+struct vhost_vring_call call_ctx;
 struct eventfd_ctx *error_ctx;
 struct eventfd_ctx *log_ctx;
@@ -207,6 +214,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
 struct vhost_msg_node *node);
 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
 struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
 poll_table *wait);
 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
...
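struct vhost_vring_call pairs the call eventfd with an irq_bypass_producer so that vhost-vdpa (whose file diff is collapsed in this view) can offer the device interrupt to an irq bypass consumer such as KVM's posted-interrupt path. A rough sketch of the idea, not the actual vhost-vdpa code:

/* Sketch only: irq is the device vector backing this virtqueue. */
static void foo_setup_vq_irq_bypass(struct vhost_virtqueue *vq, int irq)
{
	struct vhost_vring_call *call = &vq->call_ctx;

	call->producer.token = call->ctx;	/* the call eventfd acts as the token */
	call->producer.irq = irq;
	irq_bypass_register_producer(&call->producer);	/* failure only means no bypass */
}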
@@ -398,12 +398,9 @@ static inline s64 towards_target(struct virtio_balloon *vb)
 s64 target;
 u32 num_pages;
-virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
-&num_pages);
 /* Legacy balloon config space is LE, unlike all other devices. */
-if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-num_pages = le32_to_cpu((__force __le32)num_pages);
+virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
+&num_pages);
 target = num_pages;
 return target - vb->num_pages;
@@ -462,10 +459,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
 u32 actual = vb->num_pages;
 /* Legacy balloon config space is LE, unlike all other devices. */
-if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-actual = (__force u32)cpu_to_le32(actual);
-virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
 &actual);
 }
@@ -579,12 +573,10 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
 {
 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
 &vb->config_read_bitmap)) {
-virtio_cread(vb->vdev, struct virtio_balloon_config,
+/* Legacy balloon config space is LE, unlike all other devices. */
+virtio_cread_le(vb->vdev, struct virtio_balloon_config,
 free_page_hint_cmd_id,
 &vb->cmd_id_received_cache);
-/* Legacy balloon config space is LE, unlike all other devices. */
-if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
-vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
 }
 return vb->cmd_id_received_cache;
@@ -600,7 +592,7 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
 while (virtqueue_get_buf(vq, &unused))
 ;
-vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
+vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
 virtio_balloon_cmd_id_received(vb));
 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
@@ -987,7 +979,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
 if (!want_init_on_free())
 memset(&poison_val, PAGE_POISON, sizeof(poison_val));
-virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
 poison_val, &poison_val);
 }
@@ -1129,7 +1121,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
 else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
 __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
-__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+__virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
 return 0;
 }
...
@@ -113,9 +113,9 @@ static u8 virtinput_cfg_select(struct virtio_input *vi,
 {
 u8 size;
-virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
-virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
-virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+virtio_cwrite_le(vi->vdev, struct virtio_input_config, select, &select);
+virtio_cwrite_le(vi->vdev, struct virtio_input_config, subsel, &subsel);
+virtio_cread_le(vi->vdev, struct virtio_input_config, size, &size);
 return size;
 }
@@ -158,11 +158,11 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
 u32 mi, ma, re, fu, fl;
 virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
-virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
-virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
-virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
-virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
-virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
 input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
 input_abs_set_res(vi->idev, abs, re);
 }
@@ -244,13 +244,13 @@ static int virtinput_probe(struct virtio_device *vdev)
 size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
 if (size >= sizeof(struct virtio_input_devids)) {
-virtio_cread(vi->vdev, struct virtio_input_config,
+virtio_cread_le(vi->vdev, struct virtio_input_config,
 u.ids.bustype, &vi->idev->id.bustype);
-virtio_cread(vi->vdev, struct virtio_input_config,
+virtio_cread_le(vi->vdev, struct virtio_input_config,
 u.ids.vendor, &vi->idev->id.vendor);
-virtio_cread(vi->vdev, struct virtio_input_config,
+virtio_cread_le(vi->vdev, struct virtio_input_config,
 u.ids.product, &vi->idev->id.product);
-virtio_cread(vi->vdev, struct virtio_input_config,
+virtio_cread_le(vi->vdev, struct virtio_input_config,
 u.ids.version, &vi->idev->id.version);
 } else {
 vi->idev->id.bustype = BUS_VIRTUAL;
...
...@@ -1530,20 +1530,20 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm) ...@@ -1530,20 +1530,20 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
uint64_t new_plugged_size, usable_region_size, end_addr; uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */ /* the plugged_size is just a reflection of what _we_ did previously */
virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
&new_plugged_size); &new_plugged_size);
if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size)) if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
vm->plugged_size = new_plugged_size; vm->plugged_size = new_plugged_size;
/* calculate the last usable memory block id */ /* calculate the last usable memory block id */
virtio_cread(vm->vdev, struct virtio_mem_config, virtio_cread_le(vm->vdev, struct virtio_mem_config,
usable_region_size, &usable_region_size); usable_region_size, &usable_region_size);
end_addr = vm->addr + usable_region_size; end_addr = vm->addr + usable_region_size;
end_addr = min(end_addr, phys_limit); end_addr = min(end_addr, phys_limit);
vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1; vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
/* see if there is a request to change the size */ /* see if there is a request to change the size */
virtio_cread(vm->vdev, struct virtio_mem_config, requested_size, virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
&vm->requested_size); &vm->requested_size);
dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size); dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
...@@ -1677,15 +1677,15 @@ static int virtio_mem_init(struct virtio_mem *vm) ...@@ -1677,15 +1677,15 @@ static int virtio_mem_init(struct virtio_mem *vm)
} }
/* Fetch all properties that can't change. */ /* Fetch all properties that can't change. */
virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
&vm->plugged_size); &vm->plugged_size);
virtio_cread(vm->vdev, struct virtio_mem_config, block_size, virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
&vm->device_block_size); &vm->device_block_size);
virtio_cread(vm->vdev, struct virtio_mem_config, node_id, virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
&node_id); &node_id);
vm->nid = virtio_mem_translate_node_id(vm, node_id); vm->nid = virtio_mem_translate_node_id(vm, node_id);
virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr); virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
virtio_cread(vm->vdev, struct virtio_mem_config, region_size, virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size); &vm->region_size);
/* /*
......
...@@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { ...@@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
* @dev: the pci device * @dev: the pci device
* @cfg_type: the VIRTIO_PCI_CAP_* value we seek * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
* @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
* @bars: the bitmask of BARs
* *
* Returns offset of the capability, or 0. * Returns offset of the capability, or 0.
*/ */
......
...@@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq, ...@@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
static bool vring_use_dma_api(struct virtio_device *vdev) static bool vring_use_dma_api(struct virtio_device *vdev)
{ {
if (!virtio_has_iommu_quirk(vdev)) if (!virtio_has_dma_quirk(vdev))
return true; return true;
/* Otherwise, we are left to guess. */ /* Otherwise, we are left to guess. */
...@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) ...@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{ {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
if (unlikely(vq->broken))
return false;
virtio_mb(vq->weak_barriers); virtio_mb(vq->weak_barriers);
return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
virtqueue_poll_split(_vq, last_used_idx); virtqueue_poll_split(_vq, last_used_idx);
...@@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev) ...@@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev)
break; break;
case VIRTIO_F_VERSION_1: case VIRTIO_F_VERSION_1:
break; break;
case VIRTIO_F_IOMMU_PLATFORM: case VIRTIO_F_ACCESS_PLATFORM:
break; break;
case VIRTIO_F_RING_PACKED: case VIRTIO_F_RING_PACKED:
break; break;
......
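The virtqueue_poll() hunk above also bails out once the ring is marked broken. A minimal re-poll sketch (an assumed caller, not in-tree code, with consume() as a hypothetical handler) shows why that matters: virtqueue_get_buf() yields NULL on a broken ring, so virtqueue_poll() returning false there lets the loop terminate instead of re-arming forever.

	void *buf;
	unsigned int len, opaque;

	for (;;) {
		while ((buf = virtqueue_get_buf(vq, &len)))
			consume(buf, len);          /* consume() is a hypothetical handler */
		opaque = virtqueue_enable_cb_prepare(vq);
		if (!virtqueue_poll(vq, opaque))
			break;                      /* nothing pending, or the vq is broken */
		virtqueue_disable_cb(vq);           /* lost the race: go process new buffers */
	}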
...@@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset, ...@@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len) void *buf, unsigned len)
{ {
struct vdpa_device *vdpa = vd_get_vdpa(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
ops->get_config(vdpa, offset, buf, len); vdpa_get_config(vdpa, offset, buf, len);
} }
static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset, static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
...@@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status) ...@@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
static void virtio_vdpa_reset(struct virtio_device *vdev) static void virtio_vdpa_reset(struct virtio_device *vdev)
{ {
struct vdpa_device *vdpa = vd_get_vdpa(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
return ops->set_status(vdpa, 0); vdpa_reset(vdpa);
} }
static bool virtio_vdpa_notify(struct virtqueue *vq) static bool virtio_vdpa_notify(struct virtqueue *vq)
...@@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev) ...@@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
static int virtio_vdpa_finalize_features(struct virtio_device *vdev) static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{ {
struct vdpa_device *vdpa = vd_get_vdpa(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
/* Give virtio_ring a chance to accept features. */ /* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev); vring_transport_features(vdev);
return ops->set_features(vdpa, vdev->features); return vdpa_set_features(vdpa, vdev->features);
} }
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
......
...@@ -606,7 +606,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, ...@@ -606,7 +606,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
unsigned int i; unsigned int i;
int ret = 0; int ret = 0;
virtio_cread(vdev, struct virtio_fs_config, num_request_queues, virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
&fs->num_request_queues); &fs->num_request_queues);
if (fs->num_request_queues == 0) if (fs->num_request_queues == 0)
return -EINVAL; return -EINVAL;
......
...@@ -27,18 +27,29 @@ struct vdpa_notification_area { ...@@ -27,18 +27,29 @@ struct vdpa_notification_area {
resource_size_t size; resource_size_t size;
}; };
/**
* vDPA vq_state definition
* @avail_index: available index
*/
struct vdpa_vq_state {
u16 avail_index;
};
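With the state now carried in a struct, both directions of the call take a pointer and report errors. A sketch of how a caller such as the vhost-vdpa bridge might round-trip the available index (the ops are declared further down; idx is an assumed virtqueue index):

	const struct vdpa_config_ops *ops = vdev->config;
	struct vdpa_vq_state state;
	int err;

	err = ops->get_vq_state(vdev, idx, &state);     /* fills state.avail_index */
	if (!err)
		err = ops->set_vq_state(vdev, idx, &state);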
/** /**
* vDPA device - representation of a vDPA device * vDPA device - representation of a vDPA device
* @dev: underlying device * @dev: underlying device
* @dma_dev: the actual device that is performing DMA * @dma_dev: the actual device that is performing DMA
* @config: the configuration ops for this device. * @config: the configuration ops for this device.
* @index: device index * @index: device index
* @features_valid: were features initialized? for legacy guests
*/ */
struct vdpa_device { struct vdpa_device {
struct device dev; struct device dev;
struct device *dma_dev; struct device *dma_dev;
const struct vdpa_config_ops *config; const struct vdpa_config_ops *config;
unsigned int index; unsigned int index;
bool features_valid;
int nvqs;
}; };
/** /**
...@@ -77,16 +88,22 @@ struct vdpa_device { ...@@ -77,16 +88,22 @@ struct vdpa_device {
* @set_vq_state: Set the state for a virtqueue * @set_vq_state: Set the state for a virtqueue
* @vdev: vdpa device * @vdev: vdpa device
* @idx: virtqueue index * @idx: virtqueue index
* @state: virtqueue state (last_avail_idx) * @state: pointer to set virtqueue state (last_avail_idx)
* Returns integer: success (0) or error (< 0) * Returns integer: success (0) or error (< 0)
* @get_vq_state: Get the state for a virtqueue * @get_vq_state: Get the state for a virtqueue
* @vdev: vdpa device * @vdev: vdpa device
* @idx: virtqueue index * @idx: virtqueue index
* Returns virtqueue state (last_avail_idx) * @state: pointer to returned state (last_avail_idx)
* @get_vq_notification: Get the notification area for a virtqueue * @get_vq_notification: Get the notification area for a virtqueue
* @vdev: vdpa device * @vdev: vdpa device
* @idx: virtqueue index * @idx: virtqueue index
* Returns the notification area * Returns the notification area
* @get_vq_irq: Get the irq number of a virtqueue (optional,
* but must be implemented if vq irq offloading is required)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns int: irq number of a virtqueue,
* negative number if no irq assigned.
* @get_vq_align: Get the virtqueue align requirement * @get_vq_align: Get the virtqueue align requirement
* for the device * for the device
* @vdev: vdpa device * @vdev: vdpa device
...@@ -174,10 +191,14 @@ struct vdpa_config_ops { ...@@ -174,10 +191,14 @@ struct vdpa_config_ops {
struct vdpa_callback *cb); struct vdpa_callback *cb);
void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); const struct vdpa_vq_state *state);
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
struct vdpa_vq_state *state);
struct vdpa_notification_area struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx); (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
/* Device ops */ /* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev); u32 (*get_vq_align)(struct vdpa_device *vdev);
...@@ -208,11 +229,12 @@ struct vdpa_config_ops { ...@@ -208,11 +229,12 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent, struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config, const struct vdpa_config_ops *config,
int nvqs,
size_t size); size_t size);
#define vdpa_alloc_device(dev_struct, member, parent, config) \ #define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
container_of(__vdpa_alloc_device( \ container_of(__vdpa_alloc_device( \
parent, config, \ parent, config, nvqs, \
sizeof(dev_struct) + \ sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \ BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member))), \ dev_struct, member))), \
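vdpa_alloc_device() now takes the virtqueue count as well. A driver-side sketch, assuming a hypothetical struct my_vdpa whose embedded core device sits at offset 0 (the BUILD_BUG_ON_ZERO() above enforces this) and a my_ops table defined elsewhere:

	/* struct my_vdpa { struct vdpa_device vdpa; ... };  -- "vdpa" must be
	 * the first member so the container_of() above works. */
	struct my_vdpa *d;

	d = vdpa_alloc_device(struct my_vdpa, vdpa, parent, &my_ops, 2 /* nvqs */);
	/* error handling (IS_ERR()) omitted for brevity */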
...@@ -266,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) ...@@ -266,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
{ {
return vdev->dma_dev; return vdev->dma_dev;
} }
static inline void vdpa_reset(struct vdpa_device *vdev)
{
const struct vdpa_config_ops *ops = vdev->config;
vdev->features_valid = false;
ops->set_status(vdev, 0);
}
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
const struct vdpa_config_ops *ops = vdev->config;
vdev->features_valid = true;
return ops->set_features(vdev, features);
}
static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
void *buf, unsigned int len)
{
const struct vdpa_config_ops *ops = vdev->config;
/*
* Config accesses aren't supposed to trigger before features are set.
* If it does happen we assume a legacy guest.
*/
if (!vdev->features_valid)
vdpa_set_features(vdev, 0);
ops->get_config(vdev, offset, buf, len);
}
#endif /* _LINUX_VDPA_H */ #endif /* _LINUX_VDPA_H */
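The three inline helpers above centralise the feature tracking that the virtio_vdpa bridge earlier in this series now relies on. A sketch of the call order they assume on the bus-driver side (vdpa, driver_features and the use of virtio_net_config are placeholders for illustration):

	struct virtio_net_config cfg;

	vdpa_reset(vdpa);                              /* clears features_valid */
	vdpa_set_features(vdpa, driver_features);      /* sets features_valid   */
	vdpa_get_config(vdpa, 0, &cfg, sizeof(cfg));   /* a legacy guest that reads config
	                                                  first gets features forced to 0 */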
...@@ -11,9 +11,9 @@ ...@@ -11,9 +11,9 @@
#include <linux/types.h> #include <linux/types.h>
struct virtio_caif_transf_config { struct virtio_caif_transf_config {
u16 headroom; __virtio16 headroom;
u16 tailroom; __virtio16 tailroom;
u32 mtu; __virtio32 mtu;
u8 reserved[4]; u8 reserved[4];
}; };
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/virtio.h> #include <linux/virtio.h>
#include <linux/virtio_byteorder.h> #include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_config.h>
struct irq_affinity; struct irq_affinity;
...@@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, ...@@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
} }
/** /**
* virtio_has_iommu_quirk - determine whether this device has the iommu quirk * virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device * @vdev: the device
*/ */
static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{ {
/* /*
* Note the reverse polarity of the quirk feature (compared to most * Note the reverse polarity of the quirk feature (compared to most
* other features), this is for compatibility with legacy systems. * other features), this is for compatibility with legacy systems.
*/ */
return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
} }
static inline static inline
...@@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) ...@@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
} }
#define virtio_to_cpu(vdev, x) \
_Generic((x), \
__u8: (x), \
__virtio16: virtio16_to_cpu((vdev), (x)), \
__virtio32: virtio32_to_cpu((vdev), (x)), \
__virtio64: virtio64_to_cpu((vdev), (x)) \
)
#define cpu_to_virtio(vdev, x, m) \
_Generic((m), \
__u8: (x), \
__virtio16: cpu_to_virtio16((vdev), (x)), \
__virtio32: cpu_to_virtio32((vdev), (x)), \
__virtio64: cpu_to_virtio64((vdev), (x)) \
)
#define __virtio_native_type(structname, member) \
typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
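The _Generic selections above resolve at compile time, so the config helpers can recover the native-endian type from the declared member type. An illustration only, using a made-up struct foo_config that is not part of the header:

	struct foo_config { __virtio16 num; } cfg;       /* hypothetical */
	__virtio_native_type(struct foo_config, num) n;  /* typeof yields u16 */

	n = virtio_to_cpu(vdev, cfg.num);                /* selects virtio16_to_cpu() */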
/* Config space accessors. */ /* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \ #define virtio_cread(vdev, structname, member, ptr) \
do { \ do { \
typeof(((structname*)0)->member) virtio_cread_v; \
\
might_sleep(); \ might_sleep(); \
/* Must match the member's type, and be integer */ \ /* Sanity check: must match the member's type */ \
if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
(*ptr) = 1; \
\ \
switch (sizeof(*ptr)) { \ switch (sizeof(virtio_cread_v)) { \
case 1: \ case 1: \
*(ptr) = virtio_cread8(vdev, \
offsetof(structname, member)); \
break; \
case 2: \ case 2: \
*(ptr) = virtio_cread16(vdev, \
offsetof(structname, member)); \
break; \
case 4: \ case 4: \
*(ptr) = virtio_cread32(vdev, \ vdev->config->get((vdev), \
offsetof(structname, member)); \ offsetof(structname, member), \
break; \ &virtio_cread_v, \
case 8: \ sizeof(virtio_cread_v)); \
*(ptr) = virtio_cread64(vdev, \
offsetof(structname, member)); \
break; \ break; \
default: \ default: \
BUG(); \ __virtio_cread_many((vdev), \
offsetof(structname, member), \
&virtio_cread_v, \
1, \
sizeof(virtio_cread_v)); \
break; \
} \ } \
*(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
} while(0) } while(0)
/* Config space accessors. */ /* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \ #define virtio_cwrite(vdev, structname, member, ptr) \
do { \ do { \
typeof(((structname*)0)->member) virtio_cwrite_v = \
cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
\
might_sleep(); \
/* Sanity check: must match the member's type */ \
typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
\
vdev->config->set((vdev), offsetof(structname, member), \
&virtio_cwrite_v, \
sizeof(virtio_cwrite_v)); \
} while(0)
/*
* Nothing virtio-specific about these, but let's worry about generalizing
* these later.
*/
#define virtio_le_to_cpu(x) \
_Generic((x), \
__u8: (u8)(x), \
__le16: (u16)le16_to_cpu(x), \
__le32: (u32)le32_to_cpu(x), \
__le64: (u64)le64_to_cpu(x) \
)
#define virtio_cpu_to_le(x, m) \
_Generic((m), \
__u8: (x), \
__le16: cpu_to_le16(x), \
__le32: cpu_to_le32(x), \
__le64: cpu_to_le64(x) \
)
/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr) \
do { \
typeof(((structname*)0)->member) virtio_cread_v; \
\
might_sleep(); \ might_sleep(); \
/* Must match the member's type, and be integer */ \ /* Sanity check: must match the member's type */ \
if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
BUG_ON((*ptr) == 1); \
\ \
switch (sizeof(*ptr)) { \ switch (sizeof(virtio_cread_v)) { \
case 1: \ case 1: \
virtio_cwrite8(vdev, \
offsetof(structname, member), \
*(ptr)); \
break; \
case 2: \ case 2: \
virtio_cwrite16(vdev, \
offsetof(structname, member), \
*(ptr)); \
break; \
case 4: \ case 4: \
virtio_cwrite32(vdev, \ vdev->config->get((vdev), \
offsetof(structname, member), \ offsetof(structname, member), \
*(ptr)); \ &virtio_cread_v, \
sizeof(virtio_cread_v)); \
break; \ break; \
case 8: \ default: \
virtio_cwrite64(vdev, \ __virtio_cread_many((vdev), \
offsetof(structname, member), \ offsetof(structname, member), \
*(ptr)); \ &virtio_cread_v, \
1, \
sizeof(virtio_cread_v)); \
break; \ break; \
default: \
BUG(); \
} \ } \
*(ptr) = virtio_le_to_cpu(virtio_cread_v); \
} while(0) } while(0)
#define virtio_cwrite_le(vdev, structname, member, ptr) \
do { \
typeof(((structname*)0)->member) virtio_cwrite_v = \
virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
\
might_sleep(); \
/* Sanity check: must match the member's type */ \
typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
\
vdev->config->set((vdev), offsetof(structname, member), \
&virtio_cwrite_v, \
sizeof(virtio_cwrite_v)); \
} while(0)
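The _le variants are meant for config spaces that are always little-endian, as in the virtio-input, virtio-mem and virtio-fs conversions earlier in this series. A usage sketch with a made-up layout; the members must be declared with __le* types or the typecheck()/_Generic selection above fails to build:

	struct foo_config {              /* hypothetical, modern (LE-only) device */
		__le32 block_size;
		__le16 queues;
	};

	u32 bs;
	u16 nq;

	virtio_cread_le(vdev, struct foo_config, block_size, &bs);
	virtio_cread_le(vdev, struct foo_config, queues, &nq);
	if (nq > 8)
		nq = 8;                  /* clamp to what the driver supports */
	virtio_cwrite_le(vdev, struct foo_config, queues, &nq);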
/* Read @count fields, @bytes each. */ /* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev, static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset, unsigned int offset,
...@@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev, ...@@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev,
static inline u16 virtio_cread16(struct virtio_device *vdev, static inline u16 virtio_cread16(struct virtio_device *vdev,
unsigned int offset) unsigned int offset)
{ {
u16 ret; __virtio16 ret;
might_sleep(); might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret)); vdev->config->get(vdev, offset, &ret, sizeof(ret));
return virtio16_to_cpu(vdev, (__force __virtio16)ret); return virtio16_to_cpu(vdev, ret);
} }
static inline void virtio_cwrite16(struct virtio_device *vdev, static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val) unsigned int offset, u16 val)
{ {
__virtio16 v;
might_sleep(); might_sleep();
val = (__force u16)cpu_to_virtio16(vdev, val); v = cpu_to_virtio16(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val)); vdev->config->set(vdev, offset, &v, sizeof(v));
} }
static inline u32 virtio_cread32(struct virtio_device *vdev, static inline u32 virtio_cread32(struct virtio_device *vdev,
unsigned int offset) unsigned int offset)
{ {
u32 ret; __virtio32 ret;
might_sleep(); might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret)); vdev->config->get(vdev, offset, &ret, sizeof(ret));
return virtio32_to_cpu(vdev, (__force __virtio32)ret); return virtio32_to_cpu(vdev, ret);
} }
static inline void virtio_cwrite32(struct virtio_device *vdev, static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val) unsigned int offset, u32 val)
{ {
__virtio32 v;
might_sleep(); might_sleep();
val = (__force u32)cpu_to_virtio32(vdev, val); v = cpu_to_virtio32(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val)); vdev->config->set(vdev, offset, &v, sizeof(v));
} }
static inline u64 virtio_cread64(struct virtio_device *vdev, static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset) unsigned int offset)
{ {
u64 ret; __virtio64 ret;
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
return virtio64_to_cpu(vdev, (__force __virtio64)ret); return virtio64_to_cpu(vdev, ret);
} }
static inline void virtio_cwrite64(struct virtio_device *vdev, static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val) unsigned int offset, u64 val)
{ {
__virtio64 v;
might_sleep(); might_sleep();
val = (__force u64)cpu_to_virtio64(vdev, val); v = cpu_to_virtio64(vdev, val);
vdev->config->set(vdev, offset, &val, sizeof(val)); vdev->config->set(vdev, offset, &v, sizeof(v));
} }
/* Conditional config space accessors. */ /* Conditional config space accessors. */
...@@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, ...@@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
_r; \ _r; \
}) })
/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
({ \
int _r = 0; \
if (!virtio_has_feature(vdev, fbit)) \
_r = -ENOENT; \
else \
virtio_cread_le((vdev), structname, member, ptr); \
_r; \
})
#endif /* _LINUX_VIRTIO_CONFIG_H */ #endif /* _LINUX_VIRTIO_CONFIG_H */
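virtio_cread_le_feature() mirrors virtio_cread_feature() for LE-only fields: it evaluates to -ENOENT when the feature bit was not negotiated. A sketch reusing the hypothetical foo_config from above, with FOO_F_QUEUES standing in for a real feature bit:

	u16 nq;
	int err = virtio_cread_le_feature(vdev, FOO_F_QUEUES,
					  struct foo_config, queues, &nq);
	if (err)
		nq = 1;                  /* feature absent: single-queue fallback */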
...@@ -91,6 +91,8 @@ ...@@ -91,6 +91,8 @@
/* Use message type V2 */ /* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
/* IOTLB can accept batching hints */
#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
......
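On the uapi side, VHOST_BACKEND_F_IOTLB_BATCH is a bit number like VHOST_BACKEND_F_IOTLB_MSG_V2. A userspace sketch (not taken from QEMU; vhost_fd is an assumed vhost-vdpa file descriptor) of negotiating it before relying on batched IOTLB updates:

	__u64 features;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features))
		err(1, "VHOST_GET_BACKEND_FEATURES");

	features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		    (1ULL << VHOST_BACKEND_F_IOTLB_BATCH);

	if (ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features))
		err(1, "VHOST_SET_BACKEND_FEATURES");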