Commit 6b8350a4 authored by David S. Miller

Merge branch 'vsock-add-local-transport-support'

Stefano Garzarella says:

====================
vsock: add local transport support

v2:
 - style fixes [Dave]
 - removed RCU sync and changed 'the_vsock_loopback' into a global
   static variable [Stefan]
 - use the G2H transport when the local transport is not loaded and
   the remote CID is VMADDR_CID_LOCAL [Stefan]
 - rebased on net-next

v1: https://patchwork.kernel.org/cover/11251735/

This series introduces a new transport (vsock_loopback) to handle
local communication.
This can be useful to test the vsock core itself and to allow
developers to test their applications without launching a VM.

Before this series, the vmci and virtio transports allowed this
behavior, but only in the guest.
We are moving the loopback handling into a new transport because it
can also be useful in the host, or when no H2G/G2H transports (hyperv,
virtio, vmci) are loaded.

The user can use the loopback with the new VMADDR_CID_LOCAL (which
replaces VMADDR_CID_RESERVED) under any conditions.
In addition, if a G2H transport is loaded, the guest's local CID can
also be used, as previously supported by the vmci and virtio
transports.
If no G2H transport is loaded, VMADDR_CID_HOST can also be used for
local communication.
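
As an illustration (not part of this series), a userspace client can
reach a local listener through the new CID with the usual AF_VSOCK
calls; the port number below is arbitrary:

  /* Minimal sketch: connect to a local vsock listener via the
   * loopback transport. Port 1234 is an arbitrary example value.
   */
  #include <sys/socket.h>
  #include <linux/vm_sockets.h>
  #include <string.h>
  #include <unistd.h>

  int connect_local(void)
  {
          struct sockaddr_vm addr;
          int fd;

          fd = socket(AF_VSOCK, SOCK_STREAM, 0);
          if (fd < 0)
                  return -1;

          memset(&addr, 0, sizeof(addr));
          addr.svm_family = AF_VSOCK;
          addr.svm_cid = VMADDR_CID_LOCAL; /* CID added by this series */
          addr.svm_port = 1234;            /* arbitrary example port */

          if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                  close(fd);
                  return -1;
          }

          return fd; /* caller reads/writes and closes the socket */
  }

A corresponding server simply bind()s a sockaddr_vm with
VMADDR_CID_ANY (or VMADDR_CID_LOCAL) on the same port, then listen()s
and accept()s as with any other stream socket.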

Patch 1 is a cleanup to build virtio_transport_common without virtio
Patch 2 adds the new VMADDR_CID_LOCAL, replacing VMADDR_CID_RESERVED
Patch 3 adds a new feature flag to register a loopback transport
Patch 4 adds the new vsock_loopback transport based on the loopback
        implementation of virtio_transport
Patch 5 implements the logic to use the local transport for loopback
        communication
Patch 6 removes the loopback from virtio_transport
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bea0f4a5 bf5432b1
@@ -17480,6 +17480,7 @@ F: net/vmw_vsock/diag.c
F: net/vmw_vsock/af_vsock_tap.c
F: net/vmw_vsock/virtio_transport_common.c
F: net/vmw_vsock/virtio_transport.c
F: net/vmw_vsock/vsock_loopback.c
F: drivers/net/vsockmon.c
F: drivers/vhost/vsock.c
F: tools/testing/vsock/
......
@@ -98,6 +98,8 @@ struct vsock_transport_send_notify_data {
#define VSOCK_TRANSPORT_F_G2H 0x00000002
/* Transport provides DGRAM communication */
#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
/* Transport provides local (loopback) communication */
#define VSOCK_TRANSPORT_F_LOCAL 0x00000008
struct vsock_transport {
struct module *module;
......
@@ -99,11 +99,13 @@
#define VMADDR_CID_HYPERVISOR 0
/* This CID is specific to VMCI and can be considered reserved (even VMCI
* doesn't use it anymore, it's a legacy value from an older release).
/* Use this as the destination CID in an address when referring to the
* local communication (loopback).
* (This was VMADDR_CID_RESERVED, but even VMCI doesn't use it anymore,
* it was a legacy value from an older release).
*/
#define VMADDR_CID_RESERVED 1
#define VMADDR_CID_LOCAL 1
/* Use this as the destination CID in an address when referring to the host
* (any process other than the hypervisor). VMCI relies on it being 2, but
......
@@ -26,6 +26,18 @@ config VSOCKETS_DIAG
Enable this module so userspace applications can query open sockets.
config VSOCKETS_LOOPBACK
tristate "Virtual Sockets loopback transport"
depends on VSOCKETS
default y
select VIRTIO_VSOCKETS_COMMON
help
This module implements a loopback transport for Virtual Sockets,
using vmw_vsock_virtio_transport_common.
To compile this driver as a module, choose M here: the module
will be called vsock_loopback. If unsure, say N.
config VMWARE_VMCI_VSOCKETS
tristate "VMware VMCI transport for Virtual Sockets"
depends on VSOCKETS && VMWARE_VMCI
......
@@ -5,6 +5,7 @@ obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
obj-$(CONFIG_VIRTIO_VSOCKETS) += vmw_vsock_virtio_transport.o
obj-$(CONFIG_VIRTIO_VSOCKETS_COMMON) += vmw_vsock_virtio_transport_common.o
obj-$(CONFIG_HYPERV_VSOCKETS) += hv_sock.o
obj-$(CONFIG_VSOCKETS_LOOPBACK) += vsock_loopback.o
vsock-y += af_vsock.o af_vsock_tap.o vsock_addr.o
......
@@ -136,6 +136,8 @@ static const struct vsock_transport *transport_h2g;
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);
/**** UTILS ****/
@@ -386,6 +388,21 @@ void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
static bool vsock_use_local_transport(unsigned int remote_cid)
{
if (!transport_local)
return false;
if (remote_cid == VMADDR_CID_LOCAL)
return true;
if (transport_g2h) {
return remote_cid == transport_g2h->get_local_cid();
} else {
return remote_cid == VMADDR_CID_HOST;
}
}
static void vsock_deassign_transport(struct vsock_sock *vsk)
{
if (!vsk->transport)
@@ -402,9 +419,9 @@ static void vsock_deassign_transport(struct vsock_sock *vsk)
* (e.g. during the connect() or when a connection request on a listener
* socket is received).
* The vsk->remote_addr is used to decide which transport to use:
* - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
* g2h is not loaded, will use local transport;
* - remote CID <= VMADDR_CID_HOST will use guest->host transport;
* - remote CID == local_cid (guest->host transport) will use guest->host
* transport for loopback (host->guest transports don't support loopback);
* - remote CID > VMADDR_CID_HOST will use host->guest transport;
*/
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
@@ -419,9 +436,9 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
new_transport = transport_dgram;
break;
case SOCK_STREAM:
if (remote_cid <= VMADDR_CID_HOST ||
(transport_g2h &&
remote_cid == transport_g2h->get_local_cid()))
if (vsock_use_local_transport(remote_cid))
new_transport = transport_local;
else if (remote_cid <= VMADDR_CID_HOST)
new_transport = transport_g2h;
else
new_transport = transport_h2g;
@@ -464,6 +481,9 @@ bool vsock_find_cid(unsigned int cid)
if (transport_h2g && cid == VMADDR_CID_HOST)
return true;
if (transport_local && cid == VMADDR_CID_LOCAL)
return true;
return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);
@@ -2137,7 +2157,7 @@ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
int vsock_core_register(const struct vsock_transport *t, int features)
{
const struct vsock_transport *t_h2g, *t_g2h, *t_dgram;
const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
int err = mutex_lock_interruptible(&vsock_register_mutex);
if (err)
@@ -2146,6 +2166,7 @@ int vsock_core_register(const struct vsock_transport *t, int features)
t_h2g = transport_h2g;
t_g2h = transport_g2h;
t_dgram = transport_dgram;
t_local = transport_local;
if (features & VSOCK_TRANSPORT_F_H2G) {
if (t_h2g) {
@@ -2171,9 +2192,18 @@ int vsock_core_register(const struct vsock_transport *t, int features)
t_dgram = t;
}
if (features & VSOCK_TRANSPORT_F_LOCAL) {
if (t_local) {
err = -EBUSY;
goto err_busy;
}
t_local = t;
}
transport_h2g = t_h2g;
transport_g2h = t_g2h;
transport_dgram = t_dgram;
transport_local = t_local;
err_busy:
mutex_unlock(&vsock_register_mutex);
@@ -2194,6 +2224,9 @@ void vsock_core_unregister(const struct vsock_transport *t)
if (transport_dgram == t)
transport_dgram = NULL;
if (transport_local == t)
transport_local = NULL;
mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);
......
@@ -44,10 +44,6 @@ struct virtio_vsock {
spinlock_t send_pkt_list_lock;
struct list_head send_pkt_list;
struct work_struct loopback_work;
spinlock_t loopback_list_lock; /* protects loopback_list */
struct list_head loopback_list;
atomic_t queued_replies;
/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
@@ -86,20 +82,6 @@ static u32 virtio_transport_get_local_cid(void)
return ret;
}
static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
struct virtio_vsock_pkt *pkt)
{
int len = pkt->len;
spin_lock_bh(&vsock->loopback_list_lock);
list_add_tail(&pkt->list, &vsock->loopback_list);
spin_unlock_bh(&vsock->loopback_list_lock);
queue_work(virtio_vsock_workqueue, &vsock->loopback_work);
return len;
}
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
@@ -194,7 +176,8 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
}
if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
len = virtio_transport_send_pkt_loopback(vsock, pkt);
virtio_transport_free_pkt(pkt);
len = -ENODEV;
goto out_rcu;
}
@@ -502,33 +485,6 @@ static struct virtio_transport virtio_transport = {
.send_pkt = virtio_transport_send_pkt,
};
static void virtio_transport_loopback_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
container_of(work, struct virtio_vsock, loopback_work);
LIST_HEAD(pkts);
spin_lock_bh(&vsock->loopback_list_lock);
list_splice_init(&vsock->loopback_list, &pkts);
spin_unlock_bh(&vsock->loopback_list_lock);
mutex_lock(&vsock->rx_lock);
if (!vsock->rx_run)
goto out;
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
list_del_init(&pkt->list);
virtio_transport_recv_pkt(&virtio_transport, pkt);
}
out:
mutex_unlock(&vsock->rx_lock);
}
static void virtio_transport_rx_work(struct work_struct *work)
{
struct virtio_vsock *vsock =
@@ -633,13 +589,10 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
mutex_init(&vsock->event_lock);
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
spin_lock_init(&vsock->loopback_list_lock);
INIT_LIST_HEAD(&vsock->loopback_list);
INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
INIT_WORK(&vsock->event_work, virtio_transport_event_work);
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
@@ -720,22 +673,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
}
spin_unlock_bh(&vsock->send_pkt_list_lock);
spin_lock_bh(&vsock->loopback_list_lock);
while (!list_empty(&vsock->loopback_list)) {
pkt = list_first_entry(&vsock->loopback_list,
struct virtio_vsock_pkt, list);
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
spin_unlock_bh(&vsock->loopback_list_lock);
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
/* Other works can be queued before 'config->del_vqs()', so we flush
* all works before to free the vsock object to avoid use after free.
*/
flush_work(&vsock->loopback_work);
flush_work(&vsock->rx_work);
flush_work(&vsock->tx_work);
flush_work(&vsock->event_work);
......
@@ -11,9 +11,6 @@
#include <linux/sched/signal.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <uapi/linux/vsockmon.h>
......
@@ -648,7 +648,7 @@ static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
static const u32 non_socket_contexts[] = {
VMADDR_CID_RESERVED,
VMADDR_CID_LOCAL,
};
int i;
......
// SPDX-License-Identifier: GPL-2.0-only
/* loopback transport for vsock using virtio_transport_common APIs
*
* Copyright (C) 2013-2019 Red Hat, Inc.
* Authors: Asias He <asias@redhat.com>
* Stefan Hajnoczi <stefanha@redhat.com>
* Stefano Garzarella <sgarzare@redhat.com>
*
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/virtio_vsock.h>
struct vsock_loopback {
struct workqueue_struct *workqueue;
spinlock_t pkt_list_lock; /* protects pkt_list */
struct list_head pkt_list;
struct work_struct pkt_work;
};
static struct vsock_loopback the_vsock_loopback;
static u32 vsock_loopback_get_local_cid(void)
{
return VMADDR_CID_LOCAL;
}
static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
int len = pkt->len;
spin_lock_bh(&vsock->pkt_list_lock);
list_add_tail(&pkt->list, &vsock->pkt_list);
spin_unlock_bh(&vsock->pkt_list_lock);
queue_work(vsock->workqueue, &vsock->pkt_work);
return len;
}
static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
struct virtio_vsock_pkt *pkt, *n;
LIST_HEAD(freeme);
spin_lock_bh(&vsock->pkt_list_lock);
list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
if (pkt->vsk != vsk)
continue;
list_move(&pkt->list, &freeme);
}
spin_unlock_bh(&vsock->pkt_list_lock);
list_for_each_entry_safe(pkt, n, &freeme, list) {
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
return 0;
}
static struct virtio_transport loopback_transport = {
.transport = {
.module = THIS_MODULE,
.get_local_cid = vsock_loopback_get_local_cid,
.init = virtio_transport_do_socket_init,
.destruct = virtio_transport_destruct,
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
.cancel_pkt = vsock_loopback_cancel_pkt,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_dequeue = virtio_transport_dgram_dequeue,
.dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_allow = virtio_transport_dgram_allow,
.stream_dequeue = virtio_transport_stream_dequeue,
.stream_enqueue = virtio_transport_stream_enqueue,
.stream_has_data = virtio_transport_stream_has_data,
.stream_has_space = virtio_transport_stream_has_space,
.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
.notify_send_init = virtio_transport_notify_send_init,
.notify_send_pre_block = virtio_transport_notify_send_pre_block,
.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
.notify_buffer_size = virtio_transport_notify_buffer_size,
},
.send_pkt = vsock_loopback_send_pkt,
};
static void vsock_loopback_work(struct work_struct *work)
{
struct vsock_loopback *vsock =
container_of(work, struct vsock_loopback, pkt_work);
LIST_HEAD(pkts);
spin_lock_bh(&vsock->pkt_list_lock);
list_splice_init(&vsock->pkt_list, &pkts);
spin_unlock_bh(&vsock->pkt_list_lock);
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
list_del_init(&pkt->list);
virtio_transport_deliver_tap_pkt(pkt);
virtio_transport_recv_pkt(&loopback_transport, pkt);
}
}
static int __init vsock_loopback_init(void)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
int ret;
vsock->workqueue = alloc_workqueue("vsock-loopback", 0, 0);
if (!vsock->workqueue)
return -ENOMEM;
spin_lock_init(&vsock->pkt_list_lock);
INIT_LIST_HEAD(&vsock->pkt_list);
INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
ret = vsock_core_register(&loopback_transport.transport,
VSOCK_TRANSPORT_F_LOCAL);
if (ret)
goto out_wq;
return 0;
out_wq:
destroy_workqueue(vsock->workqueue);
return ret;
}
static void __exit vsock_loopback_exit(void)
{
struct vsock_loopback *vsock = &the_vsock_loopback;
struct virtio_vsock_pkt *pkt;
vsock_core_unregister(&loopback_transport.transport);
flush_work(&vsock->pkt_work);
spin_lock_bh(&vsock->pkt_list_lock);
while (!list_empty(&vsock->pkt_list)) {
pkt = list_first_entry(&vsock->pkt_list,
struct virtio_vsock_pkt, list);
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
spin_unlock_bh(&vsock->pkt_list_lock);
destroy_workqueue(vsock->workqueue);
}
module_init(vsock_loopback_init);
module_exit(vsock_loopback_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stefano Garzarella <sgarzare@redhat.com>");
MODULE_DESCRIPTION("loopback transport for vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);