Commit 634f1a71 authored by Bobby Eshleman, committed by David S. Miller

vsock: support sockmap

This patch adds sockmap support for vsock sockets. It is intended to be
usable by all transports, but only the virtio and loopback transports
are implemented.

SOCK_STREAM, SOCK_DGRAM, and SOCK_SEQPACKET are all supported.
Signed-off-by: Bobby Eshleman <bobby.eshleman@bytedance.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 24265c2c
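For context only (not part of this commit): once the change is applied, a connected vsock socket can be placed into a BPF sockmap from userspace much like a TCP socket. The following is a minimal, hypothetical libbpf sketch; the CID, port, map layout, and error handling are illustrative assumptions rather than anything taken from the patch. A matching BPF verdict program is sketched after the diff.

/* Hypothetical userspace sketch (not from this commit): insert a connected
 * vsock stream socket into a sockmap. Assumes libbpf and a kernel that
 * includes this patch; error handling is abbreviated.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <bpf/bpf.h>

int main(void)
{
        struct sockaddr_vm addr = {
                .svm_family = AF_VSOCK,
                .svm_cid = VMADDR_CID_HOST,     /* example CID */
                .svm_port = 1234,               /* example port */
        };
        int map_fd, sock_fd, key = 0;

        /* Plain sockmap keyed by u32, holding a single socket fd. */
        map_fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,
                                sizeof(int), sizeof(int), 1, NULL);
        if (map_fd < 0)
                return 1;

        sock_fd = socket(AF_VSOCK, SOCK_STREAM, 0);
        if (sock_fd < 0 || connect(sock_fd, (struct sockaddr *)&addr, sizeof(addr)))
                return 1;

        /* With this patch the update can succeed, because the vsock transport
         * now supplies read_skb and the psock proto hooks.
         */
        if (bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY)) {
                perror("bpf_map_update_elem");
                return 1;
        }
        return 0;
}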
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -439,6 +439,7 @@ static struct virtio_transport vhost_transport = {
 		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
 		.notify_buffer_size = virtio_transport_notify_buffer_size,
+		.read_skb = virtio_transport_read_skb,
 	},

 	.send_pkt = vhost_transport_send_pkt,
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -245,4 +245,5 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
 void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
 int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
+int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
 #endif /* _LINUX_VIRTIO_VSOCK_H */
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -75,6 +75,7 @@ struct vsock_sock {
 	void *trans;
 };

+s64 vsock_connectible_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
 struct sock *vsock_create_connected(struct sock *parent);
@@ -173,6 +174,9 @@ struct vsock_transport {
 	/* Addressing. */
 	u32 (*get_local_cid)(void);
+
+	/* Read a single skb */
+	int (*read_skb)(struct vsock_sock *, skb_read_actor_t);
 };

 /**** CORE ****/
@@ -225,5 +229,18 @@ int vsock_init_tap(void);
 int vsock_add_tap(struct vsock_tap *vt);
 int vsock_remove_tap(struct vsock_tap *vt);
 void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
+int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+			      int flags);
+int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+			size_t len, int flags);
+
+#ifdef CONFIG_BPF_SYSCALL
+extern struct proto vsock_proto;
+int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
+void __init vsock_bpf_build_proto(void);
+#else
+static inline void __init vsock_bpf_build_proto(void)
+{}
+#endif
+
 #endif /* __AF_VSOCK_H__ */
--- a/net/vmw_vsock/Makefile
+++ b/net/vmw_vsock/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_HYPERV_VSOCKETS) += hv_sock.o
 obj-$(CONFIG_VSOCKETS_LOOPBACK) += vsock_loopback.o

 vsock-y += af_vsock.o af_vsock_tap.o vsock_addr.o
+vsock-$(CONFIG_BPF_SYSCALL) += vsock_bpf.o

 vsock_diag-y += diag.o
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -116,10 +116,13 @@ static void vsock_sk_destruct(struct sock *sk);
 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

 /* Protocol family. */
-static struct proto vsock_proto = {
+struct proto vsock_proto = {
 	.name = "AF_VSOCK",
 	.owner = THIS_MODULE,
 	.obj_size = sizeof(struct vsock_sock),
+#ifdef CONFIG_BPF_SYSCALL
+	.psock_update_sk_prot = vsock_bpf_update_proto,
+#endif
 };

 /* The default peer timeout indicates how long we will wait for a peer response
@@ -865,7 +868,7 @@ s64 vsock_stream_has_data(struct vsock_sock *vsk)
 }
 EXPORT_SYMBOL_GPL(vsock_stream_has_data);

-static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
+s64 vsock_connectible_has_data(struct vsock_sock *vsk)
 {
 	struct sock *sk = sk_vsock(vsk);
@@ -874,6 +877,7 @@ static s64 vsock_connectible_has_data(struct vsock_sock *vsk)
 	else
 		return vsock_stream_has_data(vsk);
 }
+EXPORT_SYMBOL_GPL(vsock_connectible_has_data);

 s64 vsock_stream_has_space(struct vsock_sock *vsk)
 {
@@ -1131,6 +1135,13 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
 	return mask;
 }

+static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
+{
+	struct vsock_sock *vsk = vsock_sk(sk);
+
+	return vsk->transport->read_skb(vsk, read_actor);
+}
+
 static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
 			       size_t len)
 {
@@ -1242,18 +1253,42 @@ static int vsock_dgram_connect(struct socket *sock,
 	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
 	sock->state = SS_CONNECTED;

+	/* sock map disallows redirection of non-TCP sockets with sk_state !=
+	 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set
+	 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams.
+	 *
+	 * This doesn't seem to be abnormal state for datagram sockets, as the
+	 * same approach can be seen in other datagram socket types as well
+	 * (such as unix sockets).
+	 */
+	sk->sk_state = TCP_ESTABLISHED;
+
 out:
 	release_sock(sk);
 	return err;
 }

-static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
-			       size_t len, int flags)
+int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+			size_t len, int flags)
 {
-	struct vsock_sock *vsk = vsock_sk(sock->sk);
+#ifdef CONFIG_BPF_SYSCALL
+	const struct proto *prot;
+#endif
+	struct vsock_sock *vsk;
+	struct sock *sk;
+
+	sk = sock->sk;
+	vsk = vsock_sk(sk);
+
+#ifdef CONFIG_BPF_SYSCALL
+	prot = READ_ONCE(sk->sk_prot);
+	if (prot != &vsock_proto)
+		return prot->recvmsg(sk, msg, len, flags, NULL);
+#endif

 	return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
 }
+EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);

 static const struct proto_ops vsock_dgram_ops = {
 	.family = PF_VSOCK,
@@ -1272,6 +1307,7 @@ static const struct proto_ops vsock_dgram_ops = {
 	.recvmsg = vsock_dgram_recvmsg,
 	.mmap = sock_no_mmap,
 	.sendpage = sock_no_sendpage,
+	.read_skb = vsock_read_skb,
 };

 static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
@@ -2086,13 +2122,16 @@ static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
 	return err;
 }

-static int
+int
 vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 			  int flags)
 {
 	struct sock *sk;
 	struct vsock_sock *vsk;
 	const struct vsock_transport *transport;
+#ifdef CONFIG_BPF_SYSCALL
+	const struct proto *prot;
+#endif
 	int err;

 	sk = sock->sk;
@@ -2139,6 +2178,14 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 		goto out;
 	}

+#ifdef CONFIG_BPF_SYSCALL
+	prot = READ_ONCE(sk->sk_prot);
+	if (prot != &vsock_proto) {
+		release_sock(sk);
+		return prot->recvmsg(sk, msg, len, flags, NULL);
+	}
+#endif
+
 	if (sk->sk_type == SOCK_STREAM)
 		err = __vsock_stream_recvmsg(sk, msg, len, flags);
 	else
@@ -2148,6 +2195,7 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	release_sock(sk);
 	return err;
 }
+EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);

 static int vsock_set_rcvlowat(struct sock *sk, int val)
 {
@@ -2188,6 +2236,7 @@ static const struct proto_ops vsock_stream_ops = {
 	.mmap = sock_no_mmap,
 	.sendpage = sock_no_sendpage,
 	.set_rcvlowat = vsock_set_rcvlowat,
+	.read_skb = vsock_read_skb,
 };

 static const struct proto_ops vsock_seqpacket_ops = {
@@ -2209,6 +2258,7 @@ static const struct proto_ops vsock_seqpacket_ops = {
 	.recvmsg = vsock_connectible_recvmsg,
 	.mmap = sock_no_mmap,
 	.sendpage = sock_no_sendpage,
+	.read_skb = vsock_read_skb,
 };

 static int vsock_create(struct net *net, struct socket *sock,
@@ -2348,6 +2398,8 @@ static int __init vsock_init(void)
 		goto err_unregister_proto;
 	}

+	vsock_bpf_build_proto();
+
 	return 0;

 err_unregister_proto:
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -457,6 +457,8 @@ static struct virtio_transport virtio_transport = {
 		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
 		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
 		.notify_buffer_size = virtio_transport_notify_buffer_size,
+
+		.read_skb = virtio_transport_read_skb,
 	},

 	.send_pkt = virtio_transport_send_pkt,
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1418,6 +1418,31 @@ int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue)
 }
 EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs);

+int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor)
+{
+	struct virtio_vsock_sock *vvs = vsk->trans;
+	struct sock *sk = sk_vsock(vsk);
+	struct sk_buff *skb;
+	int off = 0;
+	int copied;
+	int err;
+
+	spin_lock_bh(&vvs->rx_lock);
+	/* Use __skb_recv_datagram() for race-free handling of the receive. It
+	 * works for types other than dgrams.
+	 */
+	skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
+	spin_unlock_bh(&vvs->rx_lock);
+
+	if (!skb)
+		return err;
+
+	copied = recv_actor(sk, skb);
+	kfree_skb(skb);
+	return copied;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Asias He");
 MODULE_DESCRIPTION("common code for virtio vsock");
--- /dev/null
+++ b/net/vmw_vsock/vsock_bpf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Bobby Eshleman <bobby.eshleman@bytedance.com>
 *
 * Based off of net/unix/unix_bpf.c
 */

#include <linux/bpf.h>
#include <linux/module.h>
#include <linux/skmsg.h>
#include <linux/socket.h>
#include <linux/wait.h>
#include <net/af_vsock.h>
#include <net/sock.h>

#define vsock_sk_has_data(__sk, __psock)                                \
                ({ !skb_queue_empty(&(__sk)->sk_receive_queue) ||       \
                   !skb_queue_empty(&(__psock)->ingress_skb) ||         \
                   !list_empty(&(__psock)->ingress_msg);                \
                })

static struct proto *vsock_prot_saved __read_mostly;
static DEFINE_SPINLOCK(vsock_prot_lock);
static struct proto vsock_bpf_prot;

static bool vsock_has_data(struct sock *sk, struct sk_psock *psock)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        s64 ret;

        ret = vsock_connectible_has_data(vsk);
        if (ret > 0)
                return true;

        return vsock_sk_has_data(sk, psock);
}

static bool vsock_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
{
        bool ret;

        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return true;

        if (!timeo)
                return false;

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        ret = vsock_has_data(sk, psock);
        if (!ret) {
                wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                ret = vsock_has_data(sk, psock);
        }
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
        return ret;
}

static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags)
{
        struct socket *sock = sk->sk_socket;
        int err;

        if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
                err = vsock_connectible_recvmsg(sock, msg, len, flags);
        else if (sk->sk_type == SOCK_DGRAM)
                err = vsock_dgram_recvmsg(sock, msg, len, flags);
        else
                err = -EPROTOTYPE;

        return err;
}

static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
                             size_t len, int flags, int *addr_len)
{
        struct sk_psock *psock;
        int copied;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return __vsock_recvmsg(sk, msg, len, flags);

        lock_sock(sk);
        if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
                release_sock(sk);
                sk_psock_put(sk, psock);
                return __vsock_recvmsg(sk, msg, len, flags);
        }

        copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
        while (copied == 0) {
                long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

                if (!vsock_msg_wait_data(sk, psock, timeo)) {
                        copied = -EAGAIN;
                        break;
                }

                if (sk_psock_queue_empty(psock)) {
                        release_sock(sk);
                        sk_psock_put(sk, psock);
                        return __vsock_recvmsg(sk, msg, len, flags);
                }

                copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
        }

        release_sock(sk);
        sk_psock_put(sk, psock);

        return copied;
}

/* Copy of original proto with updated sock_map methods */
static struct proto vsock_bpf_prot = {
        .close = sock_map_close,
        .recvmsg = vsock_bpf_recvmsg,
        .sock_is_readable = sk_msg_is_readable,
        .unhash = sock_map_unhash,
};

static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
        *prot = *base;
        prot->close = sock_map_close;
        prot->recvmsg = vsock_bpf_recvmsg;
        prot->sock_is_readable = sk_msg_is_readable;
}

static void vsock_bpf_check_needs_rebuild(struct proto *ops)
{
        /* Paired with the smp_store_release() below. */
        if (unlikely(ops != smp_load_acquire(&vsock_prot_saved))) {
                spin_lock_bh(&vsock_prot_lock);
                if (likely(ops != vsock_prot_saved)) {
                        vsock_bpf_rebuild_protos(&vsock_bpf_prot, ops);
                        /* Make sure proto function pointers are updated before publishing the
                         * pointer to the struct.
                         */
                        smp_store_release(&vsock_prot_saved, ops);
                }
                spin_unlock_bh(&vsock_prot_lock);
        }
}

int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
        struct vsock_sock *vsk;

        if (restore) {
                sk->sk_write_space = psock->saved_write_space;
                sock_replace_proto(sk, psock->sk_proto);
                return 0;
        }

        vsk = vsock_sk(sk);
        if (!vsk->transport)
                return -ENODEV;

        if (!vsk->transport->read_skb)
                return -EOPNOTSUPP;

        vsock_bpf_check_needs_rebuild(psock->sk_proto);
        sock_replace_proto(sk, &vsock_bpf_prot);
        return 0;
}

void __init vsock_bpf_build_proto(void)
{
        vsock_bpf_rebuild_protos(&vsock_bpf_prot, &vsock_proto);
}
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -94,6 +94,8 @@ static struct virtio_transport loopback_transport = {
 		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
 		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
 		.notify_buffer_size = virtio_transport_notify_buffer_size,
+
+		.read_skb = virtio_transport_read_skb,
 	},

 	.send_pkt = vsock_loopback_send_pkt,
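To complete the picture (again, not part of this commit): redirection between sockets held in a sockmap is driven by an sk_skb verdict program. The sketch below is a hypothetical BPF-side counterpart to the userspace example above; the map name, key, and blanket redirect policy are illustrative assumptions.

// Hypothetical BPF verdict program (not from this commit): redirect every
// received skb to the socket stored at index 1 of the sockmap.
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int vsock_redir(struct __sk_buff *skb)
{
        __u32 key = 1;  /* illustrative: always redirect to the peer at index 1 */

        return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}

char _license[] SEC("license") = "GPL";

Such a program is typically attached with bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0) before sockets are added to the map. The TCP_ESTABLISHED assignment added to vsock_dgram_connect() above exists precisely so that connected vsock datagram sockets also pass sock_map_redirect_allowed() and can take part in this kind of redirection.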