Commit 4682a035 authored by David Miller, committed by David S. Miller

netlink: Always copy on mmap TX.

Checking the file f_count and the nlk->mapped count is not sufficient
to prevent the mmap'd area contents from changing out from under us
during netlink mmap sendmsg() operations, so always copy the frame
contents into a freshly allocated skb.
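To make the race concrete, here is a minimal illustrative sketch (not the
kernel code; struct frame_hdr, copy_frame_racy() and the buffer sizes are
made up) of the check-then-use pattern that stays broken no matter what the
reference counts say: the shared length field is fetched twice, so a writer
on another CPU can grow it between the bounds check and the copy.

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical frame header in memory that another thread can
	 * still write to while the copy routine below runs. */
	struct frame_hdr {
		volatile uint32_t nm_len;	/* length controlled by the writer */
		uint8_t payload[4096];
	};

	/* BROKEN: nm_len is read twice.  A concurrent writer can pass the
	 * first read (the bounds check) with a small value and then enlarge
	 * it before the second read, making memcpy() overrun dst. */
	static int copy_frame_racy(struct frame_hdr *hdr, uint8_t *dst,
				   size_t maxlen)
	{
		if (hdr->nm_len > maxlen)	/* first fetch: passes */
			return -1;
		memcpy(dst, hdr->payload, hdr->nm_len);	/* second fetch: may be larger */
		return 0;
	}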

Be careful to sample the header's length field only once, because it,
too, can change out from under us.
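And the repaired version of the same hypothetical sketch, mirroring what the
patch does with ACCESS_ONCE() (the predecessor of today's READ_ONCE()):
sample the length into a local exactly once, then use only that local for
both the bounds check and the copy.

	/* FIXED: one volatile load of nm_len; the checked value and the
	 * copied value are guaranteed to be the same sample.  Reuses
	 * struct frame_hdr from the sketch above. */
	static int copy_frame_once(struct frame_hdr *hdr, uint8_t *dst,
				   size_t maxlen)
	{
		uint32_t nm_len = hdr->nm_len;	/* single sample (field is volatile) */

		if (nm_len > maxlen)
			return -1;
		memcpy(dst, hdr->payload, nm_len);
		return 0;
	}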

Fixes: 5fd96123 ("netlink: implement memory mapped sendmsg()")
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
parent 65891fea
@@ -525,14 +525,14 @@ static int netlink_mmap(struct file *file, struct socket *sock,
 	return err;
 }
 
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
 {
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 	struct page *p_start, *p_end;
 
 	/* First page is flushed through netlink_{get,set}_status */
 	p_start = pgvec_to_page(hdr + PAGE_SIZE);
-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
 	while (p_start <= p_end) {
 		flush_dcache_page(p_start);
 		p_start++;
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 	struct nl_mmap_hdr *hdr;
 	struct sk_buff *skb;
 	unsigned int maxlen;
-	bool excl = true;
 	int err = 0, len = 0;
 
-	/* Netlink messages are validated by the receiver before processing.
-	 * In order to avoid userspace changing the contents of the message
-	 * after validation, the socket and the ring may only be used by a
-	 * single process, otherwise we fall back to copying.
-	 */
-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
-	    atomic_read(&nlk->mapped) > 1)
-		excl = false;
-
 	mutex_lock(&nlk->pg_vec_lock);
 
 	ring   = &nlk->tx_ring;
 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 
 	do {
+		unsigned int nm_len;
+
 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
 		if (hdr == NULL) {
 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 			schedule();
 			continue;
 		}
-		if (hdr->nm_len > maxlen) {
+
+		nm_len = ACCESS_ONCE(hdr->nm_len);
+		if (nm_len > maxlen) {
 			err = -EINVAL;
 			goto out;
 		}
 
-		netlink_frame_flush_dcache(hdr);
+		netlink_frame_flush_dcache(hdr, nm_len);
 
-		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
-			skb = alloc_skb_head(GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			sock_hold(sk);
-			netlink_ring_setup_skb(skb, sk, ring, hdr);
-			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
-			__skb_put(skb, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-			atomic_inc(&ring->pending);
-		} else {
-			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			__skb_put(skb, hdr->nm_len);
-			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN,
-			       hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
-		}
+		skb = alloc_skb(nm_len, GFP_KERNEL);
+		if (skb == NULL) {
+			err = -ENOBUFS;
+			goto out;
+		}
+
+		__skb_put(skb, nm_len);
+		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 
 		netlink_increment_head(ring);
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_frame_flush_dcache(hdr);
+	netlink_frame_flush_dcache(hdr, hdr->nm_len);
 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 
 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;