Commit 62f43b58 authored by Daniel Borkmann, committed by Greg Kroah-Hartman

netlink, mmap: transform mmap skb into full skb on taps

[ Upstream commit 1853c949 ]

Ken-ichirou reported that running netlink in mmap mode for receive in
combination with nlmon will throw a NULL pointer dereference in
__kfree_skb() on nlmon_xmit(), in my case I can also trigger an "unable
to handle kernel paging request". The problem is the skb_clone() in
__netlink_deliver_tap_skb() for skbs that are mmaped.

I.e. the cloned skb doesn't have a destructor, whereas the mmap netlink
skb has it pointed to netlink_skb_destructor(), set in the handler
netlink_ring_setup_skb(). There, skb->head is being set to NULL, so
that in such cases, __kfree_skb() doesn't perform a skb_release_data()
via skb_release_all(), where skb->head is possibly being freed through
kfree(head) into slab allocator, although netlink mmap skb->head points
to the mmap buffer. Similarly, the same has to be done also for large
netlink skbs where the data area is vmalloced. Therefore, as discussed,
make a copy for these rather rare cases for now. This fixes the issue
on my and Ken-ichirou's test-cases.

Reference: http://thread.gmane.org/gmane.linux.network/371129
Fixes: bcbde0d4 ("net: netlink: virtual tap device management")
Reported-by: Ken-ichirou MATSUZAWA <chamaken@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Ken-ichirou MATSUZAWA <chamaken@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 12e082bc
...@@ -124,6 +124,24 @@ static inline u32 netlink_group_mask(u32 group) ...@@ -124,6 +124,24 @@ static inline u32 netlink_group_mask(u32 group)
return group ? 1 << (group - 1) : 0; return group ? 1 << (group - 1) : 0;
} }
/*
 * Materialize an mmaped (or vmalloc-backed) netlink skb into a freshly
 * allocated, self-contained skb that taps can clone and free normally.
 * The netlink control-block fields the taps care about (portid,
 * dst_group, creds) are carried over to the copy.
 *
 * Returns the new skb, or NULL if allocation fails.
 */
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int size = skb_end_offset(skb);
	struct sk_buff *copy = alloc_skb(size, gfp_mask);

	if (!copy)
		return NULL;

	/* Preserve the netlink cb state the tap path relies on. */
	NETLINK_CB(copy).portid    = NETLINK_CB(skb).portid;
	NETLINK_CB(copy).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(copy).creds     = NETLINK_CB(skb).creds;

	memcpy(skb_put(copy, size), skb->data, size);
	return copy;
}
int netlink_add_tap(struct netlink_tap *nt) int netlink_add_tap(struct netlink_tap *nt)
{ {
if (unlikely(nt->dev->type != ARPHRD_NETLINK)) if (unlikely(nt->dev->type != ARPHRD_NETLINK))
...@@ -205,7 +223,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, ...@@ -205,7 +223,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
int ret = -ENOMEM; int ret = -ENOMEM;
dev_hold(dev); dev_hold(dev);
nskb = skb_clone(skb, GFP_ATOMIC);
if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) { if (nskb) {
nskb->dev = dev; nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol); nskb->protocol = htons((u16) sk->sk_protocol);
...@@ -278,11 +300,6 @@ static void netlink_rcv_wake(struct sock *sk) ...@@ -278,11 +300,6 @@ static void netlink_rcv_wake(struct sock *sk)
} }
#ifdef CONFIG_NETLINK_MMAP #ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}
static bool netlink_rx_is_mmaped(struct sock *sk) static bool netlink_rx_is_mmaped(struct sock *sk)
{ {
return nlk_sk(sk)->rx_ring.pg_vec != NULL; return nlk_sk(sk)->rx_ring.pg_vec != NULL;
...@@ -834,7 +851,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) ...@@ -834,7 +851,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
} }
#else /* CONFIG_NETLINK_MMAP */ #else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb) false
#define netlink_rx_is_mmaped(sk) false #define netlink_rx_is_mmaped(sk) false
#define netlink_tx_is_mmaped(sk) false #define netlink_tx_is_mmaped(sk) false
#define netlink_mmap sock_no_mmap #define netlink_mmap sock_no_mmap
......
...@@ -59,6 +59,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) ...@@ -59,6 +59,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
return container_of(sk, struct netlink_sock, sk); return container_of(sk, struct netlink_sock, sk);
} }
/*
 * True when the skb's data lives in a netlink mmap ring buffer rather
 * than in a regular kmalloc'ed skb data area.  Compiles to a constant
 * false when CONFIG_NETLINK_MMAP is not enabled.
 */
static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	return (NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED) != 0;
#else
	return false;
#endif /* CONFIG_NETLINK_MMAP */
}
struct netlink_table { struct netlink_table {
struct rhashtable hash; struct rhashtable hash;
struct hlist_head mc_list; struct hlist_head mc_list;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment