Commit c94683ed authored by David S. Miller

Merge branch 'wireguard-fixes'

Jason A. Donenfeld says:

====================
wireguard fixes for 6.4.2/6.5-rc1

Sorry to send these patches during the merge window, but they're net
fixes, not netdev enhancements, and while I'd ordinarily wait anyway,
I just got the first bug report for an issue that one of these patches
fixes, which I had originally thought was unlikely to be hit in
practice. So please apply the following three patches to net:

1) Make proper use of nr_cpu_ids with cpumask_next(), rather than
   awkwardly using modulo, to handle dynamic CPU topology changes.
   Linus noticed this a while ago and pointed it out, and today a user
   actually got hit by it. (A standalone sketch of the new wrapping
   logic follows the commit metadata below.)

2) Respect persistent keepalive and other staged packets when setting
   the private key after the interface is already up. (A sketch of the
   ordering this requires follows the netlink.c hunk below.)

3) Use timer_delete_sync() instead of del_timer_sync(), per the
   documentation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a27ac539 326534e8
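For illustration, here is a minimal, self-contained C sketch of the wrapping
strategy in fix 1 (see the queueing.h hunk below): rather than taking a modulo
of nr_cpumask_bits, walk to the next online CPU and wrap explicitly to the
first online one when the end of the mask is passed. The NR_CPUS, mask_next(),
mask_first(), and online_mask names are stand-ins that simulate the kernel's
cpumask API with a plain bitmask so the sketch runs in userspace; like the
kernel, it assumes at least one CPU is online.

#include <stdio.h>

#define NR_CPUS 8 /* stand-in for the kernel's nr_cpu_ids */

/* Stand-in for cpumask_next(): index of the first bit set in mask
 * strictly after prev, or NR_CPUS if no further bit is set.
 */
static int mask_next(int prev, unsigned int mask)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; ++cpu)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* Stand-in for cpumask_first(). */
static int mask_first(unsigned int mask)
{
	return mask_next(-1, mask);
}

/* Mirrors the fixed wg_cpumask_next_online(): walk round-robin through
 * the online CPUs and wrap explicitly at the end of the mask, instead
 * of the old modulo arithmetic, so holes left by offlined CPUs are
 * always skipped.
 */
static int next_online(int *last_cpu, unsigned int online_mask)
{
	int cpu = mask_next(*last_cpu, online_mask);

	if (cpu >= NR_CPUS)
		cpu = mask_first(online_mask);
	*last_cpu = cpu;
	return cpu;
}

int main(void)
{
	unsigned int online = 0x5a; /* CPUs 1, 3, 4, 6 online; 0, 2, 5, 7 offline */
	int last = -1;

	for (int i = 0; i < 8; ++i)
		printf("%d ", next_online(&last, online));
	printf("\n"); /* prints: 1 3 4 6 1 3 4 6 */
	return 0;
}

As the updated comment in queueing.h notes, the shared cursor (last_cpu in the
driver) is deliberately read and written without locking; two racing callers
may occasionally pick the same CPU, which at worst queues two work items on
one CPU and is harmless.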
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -546,6 +546,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
 		u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
 		u8 public_key[NOISE_PUBLIC_KEY_LEN];
 		struct wg_peer *peer, *temp;
+		bool send_staged_packets;
 
 		if (!crypto_memneq(wg->static_identity.static_private,
 				   private_key, NOISE_PUBLIC_KEY_LEN))
@@ -564,14 +565,17 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
 		}
 
 		down_write(&wg->static_identity.lock);
-		wg_noise_set_static_identity_private_key(&wg->static_identity,
-							 private_key);
-		list_for_each_entry_safe(peer, temp, &wg->peer_list,
-					 peer_list) {
+		send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev);
+		wg_noise_set_static_identity_private_key(&wg->static_identity, private_key);
+		send_staged_packets = send_staged_packets && wg->static_identity.has_identity;
+
+		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
+		list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
 			wg_noise_precompute_static_static(peer);
 			wg_noise_expire_current_peer_keypairs(peer);
+			if (send_staged_packets)
+				wg_packet_send_staged_packets(peer);
 		}
-		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
 		up_write(&wg->static_identity.lock);
 	}
 skip_set_private_key:
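The subtle part of the netlink.c hunk above is ordering: whether staged
packets should be flushed is decided from the state before the new key is
installed (interface up, no identity yet), then re-checked afterwards so
nothing is sent if the key did not actually take (an all-zero key clears the
identity rather than setting one). Below is a minimal check-then-confirm
sketch of that pattern; the struct and function names are hypothetical
stand-ins, not the real wg_* helpers.

#include <stdbool.h>
#include <stdio.h>

struct device {
	bool has_identity; /* is a private key currently set? */
	bool running;      /* is the interface up? */
};

/* Hypothetical stand-in for wg_noise_set_static_identity_private_key():
 * in the driver, an all-zero key clears the identity instead of setting
 * one, which is why the result must be re-checked after installation.
 */
static void install_private_key(struct device *dev, bool key_is_valid)
{
	dev->has_identity = key_is_valid;
}

/* Hypothetical stand-in for wg_packet_send_staged_packets() over all peers. */
static void send_staged_packets(void)
{
	printf("flushing staged packets (keepalives, queued data)\n");
}

static void set_device_key(struct device *dev, bool key_is_valid)
{
	/* Decide from the OLD state: packets can only be staged behind a
	 * missing key if the interface is up but has no identity yet.
	 */
	bool send_staged = !dev->has_identity && dev->running;

	install_private_key(dev, key_is_valid);

	/* Confirm against the NEW state: only flush if the key took. */
	send_staged = send_staged && dev->has_identity;
	if (send_staged)
		send_staged_packets();
}

int main(void)
{
	struct device dev = { .has_identity = false, .running = true };

	set_device_key(&dev, true); /* first real key: staged packets go out */
	set_device_key(&dev, true); /* replacing a key: nothing was staged */
	return 0;
}

The netns.sh additions at the end of this series exercise exactly this path:
bring wg0 up with a peer and persistent-keepalive configured but no private
key, confirm nothing is transmitted, then set the key and confirm the staged
keepalive goes out.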
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
+	queue->last_cpu = -1;
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
 	return cpu;
 }
 
-/* This function is racy, in the sense that next is unlocked, so it could return
- * the same CPU twice. A race-free version of this would be to instead store an
- * atomic sequence number, do an increment-and-return, and then iterate through
- * every possible CPU until we get to that index -- choose_cpu. However that's
- * a bit slower, and it doesn't seem like this potential race actually
- * introduces any performance loss, so we live with it.
+/* This function is racy, in the sense that it's called while last_cpu is
+ * unlocked, so it could return the same CPU twice. Adding locking or using
+ * atomic sequence numbers is slower though, and the consequences of racing are
+ * harmless, so live with it.
  */
-static inline int wg_cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *last_cpu)
 {
-	int cpu = *next;
-
-	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
-		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
-	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		cpu = cpumask_first(cpu_online_mask);
+	*last_cpu = cpu;
 	return cpu;
 }
@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
 
 static inline int wg_queue_enqueue_per_device_and_peer(
 	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
-	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+	struct sk_buff *skb, struct workqueue_struct *wq)
 {
 	int cpu;
@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
 	/* Then we queue it up in the device queue, which consumes the
 	 * packet as soon as it can.
 	 */
-	cpu = wg_cpumask_next_online(next_cpu);
+	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
 	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
 		return -EPIPE;
 	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
 		goto err;
 
 	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
-						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
 		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
 	if (likely(!ret || ret == -EPIPE)) {
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
 		goto err;
 
 	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
-						   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
 		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
 err:
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -234,10 +234,10 @@ void wg_timers_init(struct wg_peer *peer)
 void wg_timers_stop(struct wg_peer *peer)
 {
-	del_timer_sync(&peer->timer_retransmit_handshake);
-	del_timer_sync(&peer->timer_send_keepalive);
-	del_timer_sync(&peer->timer_new_handshake);
-	del_timer_sync(&peer->timer_zero_key_material);
-	del_timer_sync(&peer->timer_persistent_keepalive);
+	timer_delete_sync(&peer->timer_retransmit_handshake);
+	timer_delete_sync(&peer->timer_send_keepalive);
+	timer_delete_sync(&peer->timer_new_handshake);
+	timer_delete_sync(&peer->timer_zero_key_material);
+	timer_delete_sync(&peer->timer_persistent_keepalive);
 	flush_work(&peer->clear_peer_work);
 }
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -514,10 +514,32 @@ n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
 n1 ping -W 1 -c 1 192.168.241.2
 [[ $(n2 wg show wg0 endpoints) == "$pub1	10.0.0.3:1" ]]
 
-ip1 link del veth1
-ip1 link del veth3
-ip1 link del wg0
-ip2 link del wg0
+ip1 link del dev veth3
+ip1 link del dev wg0
+ip2 link del dev wg0
+
+# Make sure persistent keep alives are sent when an adapter comes up
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev wg0
+# This should also happen even if the private key is set later
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+n1 wg set wg0 private-key <(echo "$key1")
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev veth1
+ip1 link del dev wg0
 
 # We test that Netlink/IPC is working properly by doing things that usually cause split responses
 ip0 link add dev wg0 type wireguard