Commit 7c68fa2b authored by Eric Dumazet, committed by David S. Miller

net: annotate lockless accesses to sk->sk_pacing_shift

sk->sk_pacing_shift can be read and written without lock
synchronization. This patch adds annotations to
document this fact and avoid future syzbot complaints.

This might also avoid unexpected false sharing
in sk_pacing_shift_update(), as the compiler
could remove the conditional check and always
write over sk->sk_pacing_shift :

if (sk->sk_pacing_shift != val)
	sk->sk_pacing_shift = val;
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cad46039
@@ -2588,9 +2588,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 */
static inline void sk_pacing_shift_update(struct sock *sk, int val) static inline void sk_pacing_shift_update(struct sock *sk, int val)
{ {
if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val) if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
return; return;
sk->sk_pacing_shift = val; WRITE_ONCE(sk->sk_pacing_shift, val);
} }
/* if a socket is bound to a device, check that the given device
...
@@ -2916,7 +2916,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_max_pacing_rate = ~0UL;
 	sk->sk_pacing_rate = ~0UL;
-	sk->sk_pacing_shift = 10;
+	WRITE_ONCE(sk->sk_pacing_shift, 10);
 	sk->sk_incoming_cpu = -1;
 	sk_rx_queue_clear(sk);
...
@@ -306,7 +306,8 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
 	/* Sort of tcp_tso_autosize() but ignoring
 	 * driver provided sk_gso_max_size.
 	 */
-	bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+	bytes = min_t(unsigned long,
+		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
 		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
...
@@ -1725,7 +1725,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 	u32 bytes, segs;

 	bytes = min_t(unsigned long,
-		      sk->sk_pacing_rate >> sk->sk_pacing_shift,
+		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);

 	/* Goal is to send at least one packet per ms,
@@ -2260,7 +2260,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit = max_t(unsigned long,
 		      2 * skb->truesize,
-		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
+		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
 	if (sk->sk_pacing_status == SK_PACING_NONE)
 		limit = min_t(unsigned long, limit,
 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment