Commit 46cc6e49 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: fix lockdep splat in tcp_snd_una_update()

tcp_snd_una_update() and tcp_rcv_nxt_update() call
u64_stats_update_begin() either from process context or BH handler.

This triggers a lockdep splat on 32bit & SMP builds.

We could add a u64_stats_update_begin_bh() variant but this would
slow down 32bit builds with useless local_bh_disable() and
local_bh_enable() pairs, since we own the socket lock at this point.

I add sock_owned_by_me() helper to have proper lockdep support
even on 64bit builds, and new u64_stats_update_begin_raw()
and u64_stats_update_end_raw() methods.

Fixes: c10d9310 ("tcp: do not assume TCP code is non preemptible")
Reported-by: Fabio Estevam <festevam@gmail.com>
Diagnosed-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Fabio Estevam <fabio.estevam@nxp.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 5332174a
...@@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) ...@@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif #endif
} }
/*
 * Lockdep-free variant of u64_stats_update_begin().
 *
 * raw_write_seqcount_begin() carries no lockdep annotation, so the
 * caller must already serialize writers by other means (in the tcp
 * case: the socket lock is held, checked via sock_owned_by_me()).
 * On 64bit or !SMP builds this compiles to nothing, since no seqcount
 * protection is used there.
 */
static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
raw_write_seqcount_begin(&syncp->seq);
#endif
}
/*
 * Lockdep-free counterpart of u64_stats_update_begin_raw(); closes the
 * write side of the seqcount opened there. Same contract: the caller
 * provides writer exclusion, and on 64bit or !SMP builds this is a no-op.
 */
static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
raw_write_seqcount_end(&syncp->seq);
#endif
}
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{ {
#if BITS_PER_LONG==32 && defined(CONFIG_SMP) #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
......
...@@ -1421,11 +1421,16 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow) ...@@ -1421,11 +1421,16 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
* accesses from user process context. * accesses from user process context.
*/ */
static inline bool sock_owned_by_user(const struct sock *sk) static inline void sock_owned_by_me(const struct sock *sk)
{ {
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
#endif #endif
}
static inline bool sock_owned_by_user(const struct sock *sk)
{
sock_owned_by_me(sk);
return sk->sk_lock.owned; return sk->sk_lock.owned;
} }
......
...@@ -3355,9 +3355,10 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) ...@@ -3355,9 +3355,10 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{ {
u32 delta = ack - tp->snd_una; u32 delta = ack - tp->snd_una;
u64_stats_update_begin(&tp->syncp); sock_owned_by_me((struct sock *)tp);
u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_acked += delta; tp->bytes_acked += delta;
u64_stats_update_end(&tp->syncp); u64_stats_update_end_raw(&tp->syncp);
tp->snd_una = ack; tp->snd_una = ack;
} }
...@@ -3366,9 +3367,10 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) ...@@ -3366,9 +3367,10 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{ {
u32 delta = seq - tp->rcv_nxt; u32 delta = seq - tp->rcv_nxt;
u64_stats_update_begin(&tp->syncp); sock_owned_by_me((struct sock *)tp);
u64_stats_update_begin_raw(&tp->syncp);
tp->bytes_received += delta; tp->bytes_received += delta;
u64_stats_update_end(&tp->syncp); u64_stats_update_end_raw(&tp->syncp);
tp->rcv_nxt = seq; tp->rcv_nxt = seq;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment