Commit 3c118547 authored by Sebastian Andrzej Siewior, committed by David S. Miller

u64_stats: Disable preemption on 32bit UP+SMP PREEMPT_RT during updates.

On PREEMPT_RT, the seqcount_t used for synchronisation is required on 32bit
architectures even on UP, because softirqs (and threaded IRQ handlers) can be
preempted.

With the seqcount_t in place, a reader with higher priority can preempt the
writer and then spin endlessly in read_seqcount_begin() while the preempted
writer can't make progress.

To avoid such a lockup on PREEMPT_RT, the writer must disable preemption during
the update. There is no need to disable interrupts, because no writer uses this
API from hard-IRQ context on PREEMPT_RT.

Disable preemption on 32bit-RT within the u64_stats write section.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d147dd70
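For context, a minimal writer-side sketch of the API this patch changes (not
part of the commit; the struct and function names my_stats and my_rx are
illustrative, only the u64_stats_* calls are real):

#include <linux/u64_stats_sync.h>

struct my_stats {
        u64_stats_t             packets;
        u64_stats_t             bytes;
        struct u64_stats_sync   syncp;
};

/* Writer, e.g. from NAPI poll / softirq context. On 32bit PREEMPT_RT,
 * u64_stats_update_begin() now also disables preemption, so a
 * higher-priority reader can no longer preempt us inside the write
 * section and spin in read_seqcount_begin() forever. */
static void my_rx(struct my_stats *stats, unsigned int len)
{
        u64_stats_update_begin(&stats->syncp);
        u64_stats_inc(&stats->packets);
        u64_stats_add(&stats->bytes, len);
        u64_stats_update_end(&stats->syncp);
}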
include/linux/u64_stats_sync.h

@@ -66,7 +66,7 @@
 #include <linux/seqlock.h>
 
 struct u64_stats_sync {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
        seqcount_t      seq;
 #endif
 };
@@ -125,7 +125,7 @@ static inline void u64_stats_inc(u64_stats_t *p)
 }
 #endif
 
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
 #define u64_stats_init(syncp)  seqcount_init(&(syncp)->seq)
 #else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
@@ -135,15 +135,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp)
 #endif
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               preempt_disable();
        write_seqcount_begin(&syncp->seq);
 #endif
 }
 
 static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
        write_seqcount_end(&syncp->seq);
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               preempt_enable();
 #endif
 }
@@ -152,7 +156,10 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
 {
        unsigned long flags = 0;
 
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-       local_irq_save(flags);
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               preempt_disable();
+       else
+               local_irq_save(flags);
        write_seqcount_begin(&syncp->seq);
 #endif
@@ -163,15 +170,18 @@ static inline void
 u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
                                unsigned long flags)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
        write_seqcount_end(&syncp->seq);
-       local_irq_restore(flags);
+       if (IS_ENABLED(CONFIG_PREEMPT_RT))
+               preempt_enable();
+       else
+               local_irq_restore(flags);
 #endif
 }
 
 static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
        return read_seqcount_begin(&syncp->seq);
 #else
        return 0;
@@ -180,7 +190,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *
 
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
        preempt_disable();
 #endif
        return __u64_stats_fetch_begin(syncp);
@@ -189,7 +199,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *sy
 static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                           unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
        return read_seqcount_retry(&syncp->seq, start);
 #else
        return false;
@@ -199,7 +209,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
        preempt_enable();
 #endif
        return __u64_stats_fetch_retry(syncp, start);
@@ -213,7 +223,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+       preempt_disable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        local_irq_disable();
 #endif
        return __u64_stats_fetch_begin(syncp);
@@ -222,7 +234,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
                                             unsigned int start)
 {
-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
+       preempt_enable();
+#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        local_irq_enable();
 #endif
        return __u64_stats_fetch_retry(syncp, start);
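The matching reader side, for completeness (again an illustrative sketch, not
from the commit, reusing the hypothetical my_stats struct above): readers retry
the snapshot until the sequence count is stable, and with the writer now
non-preemptible on 32bit PREEMPT_RT this loop can no longer live-lock against a
preempted lower-priority writer:

static void my_get_stats(struct my_stats *stats, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&stats->syncp);
                *packets = u64_stats_read(&stats->packets);
                *bytes = u64_stats_read(&stats->bytes);
        } while (u64_stats_fetch_retry(&stats->syncp, start));
}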