Commit 0bd3a173 authored by Peter Zijlstra, committed by Ingo Molnar

sched/preempt, locking: Rework local_bh_{dis,en}able()

Currently local_bh_disable() is out-of-line for no apparent reason.
So inline it to save a few cycles on call/return nonsense; the
function body is a single add on x86 (a few loads and a store extra
on load/store archs).

Also expose two new local_bh functions:

  __local_bh_{dis,en}able_ip(unsigned long ip, unsigned int cnt);

Which implement the actual local_bh_{dis,en}able() behaviour.

The next patch uses the exposed @cnt argument to optimize bh lock
functions.
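
As a rough illustration (not part of this patch; my_spin_lock_bh(),
my_spin_unlock_bh() and MY_LOCK_OFFSET are made-up names), a bh lock
helper could fold the softirq disable and the lock's preempt count
into a single preempt_count update by passing a larger @cnt:

  #include <linux/kernel.h>
  #include <linux/bh.h>
  #include <linux/preempt_mask.h>
  #include <linux/spinlock.h>

  /* softirq-disable plus one extra preempt count, applied in one add */
  #define MY_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + 1)

  static inline void my_spin_lock_bh(raw_spinlock_t *lock)
  {
  	/* one preempt_count_add(MY_LOCK_OFFSET) covers bh-disable and the lock */
  	__local_bh_disable_ip(_RET_IP_, MY_LOCK_OFFSET);
  	do_raw_spin_lock(lock);
  }

  static inline void my_spin_unlock_bh(raw_spinlock_t *lock)
  {
  	do_raw_spin_unlock(lock);
  	/* drops both counts; pending softirqs run once they are re-enabled */
  	__local_bh_enable_ip(_RET_IP_, MY_LOCK_OFFSET);
  }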

With build fixes from Jacob Pan.

Cc: rjw@rjwysocki.net
Cc: rui.zhang@intel.com
Cc: jacob.jun.pan@linux.intel.com
Cc: Mike Galbraith <bitbucket@online.de>
Cc: hpa@zytor.com
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: lenb@kernel.org
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131119151338.GF3694@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 10b033d4
--- a/include/linux/bh.h
+++ b/include/linux/bh.h
 #ifndef _LINUX_BH_H
 #define _LINUX_BH_H
 
-extern void local_bh_disable(void);
+#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+	preempt_count_add(cnt);
+	barrier();
+}
+#endif
+
+static inline void local_bh_disable(void)
+{
+	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
+
 extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+}
+
+static inline void local_bh_enable(void)
+{
+	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
 
 #endif /* _LINUX_BH_H */
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -5,6 +5,7 @@
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
+#include <asm/hardirq.h>
 
 extern void synchronize_irq(unsigned int irq);
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -2,7 +2,6 @@
 #define LINUX_PREEMPT_MASK_H
 
 #include <linux/preempt.h>
-#include <asm/hardirq.h>
 
 /*
  * We put the hardirq and softirq counter into the preemption
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,7 @@ static void wakeup_softirqd(void)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
 	unsigned long flags;
 
@@ -114,21 +114,9 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 	if (preempt_count() == cnt)
 		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
-	preempt_count_add(cnt);
-	barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-void local_bh_disable(void)
-{
-	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
 static void __local_bh_enable(unsigned int cnt)
 {
 	WARN_ON_ONCE(!irqs_disabled());
@@ -151,7 +139,7 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 {
 	WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -166,7 +154,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
 	 */
-	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+	preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
 		/*
@@ -182,18 +170,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 #endif
 	preempt_check_resched();
 }
-
-void local_bh_enable(void)
-{
-	_local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
-	_local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
 
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
@@ -230,7 +207,7 @@ asmlinkage void __do_softirq(void)
 	pending = local_softirq_pending();
 	account_irq_enter_time(current);
 
-	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();