Commit c360192b authored by Martin Schwidefsky

s390/preempt: move preempt_count to the lowcore

Convert s390 to use a field in struct lowcore for the CPU preemption
count. Accessing a lowcore field is slightly cheaper than accessing a
thread_info variable, and it removes the dependency on a task-related
structure.
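
As a rough illustration (a sketch, not code from this patch): before the
change the generic asm-generic/preempt.h accessor reached the count
through the task's thread_info, while the new s390 accessor reads a field
of the per-CPU lowcore at a fixed address. The two wrapper names below
are invented for the comparison only.

	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <linux/thread_info.h>	/* current_thread_info() */
	#include <asm/lowcore.h>	/* S390_lowcore */

	/* Before: indirect load through the thread_info pointer. */
	static inline int preempt_count_via_thread_info(void)
	{
		return READ_ONCE(current_thread_info()->preempt_count);
	}

	/* After: direct load from the lowcore at a fixed offset. */
	static inline int preempt_count_via_lowcore(void)
	{
		return READ_ONCE(S390_lowcore.preempt_count);
	}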

bloat-o-meter on the vmlinux image for the default configuration
(CONFIG_PREEMPT_NONE=y) reports a small reduction in text size:

add/remove: 0/0 grow/shrink: 18/578 up/down: 228/-5448 (-5220)

A larger improvement is achieved with the default configuration
but with CONFIG_PREEMPT=y and CONFIG_DEBUG_PREEMPT=n:

add/remove: 2/6 grow/shrink: 59/4477 up/down: 1618/-228762 (-227144)

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 1993dbc7
@@ -126,7 +126,8 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
+	__u32	preempt_count;			/* 0x0390 */
+	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u32	fpu_flags;			/* 0x03a4 */
...
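The new 4-byte field takes the first half of the former 8-byte pad, so
gmap stays at 0x0398 and all following lowcore offsets are unchanged. A
hypothetical compile-time check of that assumption (not part of the
patch; BUILD_BUG_ON and offsetof are the kernel's own helpers) could look
like this:

	#include <linux/bug.h>		/* BUILD_BUG_ON() */
	#include <linux/stddef.h>	/* offsetof() */
	#include <asm/lowcore.h>

	static inline void check_lowcore_preempt_count_offset(void)
	{
		/* must match the 0x0390 slot noted in the comments above */
		BUILD_BUG_ON(offsetof(struct lowcore, preempt_count) != 0x0390);
		/* the shrunken pad must keep gmap at its old offset */
		BUILD_BUG_ON(offsetof(struct lowcore, gmap) != 0x0398);
	}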
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}

#define init_task_preempt_count(p)	do { } while (0)

#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}

static inline void __preempt_count_add(int val)
{
	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
		__atomic_add_const(val, &S390_lowcore.preempt_count);
	else
		__atomic_add(val, &S390_lowcore.preempt_count);
}

static inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

static inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define PREEMPT_ENABLED	(0)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}

static inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}

#define init_task_preempt_count(p)	do { } while (0)

#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
}

static inline void clear_preempt_need_resched(void)
{
}

static inline bool test_preempt_need_resched(void)
{
	return false;
}

static inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}

static inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}

static inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
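
Two points about the header above, stated as assumptions rather than
quotes from the patch: PREEMPT_NEED_RESCHED is expected to be a single
reserved bit of the 32-bit count (the most significant bit, as on x86),
stored inverted, so the bit is set while no reschedule is needed. On z196
and newer machines this lets __preempt_count_dec_and_test() report "count
reached zero and a reschedule is pending" with a single atomic add, while
the fallback variant keeps a plain counter and consults tif_need_resched()
separately. Roughly, the generic preempt_disable()/preempt_enable() macros
from include/linux/preempt.h (CONFIG_PREEMPT=y, no debugging) then expand
to the following simplified sketch; the real macros add unlikely(),
tracing and debug hooks, and the my_* names are made up here.

	#include <linux/preempt.h>	/* pulls in the arch primitives */

	#define my_preempt_disable()					\
	do {								\
		__preempt_count_add(1);	/* bump the lowcore count */	\
		barrier();						\
	} while (0)

	#define my_preempt_enable()					\
	do {								\
		barrier();						\
		if (__preempt_count_dec_and_test())	/* count 0, resched due */ \
			__preempt_schedule();				\
	} while (0)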
@@ -34,7 +34,6 @@ struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	unsigned long		sys_call_table;	/* System call table address */
 	unsigned int		cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	unsigned int		system_call;
 	__u64			user_timer;
 	__u64			system_timer;
@@ -49,7 +48,6 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
...
@@ -43,7 +43,6 @@ int main(void)
 	OFFSET(__TI_flags, thread_info, flags);
 	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
 	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
 	OFFSET(__TI_user_timer, thread_info, user_timer);
 	OFFSET(__TI_system_timer, thread_info, system_timer);
 	OFFSET(__TI_last_break, thread_info, last_break);
@@ -175,6 +174,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
...
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
 	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
+	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
...
@@ -626,7 +626,7 @@ ENTRY(io_int_handler)
 	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r12)
+	icm	%r0,15,__LC_PREEMPT_COUNT
 	jnz	.Lio_restore		# preemption is disabled
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jno	.Lio_restore
...
@@ -333,6 +333,7 @@ static void __init setup_lowcore(void)
 	lc->thread_info = (unsigned long) &init_thread_union;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 	       MAX_FACILITY_BIT/8);
...
@@ -733,6 +733,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 			 * return to userspace schedule() to block. */
 			__set_current_state(TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
+			set_preempt_need_resched();
 		}
 	}
 out:
...