Commit 5e0b3a4e authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler changes from Ingo Molnar:
 "Various optimizations, cleanups and smaller fixes - no major changes
  in scheduler behavior"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix the sd_parent_degenerate() code
  sched/fair: Rework and comment the group_imb code
  sched/fair: Optimize find_busiest_queue()
  sched/fair: Make group power more consistent
  sched/fair: Remove duplicate load_per_task computations
  sched/fair: Shrink sg_lb_stats and play memset games
  sched: Clean-up struct sd_lb_stat
  sched: Factor out code to should_we_balance()
  sched: Remove one division operation in find_busiest_queue()
  sched/cputime: Use this_cpu_add() in task_group_account_field()
  cpumask: Fix cpumask leak in partition_sched_domains()
  sched/x86: Optimize switch_mm() for multi-threaded workloads
  generic-ipi: Kill unnecessary variable - csd_flags
  numa: Mark __node_set() as __always_inline
  sched/fair: Cleanup: remove duplicate variable declaration
  sched/__wake_up_sync_key(): Fix nr_exclusive tasks which lead to WF_SYNC clearing
parents 0d99b708 10866e62
arch/x86/include/asm/mmu_context.h
@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 /* Re-load page tables */
                 load_cr3(next->pgd);
 
-                /* stop flush ipis for the previous mm */
+                /* Stop flush ipis for the previous mm */
                 cpumask_clear_cpu(cpu, mm_cpumask(prev));
 
-                /*
-                 * load the LDT, if the LDT is different:
-                 */
+                /* Load the LDT, if the LDT is different: */
                 if (unlikely(prev->context.ldt != next->context.ldt))
                         load_LDT_nolock(&next->context);
         }
 #ifdef CONFIG_SMP
         else {
                 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
-                if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
-                        /* We were in lazy tlb mode and leave_mm disabled
+                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+                        /*
+                         * On established mms, the mm_cpumask is only changed
+                         * from irq context, from ptep_clear_flush() while in
+                         * lazy tlb mode, and here. Irqs are blocked during
+                         * schedule, protecting us from simultaneous changes.
+                         */
+                        cpumask_set_cpu(cpu, mm_cpumask(next));
+                        /*
+                         * We were in lazy tlb mode and leave_mm disabled
                          * tlb flush IPI delivery. We must reload CR3
                          * to make sure to use no freed page tables.
                          */
......
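The switch_mm() hunk above replaces an unconditional cpumask_test_and_set_cpu() with a plain test, doing the set only when the CPU's bit is actually clear, so the common case for multi-threaded workloads no longer performs an atomic read-modify-write on a shared cacheline. Below is a minimal user-space sketch of that test-before-set idea, assuming C11 atomics; cpu_mask and mark_cpu_active() are invented names for illustration, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long cpu_mask;          /* stand-in for mm_cpumask(next) */

static void mark_cpu_active(int cpu)
{
        unsigned long bit = 1UL << cpu;

        /* Hot path: the bit is usually already set; a plain load is enough. */
        if (atomic_load_explicit(&cpu_mask, memory_order_relaxed) & bit)
                return;

        /* Cold path: only now pay for the atomic read-modify-write. */
        atomic_fetch_or_explicit(&cpu_mask, bit, memory_order_relaxed);
}

int main(void)
{
        mark_cpu_active(3);     /* first call performs the atomic OR       */
        mark_cpu_active(3);     /* later calls return after the cheap test */
        printf("mask = %#lx\n", atomic_load(&cpu_mask));
        return 0;
}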
include/linux/nodemask.h
@@ -98,8 +98,17 @@
 typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
 extern nodemask_t _unused_nodemask_arg_;
 
+/*
+ * The inline keyword gives the compiler room to decide to inline, or
+ * not inline a function as it sees best. However, as these functions
+ * are called in both __init and non-__init functions, if they are not
+ * inlined we will end up with a section mis-match error (of the type of
+ * freeable items not being freed). So we must use __always_inline here
+ * to fix the problem. If other functions in the future also end up in
+ * this situation they will also need to be annotated as __always_inline
+ */
 #define node_set(node, dst) __node_set((node), &(dst))
-static inline void __node_set(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
 {
         set_bit(node, dstp->bits);
 }
......
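The nodemask hunk relies on __always_inline, which on gcc/clang the kernel defines as inline plus the always_inline function attribute; plain inline is only a hint, and forcing the expansion is what avoids the section mis-match described in the new comment. A small stand-alone sketch of that attribute follows; force_inline and set_node_bit() are invented names for illustration, not the kernel's definitions.

#include <stdio.h>

/* Rough equivalent of what the kernel's __always_inline macro expands to. */
#define force_inline inline __attribute__((__always_inline__))

/* Mirrors the shape of __node_set(): set one bit in a small bitmap,
 * guaranteed to be expanded at every call site. */
static force_inline void set_node_bit(int node, unsigned long *bits)
{
        bits[node / 64] |= 1UL << (node % 64);
}

int main(void)
{
        unsigned long nodes[2] = { 0, 0 };

        set_node_bit(3, nodes);
        set_node_bit(70, nodes);
        printf("%#lx %#lx\n", nodes[0], nodes[1]);
        return 0;
}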
kernel/sched/core.c
@@ -2677,7 +2677,7 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
         if (unlikely(!q))
                 return;
 
-        if (unlikely(!nr_exclusive))
+        if (unlikely(nr_exclusive != 1))
                 wake_flags = 0;
 
         spin_lock_irqsave(&q->lock, flags);
...@@ -4964,7 +4964,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) ...@@ -4964,7 +4964,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
SD_BALANCE_FORK | SD_BALANCE_FORK |
SD_BALANCE_EXEC | SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER | SD_SHARE_CPUPOWER |
SD_SHARE_PKG_RESOURCES); SD_SHARE_PKG_RESOURCES |
SD_PREFER_SIBLING);
if (nr_node_ids == 1) if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE; pflags &= ~SD_SERIALIZE;
} }
@@ -5173,6 +5174,13 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
                         tmp->parent = parent->parent;
                         if (parent->parent)
                                 parent->parent->child = tmp;
+                        /*
+                         * Transfer SD_PREFER_SIBLING down in case of a
+                         * degenerate parent; the spans match for this
+                         * so the property transfers.
+                         */
+                        if (parent->flags & SD_PREFER_SIBLING)
+                                tmp->flags |= SD_PREFER_SIBLING;
                         destroy_sched_domain(parent, cpu);
                 } else
                         tmp = tmp->parent;
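The two kernel/sched/core.c hunks above work as a pair: sd_parent_degenerate() now treats SD_PREFER_SIBLING like the other flags that are meaningless on a single-group parent, so such a parent can still be collapsed, and cpu_attach_domain() hands the flag down so the balancing hint is not lost with the degenerate level. Here is a toy stand-alone model of that hand-off; struct domain, PREFER_SIBLING and collapse_parent() are illustrative stand-ins that only loosely mirror the scheduler's structures.

#include <stdio.h>
#include <stdlib.h>

#define PREFER_SIBLING 0x1

struct domain {
        unsigned int flags;
        struct domain *parent;
};

/* Unlink and free a degenerate parent, keeping its balancing preference. */
static void collapse_parent(struct domain *d)
{
        struct domain *parent = d->parent;

        if (!parent)
                return;

        d->parent = parent->parent;
        if (parent->flags & PREFER_SIBLING)
                d->flags |= PREFER_SIBLING;     /* transfer before destroying */
        free(parent);
}

int main(void)
{
        struct domain *top = calloc(1, sizeof(*top));
        struct domain child = { .flags = 0, .parent = top };

        if (!top)
                return 1;
        top->flags = PREFER_SIBLING;
        collapse_parent(&child);
        printf("child flags = %#x\n", child.flags);     /* prints 0x1 */
        return 0;
}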
@@ -6239,8 +6247,9 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                         ;
         }
 
+        n = ndoms_cur;
         if (doms_new == NULL) {
-                ndoms_cur = 0;
+                n = 0;
                 doms_new = &fallback_doms;
                 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
                 WARN_ON_ONCE(dattr_new);
@@ -6248,7 +6257,7 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 
         /* Build new domains */
         for (i = 0; i < ndoms_new; i++) {
-                for (j = 0; j < ndoms_cur && !new_topology; j++) {
+                for (j = 0; j < n && !new_topology; j++) {
                         if (cpumask_equal(doms_new[i], doms_cur[j])
                             && dattrs_equal(dattr_new, i, dattr_cur, j))
                                 goto match2;
......
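The partition_sched_domains() fix keeps ndoms_cur untouched and introduces a local n purely as the bound of the match loop (forced to 0 in the fallback case), so the code later in the function that frees the old doms_cur (not shown in the hunk) still sees the real number of entries and their cpumasks get freed. A reduced user-space model of that pattern follows; struct dom and repartition() are invented for the sketch and only loosely mirror the kernel function.

#include <stdio.h>
#include <stdlib.h>

struct dom { int *mask; };

static struct dom *doms_cur;    /* currently installed domain set */
static int ndoms_cur;           /* how many entries it holds      */

/* Install a new set; new_doms == NULL models the "fallback" case. */
static void repartition(struct dom *new_doms, int ndoms_new)
{
        int i, n = ndoms_cur;           /* snapshot, used only as a loop bound */

        if (new_doms == NULL)
                n = 0;                  /* skip matching; ndoms_cur stays intact */

        for (i = 0; i < n; i++) {
                /* ... try to match doms_cur[i] against the new set ... */
        }

        /* Teardown must still see the real count, or the masks leak. */
        for (i = 0; i < ndoms_cur; i++)
                free(doms_cur[i].mask);
        free(doms_cur);

        doms_cur = new_doms;
        ndoms_cur = ndoms_new;
}

int main(void)
{
        struct dom *set = calloc(2, sizeof(*set));

        if (!set)
                return 1;
        set[0].mask = malloc(16);
        set[1].mask = malloc(16);
        repartition(set, 2);    /* install two domains                       */
        repartition(NULL, 0);   /* the old code leaked both masks right here */
        return 0;
}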
kernel/sched/cputime.c
@@ -121,7 +121,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
          * is the only cgroup, then nothing else should be necessary.
          *
          */
-        __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
 
         cpuacct_account_field(p, index, tmp);
 }
......
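The cputime hunk folds an open-coded read-modify-write on __get_cpu_var() into a single __this_cpu_add(), so the per-CPU field is bumped in one operation rather than by forming the per-CPU address first and then adding to it. As a very rough user-space analogue, a thread-local counter plays the role of the per-CPU one in the sketch below; struct cpustat, kstat and account_field() are invented names, and a thread-local variable only approximates per-CPU data.

#include <stdio.h>
#include <stdint.h>

struct cpustat {
        uint64_t stat[4];
};

/* A thread-local struct stands in for the kernel's per-CPU kernel_cpustat. */
static _Thread_local struct cpustat kstat;

static void account_field(int index, uint64_t tmp)
{
        /* One add on this thread's private copy; no shared state is touched. */
        kstat.stat[index] += tmp;
}

int main(void)
{
        account_field(1, 250);
        account_field(1, 250);
        printf("stat[1] = %llu\n", (unsigned long long)kstat.stat[1]);
        return 0;
}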
kernel/smp.c
@@ -186,25 +186,13 @@ void generic_smp_call_function_single_interrupt(void)
         while (!list_empty(&list)) {
                 struct call_single_data *csd;
-                unsigned int csd_flags;
 
                 csd = list_entry(list.next, struct call_single_data, list);
                 list_del(&csd->list);
 
-                /*
-                 * 'csd' can be invalid after this call if flags == 0
-                 * (when called through generic_exec_single()),
-                 * so save them away before making the call:
-                 */
-                csd_flags = csd->flags;
-
                 csd->func(csd->info);
 
-                /*
-                 * Unlocked CSDs are valid through generic_exec_single():
-                 */
-                if (csd_flags & CSD_FLAG_LOCK)
-                        csd_unlock(csd);
+                csd_unlock(csd);
         }
 }
......
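After the generic-ipi cleanup above, the handler simply runs each callback and then releases the CSD unconditionally, with no saved flags copy. The toy loop below models that resulting shape; this struct call_single_data, say() and handle() are pared-down stand-ins, not the kernel's definitions.

#include <stdio.h>

#define CSD_FLAG_LOCK 0x01

/* Simplified stand-in for the kernel structure of the same name. */
struct call_single_data {
        void (*func)(void *info);
        void *info;
        unsigned int flags;
};

static void say(void *info)
{
        printf("%s\n", (const char *)info);
}

/* Model of the handler loop after the cleanup: run the callback, then
 * hand the slot back unconditionally. */
static void handle(struct call_single_data *csd, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                csd[i].func(csd[i].info);
                csd[i].flags &= ~CSD_FLAG_LOCK;         /* csd_unlock() */
        }
}

int main(void)
{
        struct call_single_data work[2] = {
                { say, "first",  CSD_FLAG_LOCK },
                { say, "second", CSD_FLAG_LOCK },
        };

        handle(work, 2);
        return 0;
}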