Commit dcc2dc45 authored by Ingo Molnar

sched/headers, mm: Move 'struct tlbflush_unmap_batch' from <linux/sched.h> to <linux/mm_types_task.h>

Unclutter <linux/sched.h> some more.

Also move the CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH condition inside the
structure body definition, to remove a pair of #ifdefs from sched.h.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 93b5a9a7
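
Why moving the #ifdef inside the structure body removes the #ifdef pair at the use site: the type is then always defined, and when the option is off the struct is simply empty, so it can be embedded unconditionally. Below is a minimal standalone sketch of that pattern, not kernel code: the names (struct batch, struct task, CONFIG_BATCH_FLUSH) are hypothetical stand-ins for struct tlbflush_unmap_batch, struct task_struct, and CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH.

/*
 * Sketch of the "#ifdef inside the struct body" pattern: the struct is
 * always defined, but its members exist only when the option is enabled,
 * so embedders need no #ifdef of their own.
 */
#include <stdbool.h>
#include <stdio.h>

/* #define CONFIG_BATCH_FLUSH 1 */     /* flip this to compare sizes */

struct batch {
#ifdef CONFIG_BATCH_FLUSH
        unsigned long cpumask_bits;     /* stand-in for struct cpumask */
        bool flush_required;
        bool writable;
#endif
};

struct task {
        int pid;
        struct batch tlb_ubc;           /* unconditional: no #ifdef here */
};

int main(void)
{
        /*
         * With the option off, struct batch has no members; under GCC's
         * empty-struct extension (which the kernel relies on) it occupies
         * 0 bytes, so struct task pays nothing for embedding it.
         */
        printf("sizeof(struct batch) = %zu\n", sizeof(struct batch));
        printf("sizeof(struct task)  = %zu\n", sizeof(struct task));
        return 0;
}

Flipping the #define changes sizeof(struct batch) from 0 to its full size, while struct task and all code that embeds it compile unchanged either way.
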
include/linux/mm_types_task.h:

@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 #include <asm/page.h>
@@ -62,4 +63,25 @@ struct page_frag {
 #endif
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+        /*
+         * Each bit set is a CPU that potentially has a TLB entry for one of
+         * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+         */
+        struct cpumask cpumask;
+
+        /* True if any bit in cpumask is set */
+        bool flush_required;
+
+        /*
+         * If true then the PTE was dirty when unmapped. The entry must be
+         * flushed before IO is initiated or a stale TLB entry potentially
+         * allows an update without redirtying the page.
+         */
+        bool writable;
+#endif
+};
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
include/linux/sched.h:

@@ -484,25 +484,6 @@ struct wake_q_node {
 	struct wake_q_node *next;
 };
 
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
-        /*
-         * Each bit set is a CPU that potentially has a TLB entry for one of
-         * the PFNs being flushed. See set_tlb_ubc_flush_pending().
-         */
-        struct cpumask cpumask;
-
-        /* True if any bit in cpumask is set */
-        bool flush_required;
-
-        /*
-         * If true then the PTE was dirty when unmapped. The entry must be
-         * flushed before IO is initiated or a stale TLB entry potentially
-         * allows an update without redirtying the page.
-         */
-        bool writable;
-};
-
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
         /*
@@ -895,9 +876,7 @@ struct task_struct {
         unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
         struct tlbflush_unmap_batch tlb_ubc;
-#endif
 
         struct rcu_head rcu;