Commit 314ff785 authored by Ingo Molnar

mm/vmacache, sched/headers: Introduce 'struct vmacache' and move it from...

mm/vmacache, sched/headers: Introduce 'struct vmacache' and move it from <linux/sched.h> to <linux/mm_types>

The <linux/sched.h> header includes various vmacache related defines,
which are arguably misplaced.

Move them to mm_types.h and minimize the sched.h impact by putting
all task vmacache state into a new 'struct vmacache' structure.

No change in functionality.
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 780de9dd
...@@ -360,6 +360,18 @@ struct vm_area_struct { ...@@ -360,6 +360,18 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx; struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
}; };
/*
* The per task VMA cache array:
*/
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
struct vmacache {
u32 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
struct core_thread { struct core_thread {
struct task_struct *task; struct task_struct *task;
struct core_thread *next; struct core_thread *next;
......
...@@ -134,10 +134,6 @@ struct blk_plug; ...@@ -134,10 +134,6 @@ struct blk_plug;
struct filename; struct filename;
struct nameidata; struct nameidata;
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
/* /*
* These are the constant used to fake the fixed-point load-average * These are the constant used to fake the fixed-point load-average
* counting. Some notes: * counting. Some notes:
...@@ -1550,9 +1546,10 @@ struct task_struct { ...@@ -1550,9 +1546,10 @@ struct task_struct {
#endif #endif
struct mm_struct *mm, *active_mm; struct mm_struct *mm, *active_mm;
/* per-thread vma caching */
u32 vmacache_seqnum; /* Per-thread vma caching: */
struct vm_area_struct *vmacache[VMACACHE_SIZE]; struct vmacache vmacache;
#if defined(SPLIT_RSS_COUNTING) #if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat; struct task_rss_stat rss_stat;
#endif #endif
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
static inline void vmacache_flush(struct task_struct *tsk) static inline void vmacache_flush(struct task_struct *tsk)
{ {
memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
} }
extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_flush_all(struct mm_struct *mm);
......
...@@ -232,9 +232,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) ...@@ -232,9 +232,9 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
int i; int i;
for (i = 0; i < VMACACHE_SIZE; i++) { for (i = 0; i < VMACACHE_SIZE; i++) {
if (!current->vmacache[i]) if (!current->vmacache.vmas[i])
continue; continue;
flush_cache_range(current->vmacache[i], flush_cache_range(current->vmacache.vmas[i],
addr, addr + BREAK_INSTR_SIZE); addr, addr + BREAK_INSTR_SIZE);
} }
} }
......
...@@ -757,7 +757,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) ...@@ -757,7 +757,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
mm->map_count--; mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) { for (i = 0; i < VMACACHE_SIZE; i++) {
/* if the vma is cached, invalidate the entire cache */ /* if the vma is cached, invalidate the entire cache */
if (curr->vmacache[i] == vma) { if (curr->vmacache.vmas[i] == vma) {
vmacache_invalidate(mm); vmacache_invalidate(mm);
break; break;
} }
......
...@@ -60,7 +60,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm) ...@@ -60,7 +60,7 @@ static inline bool vmacache_valid_mm(struct mm_struct *mm)
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{ {
if (vmacache_valid_mm(newvma->vm_mm)) if (vmacache_valid_mm(newvma->vm_mm))
current->vmacache[VMACACHE_HASH(addr)] = newvma; current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
} }
static bool vmacache_valid(struct mm_struct *mm) static bool vmacache_valid(struct mm_struct *mm)
...@@ -71,12 +71,12 @@ static bool vmacache_valid(struct mm_struct *mm) ...@@ -71,12 +71,12 @@ static bool vmacache_valid(struct mm_struct *mm)
return false; return false;
curr = current; curr = current;
if (mm->vmacache_seqnum != curr->vmacache_seqnum) { if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
/* /*
* First attempt will always be invalid, initialize * First attempt will always be invalid, initialize
* the new cache for this task here. * the new cache for this task here.
*/ */
curr->vmacache_seqnum = mm->vmacache_seqnum; curr->vmacache.seqnum = mm->vmacache_seqnum;
vmacache_flush(curr); vmacache_flush(curr);
return false; return false;
} }
...@@ -93,7 +93,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) ...@@ -93,7 +93,7 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
return NULL; return NULL;
for (i = 0; i < VMACACHE_SIZE; i++) { for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i]; struct vm_area_struct *vma = current->vmacache.vmas[i];
if (!vma) if (!vma)
continue; continue;
...@@ -121,7 +121,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, ...@@ -121,7 +121,7 @@ struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
return NULL; return NULL;
for (i = 0; i < VMACACHE_SIZE; i++) { for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i]; struct vm_area_struct *vma = current->vmacache.vmas[i];
if (vma && vma->vm_start == start && vma->vm_end == end) { if (vma && vma->vm_start == start && vma->vm_end == end) {
count_vm_vmacache_event(VMACACHE_FIND_HITS); count_vm_vmacache_event(VMACACHE_FIND_HITS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment