Commit a913e182 authored by Hugh Dickins, committed by Linus Torvalds

ksm: clean up obsolete references

A few cleanups, given the munlock fix: the comment on ksm_test_exit() no
longer applies, and it can be made private to ksm.c; there's no more
reference to mmu_gather or tlb.h, and mmap.c doesn't need ksm.h.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8314c4f2
include/linux/ksm.h

@@ -12,8 +12,6 @@
 #include <linux/sched.h>
 #include <linux/vmstat.h>
 
-struct mmu_gather;
-
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -27,19 +25,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-/*
- * For KSM to handle OOM without deadlock when it's breaking COW in a
- * likely victim of the OOM killer, exit_mmap() has to serialize with
- * ksm_exit() after freeing mm's pages but before freeing its page tables.
- * That leaves a window in which KSM might refault pages which have just
- * been finally unmapped: guard against that with ksm_test_exit(), and
- * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
- */
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return atomic_read(&mm->mm_users) == 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
@@ -79,11 +64,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }
mm/ksm.c

@@ -32,7 +32,6 @@
 #include <linux/mmu_notifier.h>
 #include <linux/ksm.h>
 
-#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
 /*
@@ -284,6 +283,19 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
 	return rmap_item->address & STABLE_FLAG;
 }
 
+/*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
 /*
  * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
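[Editorial note, not part of the commit: the comment added above describes a pattern rather than showing a caller, so here is a minimal illustrative sketch of how a ksm.c-style walker would use ksm_test_exit(). The function ksm_walk_mm() is hypothetical; down_read()/up_read() on mm->mmap_sem and the mm->mmap / vma->vm_next walk are the kernel APIs of this era.]

/*
 * Hypothetical sketch: take mmap_sem, then use ksm_test_exit() to back
 * out as soon as mm_users reaches zero, so the walker never touches
 * page tables after the mm has passed through ksm_exit().
 */
static int ksm_walk_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))		/* mm is already exiting: do nothing */
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_test_exit(mm))	/* re-check: exit can race with the walk */
			break;
		/* ... scan or unmerge pages in this vma ... */
	}
out:
	up_read(&mm->mmap_sem);
	return err;
}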
mm/mmap.c

@@ -27,7 +27,6 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
-#include <linux/ksm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>