Commit d614d315 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] fix madvise(MADV_DONTNEED) for nonlinear vmas

From: Hugh Dickins <hugh@veritas.com>

Jamie points out that madvise(MADV_DONTNEED) should unmap pages from a
nonlinear area in such a way that the nonlinear offsets are preserved if the
pages do turn out to be needed later after all, instead of reverting them to
linearity: to do that, madvise_dontneed needs to pass a zap_details block
down to the zap machinery.

(But this still leaves mincore unaware of nonlinear vmas: bigger job.)
parent e8b338d6
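
What makes the preservation work is what zap_pte_range does once it sees
details->nonlinear_vma: instead of just clearing the pte, it compares the
page's file offset with the offset a linear mapping would imply, and if they
differ it leaves behind a file pte encoding page->index. The following is
only a sketch of that idea under 2.6-era conventions (pte_clear, set_pte,
pgoff_to_pte), not the exact kernel code:

/*
 * Sketch only: how a nonlinear-aware zap handles one pte.
 * 'ptep' maps 'address' in 'vma'; 'page' is the page it mapped.
 */
static void zap_nonlinear_pte_sketch(struct vm_area_struct *vma,
		pte_t *ptep, unsigned long address,
		struct page *page, struct zap_details *details)
{
	/* File offset this address would map in a linear vma */
	pgoff_t linear = ((address - vma->vm_start) >> PAGE_SHIFT)
				+ vma->vm_pgoff;

	pte_clear(ptep);
	if (details && details->nonlinear_vma && page->index != linear)
		/*
		 * Leave a file pte recording page->index, so a later
		 * fault restores this page at its nonlinear offset
		 * instead of reverting the slot to linearity.
		 */
		set_pte(ptep, pgoff_to_pte(page->index));
}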
@@ -439,7 +439,16 @@ struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
 void shmem_lock(struct file * file, int lock);
 int shmem_zero_setup(struct vm_area_struct *);
 
 struct zap_details;
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
+	struct address_space *check_mapping;	/* Check page->mapping if set */
+	pgoff_t first_index;			/* Lowest page->index to unmap */
+	pgoff_t last_index;			/* Highest page->index to unmap */
+};
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 			unsigned long size, struct zap_details *);
 int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
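
For contrast with the nonlinear case the next hunk adds, the check_mapping
side of the block serves callers that must zap only ptes belonging to one
file mapping within an index window. A hypothetical caller might fill it
like this (illustrative only; mapping, lstart, lend, vma, start and end are
assumed to be in scope):

	struct zap_details details;

	/* Zap only ptes of 'mapping' whose page->index is in [lstart, lend] */
	details.check_mapping = mapping;	/* skip pages of other mappings */
	details.nonlinear_vma = NULL;		/* no nonlinear offsets to keep */
	details.first_index = lstart;
	details.last_index = lend;
	zap_page_range(vma, start, end - start, &details);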
@@ -92,10 +92,19 @@ static long madvise_willneed(struct vm_area_struct * vma,
 static long madvise_dontneed(struct vm_area_struct * vma,
 			     unsigned long start, unsigned long end)
 {
+	struct zap_details details;
+
 	if (vma->vm_flags & VM_LOCKED)
 		return -EINVAL;
 
-	zap_page_range(vma, start, end - start, NULL);
+	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+		details.check_mapping = NULL;
+		details.nonlinear_vma = vma;
+		details.first_index = 0;
+		details.last_index = ULONG_MAX;
+		zap_page_range(vma, start, end - start, &details);
+	} else
+		zap_page_range(vma, start, end - start, NULL);
 
 	return 0;
 }
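
Seen from userspace, the behaviour this hunk fixes can be exercised with
remap_file_pages(2) followed by MADV_DONTNEED. A rough demo, with error
handling elided and assuming fd refers to a file at least two pages long:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

static void demo(int fd)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * pagesz, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);

	/* Make the vma nonlinear: map file page 1 at vma offset 0 */
	remap_file_pages(p, pagesz, 0, 1, 0);

	/* Drop the pages: the fixed zap records pgoff 1 in the pte */
	madvise(p, pagesz, MADV_DONTNEED);

	/*
	 * Touching p faults file page 1 back in.  Without the fix,
	 * the slot reverted to linearity and file page 0 appeared.
	 */
	(void)p[0];

	munmap(p, 2 * pagesz);
}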
@@ -384,16 +384,6 @@ skip_copy_pmd_range:	address = (address + PGDIR_SIZE) & PGDIR_MASK;
 	return -ENOMEM;
 }
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
-	struct address_space *check_mapping;	/* Check page->mapping if set */
-	pgoff_t first_index;			/* Lowest page->index to unmap */
-	pgoff_t last_index;			/* Highest page->index to unmap */
-};
-
 static void zap_pte_range(struct mmu_gather *tlb,
 		pmd_t *pmd, unsigned long address,
 		unsigned long size, struct zap_details *details)