Commit f4d55ef3 authored by Andrew Morton, committed by James Bottomley

[PATCH] hugetlb unmap_vmas() SMP && PREEMPT fix

Patch from Kevin Pedretti <pedretti@ieee.org>

The unmap_vmas() logic is designed to chew away at all the pages without
holding off preemption for too long.  But with CONFIG_SMP=y and
CONFIG_PREEMPT=y the number of pages which we batch up between rescheduling
opportunities is not a multiple of HPAGE_SIZE.  So unmap_vmas() ends up
calling unmap_hugepage_range() with a region that is poorly aligned and sized,
and it goes BUG.

Fix that up by ensuring that we always work across hugepage regions in
HPAGE_SIZE chunks.
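
To illustrate the chunking requirement, here is a minimal user-space sketch (not the kernel code).  The page size, huge page size and batch size are illustrative assumptions; the sketch only shows why picking the block size per-VMA, as the patch below does, keeps hugetlb regions on HPAGE_SIZE boundaries:

/*
 * User-space sketch of the chunked walk in unmap_vmas().  All sizes are
 * illustrative: a 4K base page, a 4M huge page, and a small batch size
 * (as used when preemption must not be held off for long) that is not a
 * multiple of HPAGE_SIZE.  Stepping a hugetlb VMA by the small batch
 * size would hand a sub-hugepage, misaligned range to the hugepage
 * unmap path; selecting the block size per-VMA avoids that.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	(4UL * 1024)		/* illustrative */
#define HPAGE_SIZE	(4UL * 1024 * 1024)	/* illustrative */
#define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)		/* small preempt-friendly batch */

struct vma { unsigned long start, end; int is_hugetlb; };

/* Mirrors the zap_block_size() helper added by the patch. */
static unsigned long zap_block_size(const struct vma *vma)
{
	return vma->is_hugetlb ? HPAGE_SIZE : ZAP_BLOCK_SIZE;
}

/* Stand-in for unmap_hugepage_range(): insists on hugepage alignment. */
static void unmap_hugepage_range(unsigned long start, unsigned long end)
{
	assert(!(start & (HPAGE_SIZE - 1)) && !(end & (HPAGE_SIZE - 1)));
	printf("huge unmap %#lx-%#lx\n", start, end);
}

int main(void)
{
	struct vma vma = { 0x40000000, 0x40000000 + 2 * HPAGE_SIZE, 1 };
	unsigned long addr = vma.start;

	while (addr < vma.end) {
		/* Per-VMA block size: HPAGE_SIZE chunks for hugetlb VMAs. */
		unsigned long block = zap_block_size(&vma);
		unsigned long end = addr + block;

		if (end > vma.end)
			end = vma.end;
		unmap_hugepage_range(addr, end);
		addr = end;	/* the kernel reschedules between blocks */
	}
	return 0;
}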
parent 4d580698
@@ -66,6 +66,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	0	/* Keep the compiler happy */
+#define HPAGE_SIZE	0
 #endif
 #endif /* !CONFIG_HUGETLB_PAGE */
@@ -484,6 +484,16 @@ void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #define ZAP_BLOCK_SIZE	(~(0UL))
 #endif
+
+/*
+ * hugepage regions must be unmapped with HPAGE_SIZE granularity
+ */
+static inline unsigned long zap_block_size(struct vm_area_struct *vma)
+{
+	if (is_vm_hugetlb_page(vma))
+		return HPAGE_SIZE;
+	return ZAP_BLOCK_SIZE;
+}
 
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
@@ -514,7 +524,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted)
 {
-	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+	unsigned long zap_bytes = zap_block_size(vma);
 	unsigned long tlb_start;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
 	int ret = 0;
@@ -562,7 +572,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 			*tlbp = tlb_gather_mmu(mm, 0);
 			tlb_start_valid = 0;
 		}
-		zap_bytes = ZAP_BLOCK_SIZE;
+		zap_bytes = zap_block_size(vma);
 	}
 	if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
 		printk("%s: VMA list is not sorted correctly!\n",