Commit 6fb11e65 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "Six fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2: NFS hangs in __ocfs2_cluster_lock due to race with ocfs2_unblock_lock
  reiserfs: fix dereference of ERR_PTR
  ratelimit: fix bug in time interval by resetting right begin time
  mm: fix kernel crash in khugepaged thread
  mm: fix mlock accounting
  thp: change pmd_trans_huge_lock() interface to return ptl
parents 3e1e21c7 b1b1e15e
@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 	unsigned int gen;
 	int noqueue_attempted = 0;
 	int dlm_locked = 0;
+	int kick_dc = 0;
 
 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
 		mlog_errno(-EINVAL);
@@ -1524,7 +1525,12 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 unlock:
 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
+	/* ocfs2_unblock_lock requests on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
+	if (kick_dc)
+		ocfs2_wake_downconvert_thread(osb);
 out:
 	/*
 	 * This is helping work around a lock inversion between the page lock
......
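For context, the ocfs2 hunk above applies a "snapshot under lock, wake after unlock" pattern: whether the lockres is still BLOCKED is decided while l_lock is held, and the downconvert thread is kicked only after the spinlock is dropped, so a downconvert request can no longer slip between the flag test and the wakeup. A minimal sketch of that pattern follows; the names are illustrative stand-ins, not the real ocfs2 symbols:

	/* Sketch only: decide under the lock, act after releasing it. */
	static void clear_and_kick(struct fake_lockres *res)
	{
		unsigned long flags;
		int kick = 0;

		spin_lock_irqsave(&res->lock, flags);
		res->flags &= ~FAKE_UPCONVERT_FINISHING;
		kick = (res->flags & FAKE_BLOCKED);	/* snapshot under the lock */
		spin_unlock_irqrestore(&res->lock, flags);

		if (kick)
			fake_wake_downconvert_thread(res);	/* no spinlock held here */
	}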
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty_pmd(vma, addr, pmd);
 			goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmdp, vma);
+	if (ptl) {
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
......
@@ -288,7 +288,7 @@ static int finish_unfinished(struct super_block *s)
 		pathrelse(&path);
 
 		inode = reiserfs_iget(s, &obj_key);
-		if (!inode) {
+		if (IS_ERR_OR_NULL(inode)) {
 			/*
 			 * the unlink almost completed, it just did not
 			 * manage to remove "save" link and release objectid
......
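The reiserfs fix above matters because reiserfs_iget() can hand back an ERR_PTR-encoded error as well as NULL, and a bare !inode test lets the encoded error pointer flow on to code that dereferences it. A small sketch of the idiom, using the real <linux/err.h> macros around a hypothetical lookup helper:

	#include <linux/err.h>

	/* Hypothetical lookup mirroring reiserfs_iget()'s contract:
	 * NULL means "no inode", ERR_PTR(-E...) means a hard failure. */
	static struct inode *lookup_sketch(struct super_block *sb, bool fail)
	{
		if (fail)
			return ERR_PTR(-EIO);
		return NULL;
	}

	static int caller_sketch(struct super_block *sb)
	{
		struct inode *inode = lookup_sketch(sb, true);

		if (IS_ERR_OR_NULL(inode))	/* catches both outcomes */
			return inode ? PTR_ERR(inode) : -ENOENT;
		/* only past this point is inode safe to dereference */
		return 0;
	}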
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-			spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma, ptl);
+		return __pmd_trans_huge_lock(pmd, vma);
 	else
-		return false;
+		return NULL;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
-	return false;
+	return NULL;
 }
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
......
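The huge_mm.h hunk above is the heart of the interface change: instead of returning a bool and passing the page-table lock back through an out parameter, pmd_trans_huge_lock() now returns the ptl directly, with NULL meaning "not a huge/devmap pmd, nothing locked". Every caller then converts mechanically, as the remaining hunks in this commit do. A generic before/after sketch of a call site (not any specific file):

	/* Before: success flag and lock returned separately. */
	spinlock_t *ptl;
	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
		/* ... work on the huge pmd ... */
		spin_unlock(ptl);
	}

	/* After: the lock pointer itself is the success indication. */
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* ... work on the huge pmd ... */
		spin_unlock(ptl);
	}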
...@@ -46,10 +46,10 @@ SCAN_STATUS ...@@ -46,10 +46,10 @@ SCAN_STATUS
TRACE_EVENT(mm_khugepaged_scan_pmd, TRACE_EVENT(mm_khugepaged_scan_pmd,
TP_PROTO(struct mm_struct *mm, unsigned long pfn, bool writable, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
bool referenced, int none_or_zero, int status), bool referenced, int none_or_zero, int status),
TP_ARGS(mm, pfn, writable, referenced, none_or_zero, status), TP_ARGS(mm, page, writable, referenced, none_or_zero, status),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct mm_struct *, mm) __field(struct mm_struct *, mm)
...@@ -62,7 +62,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, ...@@ -62,7 +62,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd,
TP_fast_assign( TP_fast_assign(
__entry->mm = mm; __entry->mm = mm;
__entry->pfn = pfn; __entry->pfn = page ? page_to_pfn(page) : -1;
__entry->writable = writable; __entry->writable = writable;
__entry->referenced = referenced; __entry->referenced = referenced;
__entry->none_or_zero = none_or_zero; __entry->none_or_zero = none_or_zero;
...@@ -104,10 +104,10 @@ TRACE_EVENT(mm_collapse_huge_page, ...@@ -104,10 +104,10 @@ TRACE_EVENT(mm_collapse_huge_page,
TRACE_EVENT(mm_collapse_huge_page_isolate, TRACE_EVENT(mm_collapse_huge_page_isolate,
TP_PROTO(unsigned long pfn, int none_or_zero, TP_PROTO(struct page *page, int none_or_zero,
bool referenced, bool writable, int status), bool referenced, bool writable, int status),
TP_ARGS(pfn, none_or_zero, referenced, writable, status), TP_ARGS(page, none_or_zero, referenced, writable, status),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned long, pfn) __field(unsigned long, pfn)
...@@ -118,7 +118,7 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, ...@@ -118,7 +118,7 @@ TRACE_EVENT(mm_collapse_huge_page_isolate,
), ),
TP_fast_assign( TP_fast_assign(
__entry->pfn = pfn; __entry->pfn = page ? page_to_pfn(page) : -1;
__entry->none_or_zero = none_or_zero; __entry->none_or_zero = none_or_zero;
__entry->referenced = referenced; __entry->referenced = referenced;
__entry->writable = writable; __entry->writable = writable;
......
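The tracepoint hunks above are the khugepaged crash fix: the scan paths can reach their exit labels with page still NULL, and on configurations where page_to_pfn() reads from the struct page itself (classic SPARSEMEM derives the section from page->flags) the unconditional conversion at the call site dereferences that NULL. Passing the page and converting inside TP_fast_assign() makes the lookup conditional. Illustrative fragment, not a literal call site:

	struct page *page = NULL;	/* error path: no page was isolated */

	/* old: page_to_pfn(NULL) evaluated at the call site -> oops */
	/* trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), ...); */

	/* new: NULL checked inside the event, -1 recorded as "no page" */
	/* trace_mm_khugepaged_scan_pmd(mm, page, ...); */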
...@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) ...@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (rs->missed) if (rs->missed)
printk(KERN_WARNING "%s: %d callbacks suppressed\n", printk(KERN_WARNING "%s: %d callbacks suppressed\n",
func, rs->missed); func, rs->missed);
rs->begin = 0; rs->begin = jiffies;
rs->printed = 0; rs->printed = 0;
rs->missed = 0; rs->missed = 0;
} }
......
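In ___ratelimit(), begin marks the start of the current suppression window and is lazily initialized on first use. Resetting it to 0 when a window expired meant the next window was only re-armed at the arrival time of the following call, so the interval drifted by the inter-arrival gap; resetting to jiffies closes one window and opens the next at the same instant. A condensed sketch of the surrounding logic as I read lib/ratelimit.c (field names are the real ratelimit_state members):

	if (!rs->begin)
		rs->begin = jiffies;	/* lazy start of the first window */

	if (time_is_before_jiffies(rs->begin + rs->interval)) {
		/* window expired: report suppressed events, open a new one */
		if (rs->missed)
			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
			       func, rs->missed);
		rs->begin = jiffies;	/* was 0: window restarted too late */
		rs->printed = 0;
		rs->missed = 0;
	}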
...@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct mm_struct *mm = tlb->mm; struct mm_struct *mm = tlb->mm;
int ret = 0; int ret = 0;
if (!pmd_trans_huge_lock(pmd, vma, &ptl)) ptl = pmd_trans_huge_lock(pmd, vma);
if (!ptl)
goto out_unlocked; goto out_unlocked;
orig_pmd = *pmd; orig_pmd = *pmd;
...@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t orig_pmd; pmd_t orig_pmd;
spinlock_t *ptl; spinlock_t *ptl;
if (!__pmd_trans_huge_lock(pmd, vma, &ptl)) ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl)
return 0; return 0;
/* /*
* For architectures like ppc64 we look at deposited pgtable * For architectures like ppc64 we look at deposited pgtable
...@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, ...@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
* We don't have to worry about the ordering of src and dst * We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_sem prevents deadlock. * ptlocks because exclusive mmap_sem prevents deadlock.
*/ */
if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) { old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
if (old_ptl) {
new_ptl = pmd_lockptr(mm, new_pmd); new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl) if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
...@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
spinlock_t *ptl; spinlock_t *ptl;
int ret = 0; int ret = 0;
if (__pmd_trans_huge_lock(pmd, vma, &ptl)) { ptl = __pmd_trans_huge_lock(pmd, vma);
if (ptl) {
pmd_t entry; pmd_t entry;
bool preserve_write = prot_numa && pmd_write(*pmd); bool preserve_write = prot_numa && pmd_write(*pmd);
ret = 1; ret = 1;
...@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* Note that if it returns true, this routine returns without unlocking page * Note that if it returns true, this routine returns without unlocking page
* table lock. So callers must unlock it. * table lock. So callers must unlock it.
*/ */
bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
spinlock_t **ptl)
{ {
*ptl = pmd_lock(vma->vm_mm, pmd); spinlock_t *ptl;
ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
return true; return ptl;
spin_unlock(*ptl); spin_unlock(ptl);
return false; return NULL;
} }
#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
...@@ -2068,7 +2072,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, ...@@ -2068,7 +2072,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
if (likely(writable)) { if (likely(writable)) {
if (likely(referenced)) { if (likely(referenced)) {
result = SCAN_SUCCEED; result = SCAN_SUCCEED;
trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero, trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result); referenced, writable, result);
return 1; return 1;
} }
...@@ -2078,7 +2082,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, ...@@ -2078,7 +2082,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
out: out:
release_pte_pages(pte, _pte); release_pte_pages(pte, _pte);
trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero, trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result); referenced, writable, result);
return 0; return 0;
} }
...@@ -2576,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, ...@@ -2576,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
collapse_huge_page(mm, address, hpage, vma, node); collapse_huge_page(mm, address, hpage, vma, node);
} }
out: out:
trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced, trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
none_or_zero, result); none_or_zero, result);
return ret; return ret;
} }
......
...@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, ...@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
pte_t *pte; pte_t *pte;
spinlock_t *ptl; spinlock_t *ptl;
if (pmd_trans_huge_lock(pmd, vma, &ptl)) { ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
mc.precharge += HPAGE_PMD_NR; mc.precharge += HPAGE_PMD_NR;
spin_unlock(ptl); spin_unlock(ptl);
...@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, ...@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
union mc_target target; union mc_target target;
struct page *page; struct page *page;
if (pmd_trans_huge_lock(pmd, vma, &ptl)) { ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
if (mc.precharge < HPAGE_PMD_NR) { if (mc.precharge < HPAGE_PMD_NR) {
spin_unlock(ptl); spin_unlock(ptl);
return 0; return 0;
......
...@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, ...@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
unsigned char *vec = walk->private; unsigned char *vec = walk->private;
int nr = (end - addr) >> PAGE_SHIFT; int nr = (end - addr) >> PAGE_SHIFT;
if (pmd_trans_huge_lock(pmd, vma, &ptl)) { ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
memset(vec, 1, nr); memset(vec, 1, nr);
spin_unlock(ptl); spin_unlock(ptl);
goto out; goto out;
......
...@@ -175,7 +175,7 @@ static void __munlock_isolation_failed(struct page *page) ...@@ -175,7 +175,7 @@ static void __munlock_isolation_failed(struct page *page)
*/ */
unsigned int munlock_vma_page(struct page *page) unsigned int munlock_vma_page(struct page *page)
{ {
unsigned int nr_pages; int nr_pages;
struct zone *zone = page_zone(page); struct zone *zone = page_zone(page);
/* For try_to_munlock() and to serialize with page migration */ /* For try_to_munlock() and to serialize with page migration */
......
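The single type change above is the whole mlock accounting fix: munlock_vma_page() feeds -nr_pages into the NR_MLOCK vmstat update, and __mod_zone_page_state() takes a long. With unsigned int, the negation wraps in 32-bit unsigned arithmetic before it is widened, so the counter is bumped by roughly 4 billion instead of decremented. A standalone reduction in plain C (hypothetical values, assuming 64-bit long):

	#include <stdio.h>

	int main(void)
	{
		unsigned int u_nr = 512;	/* old type of nr_pages */
		int s_nr = 512;			/* new type */
		long bad = -u_nr;		/* wraps to 4294966784 before widening */
		long good = -s_nr;		/* -512, the intended delta */

		printf("unsigned negation: %ld\nsigned negation:   %ld\n", bad, good);
		return 0;
	}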