Commit 75caf310 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "5 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/asm-generic/topology.h: guard cpumask_of_node() macro argument
  fs/binfmt_elf.c: allocate initialized memory in fill_thread_core_info()
  mm: remove VM_BUG_ON(PageSlab()) from page_mapcount()
  mm,thp: stop leaking unreleased file pages
  mm/z3fold: silence kmemleak false positives of slots
parents d16eea2f 4377748c
fs/binfmt_elf.c
@@ -1733,7 +1733,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
 	    (!regset->active || regset->active(t->task, regset) > 0)) {
 		int ret;
 		size_t size = regset_size(t->task, regset);
-		void *data = kmalloc(size, GFP_KERNEL);
+		void *data = kzalloc(size, GFP_KERNEL);
 		if (unlikely(!data))
 			return 0;
 		ret = regset->get(t->task, regset,
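The buffer filled by regset->get() is later copied into the core dump, and a handler may write fewer than "size" bytes, so an uninitialized kmalloc() buffer would leak stale kernel heap contents to userspace. As an illustrative sketch only (not part of the commit): kzalloc() is simply kmalloc() with __GFP_ZERO, so the unwritten tail of the buffer is guaranteed to be zero.

/* Illustrative sketch, not from the commit: equivalent allocation to the
 * kzalloc(size, GFP_KERNEL) used in the fix. */
#include <linux/slab.h>

static void *alloc_regset_buf(size_t size)
{
	/* __GFP_ZERO makes any bytes ->get() does not overwrite read as 0. */
	return kmalloc(size, GFP_KERNEL | __GFP_ZERO);
}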
include/asm-generic/topology.h
@@ -48,7 +48,7 @@
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 #define cpumask_of_node(node)	((node) == 0 ? cpu_online_mask : cpu_none_mask)
 #else
-#define cpumask_of_node(node)	((void)node, cpu_online_mask)
+#define cpumask_of_node(node)	((void)(node), cpu_online_mask)
 #endif
 #endif
 #ifndef pcibus_to_node
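The extra parentheses matter because a cast binds more tightly than most operators, so an unguarded macro argument can be split apart by the cast. A hypothetical caller (for illustration only, not from the commit) shows the hazard:

/* With the old body ((void)node, cpu_online_mask), this call expands to
 *
 *   ((void)prefer_node ? this_node : 0, cpu_online_mask)
 *
 * where the cast binds to "prefer_node" alone, i.e.
 * "((void)prefer_node) ? this_node : 0", which does not compile because a
 * void expression cannot be a condition.  With ((void)(node), ...) the whole
 * argument is evaluated once (silencing unused-variable warnings) and then
 * discarded, whatever expression the caller passes.
 */
const struct cpumask *mask = cpumask_of_node(prefer_node ? this_node : 0);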
include/linux/mm.h
@@ -782,6 +782,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 extern void kvfree(const void *addr);
 
+/*
+ * Mapcount of compound page as a whole, does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any their tail sub-pages.
+ */
 static inline int compound_mapcount(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
@@ -801,10 +806,16 @@ static inline void page_mapcount_reset(struct page *page)
 int __page_mapcount(struct page *page);
 
+/*
+ * Mapcount of 0-order page; when compound sub-page, includes
+ * compound_mapcount().
+ *
+ * Result is undefined for pages which cannot be mapped into userspace.
+ * For example SLAB or special types of pages. See function page_has_type().
+ * They use this place in struct page differently.
+ */
 static inline int page_mapcount(struct page *page)
 {
-	VM_BUG_ON_PAGE(PageSlab(page), page);
 	if (unlikely(PageCompound(page)))
 		return __page_mapcount(page);
 	return atomic_read(&page->_mapcount) + 1;
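With the VM_BUG_ON() gone, the contract is documented instead of enforced at runtime: page_mapcount() is only meaningful for pages that can be mapped into userspace, since slab and page_has_type() pages reuse the _mapcount field for other purposes. A small sketch of a typical caller pattern (illustrative only, not from the commit):

/* Illustrative sketch: _mapcount is stored offset by -1, so page_mapcount()
 * returns the number of user page-table mappings of the page. */
static bool page_shared_by_multiple_tasks(struct page *page)
{
	return page_mapcount(page) > 1;
}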
mm/khugepaged.c
@@ -1692,6 +1692,7 @@ static void collapse_file(struct mm_struct *mm,
 		if (page_has_private(page) &&
 		    !try_to_release_page(page, GFP_KERNEL)) {
 			result = SCAN_PAGE_HAS_PRIVATE;
+			putback_lru_page(page);
 			goto out_unlock;
 		}
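collapse_file() has already taken the page off the LRU by this point, so every failure path must hand it back with putback_lru_page(); otherwise the page stays off the LRU forever and can never be reclaimed, which is the file-page leak the commit title refers to. A simplified sketch of the pairing (names like can_collapse() are hypothetical, not the actual collapse_file() code):

if (isolate_lru_page(page))
	goto out;			/* never isolated, nothing to undo */

if (!can_collapse(page)) {		/* hypothetical failure check */
	putback_lru_page(page);		/* undo the isolation before bailing out */
	goto out;
}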
mm/z3fold.c
@@ -43,6 +43,7 @@
 #include <linux/spinlock.h>
 #include <linux/zpool.h>
 #include <linux/magic.h>
+#include <linux/kmemleak.h>
 
 /*
  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
@@ -215,6 +216,8 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
 			(gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
 
 	if (slots) {
+		/* It will be freed separately in free_handle(). */
+		kmemleak_not_leak(slots);
 		memset(slots->slot, 0, sizeof(slots->slot));
 		slots->pool = (unsigned long)pool;
 		rwlock_init(&slots->lock);
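kmemleak reports an object as leaked when it can no longer find a scannable pointer to it; z3fold keeps the slots object reachable only through encoded handles, which the scanner cannot follow, so the reports are false positives. kmemleak_not_leak() marks the object as intentionally not directly referenced. A minimal sketch of the same pattern, with illustrative names that are not z3fold's:

/* Minimal sketch: when the only reference to an allocation is kept in a form
 * kmemleak cannot scan (here, a tagged integer handle), mark it so the
 * scanner does not flag a false positive.  It is still freed explicitly
 * later via the decoded handle. */
#include <linux/slab.h>
#include <linux/kmemleak.h>

static unsigned long alloc_encoded_handle(gfp_t gfp)
{
	void *obj = kmalloc(64, gfp);

	if (!obj)
		return 0;
	kmemleak_not_leak(obj);
	return (unsigned long)obj | 1;	/* low bit used as a tag, hiding the pointer */
}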