Commit 14970f20 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "20 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  drivers/misc/sgi-gru/grumain.c: remove bogus 0x prefix from printk
  cris/arch-v32: cryptocop: print a hex number after a 0x prefix
  ipack: print a hex number after a 0x prefix
  block: DAC960: print a hex number after a 0x prefix
  fs: exofs: print a hex number after a 0x prefix
  lib/genalloc.c: start search from start of chunk
  mm: memcontrol: do not recurse in direct reclaim
  CREDITS: update credit information for Martin Kepplinger
  proc: fix NULL dereference when reading /proc/<pid>/auxv
  mm: kmemleak: ensure that the task stack is not freed during scanning
  lib/stackdepot.c: bump stackdepot capacity from 16MB to 128MB
  latent_entropy: raise CONFIG_FRAME_WARN by default
  kconfig.h: remove config_enabled() macro
  ipc: account for kmem usage on mqueue and msg
  mm/slab: improve performance of gathering slabinfo stats
  mm: page_alloc: use KERN_CONT where appropriate
  mm/list_lru.c: avoid error-path NULL pointer deref
  h8300: fix syscall restarting
  kcov: properly check if we are in an interrupt
  mm/slab: fix kmemcg cache creation delayed issue
parents 67463e54 8e819101
@@ -1864,10 +1864,11 @@ S: The Netherlands
 N: Martin Kepplinger
 E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
 W: http://www.martinkepplinger.com
 D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
 S: Garnisonstraße 26
 S: 4020 Linz
 S: Austria
...
@@ -3149,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
         printk("print_dma_descriptors start\n");
         printk("iop:\n");
-        printk("\tsid: 0x%lld\n", iop->sid);
+        printk("\tsid: 0x%llx\n", iop->sid);
         printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
         printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
...
@@ -31,7 +31,6 @@ struct thread_info {
         int cpu;                        /* cpu we're on */
         int preempt_count;              /* 0 => preemptable, <0 => BUG */
         mm_segment_t addr_limit;
-        struct restart_block restart_block;
 };
 /*
@@ -44,9 +43,6 @@ struct thread_info {
         .cpu            = 0,                    \
         .preempt_count  = INIT_PREEMPT_COUNT,   \
         .addr_limit     = KERNEL_DS,            \
-        .restart_block  = {                     \
-                .fn = do_no_restart_syscall,    \
-        },                                      \
 }
 #define init_thread_info (init_thread_union.thread_info)
...
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
         unsigned int er0;
         /* Always make any pending restarted system calls return -EINTR */
-        current_thread_info()->restart_block.fn = do_no_restart_syscall;
+        current->restart_block.fn = do_no_restart_syscall;
         /* restore passed registers */
 #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
...
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
          * consistent with the vaddr_start/vaddr_end variables.
          */
         BUILD_BUG_ON(vaddr_start >= vaddr_end);
-        BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+        BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
                      vaddr_end >= EFI_VA_START);
-        BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
-                      config_enabled(CONFIG_EFI)) &&
+        BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+                      IS_ENABLED(CONFIG_EFI)) &&
                       vaddr_end >= __START_KERNEL_map);
         BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
...
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
   case DAC960_PD_Controller:
         if (!request_region(Controller->IO_Address, 0x80,
                             Controller->FullModelName)) {
-                DAC960_Error("IO port 0x%d busy for Controller at\n",
+                DAC960_Error("IO port 0x%lx busy for Controller at\n",
                              Controller, Controller->IO_Address);
                 goto Failure;
         }
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
   case DAC960_P_Controller:
         if (!request_region(Controller->IO_Address, 0x80,
                             Controller->FullModelName)){
-                DAC960_Error("IO port 0x%d busy for Controller at\n",
+                DAC960_Error("IO port 0x%lx busy for Controller at\n",
                              Controller, Controller->IO_Address);
                 goto Failure;
         }
...
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                        idev->id_vendor, idev->id_device);
 }
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
 static DEVICE_ATTR_RO(id);
 static DEVICE_ATTR_RO(id_device);
...
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
         spin_lock(&gru->gs_asid_lock);
         BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
         asids->mt_ctxbitmap ^= ctxbitmap;
-        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
         spin_unlock(&gru->gs_asid_lock);
         spin_unlock(&gms->ms_asid_lock);
...
@@ -137,7 +137,7 @@ static bool exofs_check_page(struct page *page)
 bad_entry:
         EXOFS_ERR(
                 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
-                "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
+                "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
                 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
                 _LLU(le64_to_cpu(p->inode_no)),
                 rec_len, p->name_len);
...
@@ -1012,6 +1012,9 @@ static ssize_t auxv_read(struct file *file, char __user *buf,
 {
         struct mm_struct *mm = file->private_data;
         unsigned int nwords = 0;
+        if (!mm)
+                return 0;
         do {
                 nwords += 2;
         } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
...
@@ -70,7 +70,7 @@ KSYM(__kcrctab_\name):
 #include <generated/autoksyms.h>
 #define __EXPORT_SYMBOL(sym, val, sec) \
-        __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+        __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
 #define __cond_export_sym(sym, val, sec, conf) \
         ___cond_export_sym(sym, val, sec, conf)
 #define ___cond_export_sym(sym, val, sec, enabled) \
...
@@ -31,7 +31,6 @@
  * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
  * the last step cherry picks the 2nd arg, we get a zero.
  */
-#define config_enabled(cfg) ___is_defined(cfg)
 #define __is_defined(x) ___is_defined(x)
 #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
 #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
  * otherwise. For boolean options, this is equivalent to
  * IS_ENABLED(CONFIG_FOO).
  */
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
 /*
  * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
  * otherwise.
  */
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
 /*
  * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
...
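For readers following the config_enabled() removal: the __is_defined() chain that replaces it relies on the "(... 1, 0) pair" trick the comment above mentions. A standalone sketch of that trick in plain C (the two helper definitions here are reproduced from memory for illustration and may differ in detail from the real kconfig.h):

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)              ___is_defined(x)
#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define CONFIG_FOO 1                         /* pretend "config FOO" is set to y */

int foo = __is_defined(CONFIG_FOO);  /* pastes to __ARG_PLACEHOLDER_1 -> "0," -> second arg is 1 */
int bar = __is_defined(CONFIG_BAR);  /* no paste, junk stays in the first arg -> second arg is 0 */

IS_BUILTIN() and IS_MODULE() above are just this test applied to CONFIG_FOO and CONFIG_FOO_MODULE respectively, which is why they can switch to __is_defined() once config_enabled() goes away.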
@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
         size_t alen;
         alen = min(len, DATALEN_MSG);
-        msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+        msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
         if (msg == NULL)
                 return NULL;
@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
         while (len > 0) {
                 struct msg_msgseg *seg;
                 alen = min(len, DATALEN_SEG);
-                seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
+                seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
                 if (seg == NULL)
                         goto out_err;
                 *pseg = seg;
...
@@ -53,8 +53,15 @@ void notrace __sanitizer_cov_trace_pc(void)
         /*
          * We are interested in code coverage as a function of a syscall inputs,
          * so we ignore code executed in interrupts.
+         * The checks for whether we are in an interrupt are open-coded, because
+         * 1. We can't use in_interrupt() here, since it also returns true
+         *    when we are inside local_bh_disable() section.
+         * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
+         *    since that leads to slower generated code (three separate tests,
+         *    one for each of the flags).
          */
-        if (!t || in_interrupt())
+        if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
+                                                        | NMI_MASK)))
                 return;
         mode = READ_ONCE(t->kcov_mode);
         if (mode == KCOV_MODE_TRACE) {
...
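The new comment explains why the check is open-coded: in_irq(), in_serving_softirq() and in_nmi() all inspect different fields of the same preempt_count() value, so one combined mask test is enough, and using SOFTIRQ_OFFSET (the "currently serving a softirq" increment) rather than the full softirq mask keeps local_bh_disable() sections out of it. A userspace sketch with the field positions as assumed from include/linux/preempt.h (illustrative values, not authoritative):

#include <stdio.h>

#define SOFTIRQ_OFFSET  (1UL << 8)      /* assumed: set while serving a softirq */
#define HARDIRQ_MASK    (0xfUL << 16)   /* assumed: hardirq nesting count */
#define NMI_MASK        (1UL << 20)     /* assumed: in-NMI flag */

static int in_irq_softirq_or_nmi(unsigned long pc)
{
        /* one test instead of (in_irq() | in_serving_softirq() | in_nmi()) */
        return (pc & (HARDIRQ_MASK | SOFTIRQ_OFFSET | NMI_MASK)) != 0;
}

int main(void)
{
        printf("task context:     %d\n", in_irq_softirq_or_nmi(0));              /* 0 */
        printf("hard interrupt:   %d\n", in_irq_softirq_or_nmi(1UL << 16));      /* 1 */
        printf("serving softirq:  %d\n", in_irq_softirq_or_nmi(SOFTIRQ_OFFSET)); /* 1 */
        printf("bh disabled only: %d\n", in_irq_softirq_or_nmi(2UL << 8));       /* 0 */
        return 0;
}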
@@ -198,6 +198,7 @@ config FRAME_WARN
         int "Warn for stack frames larger than (needs gcc 4.4)"
         range 0 8192
         default 0 if KASAN
+        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
         default 1024 if !64BIT
         default 2048 if 64BIT
         help
...
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
         struct gen_pool_chunk *chunk;
         unsigned long addr = 0;
         int order = pool->min_alloc_order;
-        int nbits, start_bit = 0, end_bit, remain;
+        int nbits, start_bit, end_bit, remain;
 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
         BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
                 if (size > atomic_read(&chunk->avail))
                         continue;
+                start_bit = 0;
                 end_bit = chunk_size(chunk) >> order;
 retry:
                 start_bit = algo(chunk->bits, end_bit, start_bit,
...
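The one-line fix above resets the search cursor for every chunk. A toy illustration (plain C, not the genalloc API) of what went wrong without the reset: the failing search position from one chunk leaked into the next, so free space at the start of the next chunk was never considered:

#include <stdio.h>

#define CHUNK_BITS 8

/* first run of 'nbits' zero bits at or after 'start', or CHUNK_BITS if none */
static int find_zero_run(const unsigned char *bits, int start, int nbits)
{
        for (int i = start; i + nbits <= CHUNK_BITS; i++) {
                int ok = 1;
                for (int j = 0; j < nbits; j++)
                        if (bits[i + j])
                                ok = 0;
                if (ok)
                        return i;
        }
        return CHUNK_BITS;
}

int main(void)
{
        /* chunk 0 is fully busy; chunk 1 is free only in bits 0..3 */
        unsigned char chunk0[CHUNK_BITS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
        unsigned char chunk1[CHUNK_BITS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        const unsigned char *chunks[] = { chunk0, chunk1 };
        int start_bit = 0;      /* buggy: initialized once, outside the loop */

        for (int c = 0; c < 2; c++) {
                /* start_bit = 0;  <-- the fix: reset the cursor per chunk */
                int from = start_bit;
                start_bit = find_zero_run(chunks[c], from, 4);
                printf("chunk %d: searched from bit %d -> %s\n", c, from,
                       start_bit < CHUNK_BITS ? "found" : "missed");
        }
        return 0;
}

With the commented-out reset applied, chunk 1 is searched from bit 0 and the free run is found.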
@@ -50,7 +50,7 @@
                         STACK_ALLOC_ALIGN)
 #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                 STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
 #define STACK_ALLOC_MAX_SLABS \
         (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
...
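The 16MB and 128MB figures in the patch title follow directly from this cap change, assuming the depot's usual STACK_ALLOC_ORDER of 2 (four 4KB pages per depot "slab"); a back-of-envelope check:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;
        unsigned long stack_alloc_order = 2;                         /* assumed */
        unsigned long slab_bytes = page_size << stack_alloc_order;   /* 16 KB */

        printf("old cap: %lu MB\n", (1024UL * slab_bytes) >> 20);    /* 16 MB */
        printf("new cap: %lu MB\n", (8192UL * slab_bytes) >> 20);    /* 128 MB */
        return 0;
}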
@@ -1453,8 +1453,11 @@ static void kmemleak_scan(void)
                 read_lock(&tasklist_lock);
                 do_each_thread(g, p) {
-                        scan_block(task_stack_page(p), task_stack_page(p) +
-                                   THREAD_SIZE, NULL);
+                        void *stack = try_get_task_stack(p);
+                        if (stack) {
+                                scan_block(stack, stack + THREAD_SIZE, NULL);
+                                put_task_stack(p);
+                        }
                 } while_each_thread(g, p);
                 read_unlock(&tasklist_lock);
         }
...
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
         err = memcg_init_list_lru(lru, memcg_aware);
         if (err) {
                 kfree(lru->node);
+                /* Do this so a list_lru_destroy() doesn't crash: */
+                lru->node = NULL;
                 goto out;
         }
...
@@ -1917,6 +1917,15 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                      current->flags & PF_EXITING))
                 goto force;
+        /*
+         * Prevent unbounded recursion when reclaim operations need to
+         * allocate memory. This might exceed the limits temporarily,
+         * but we prefer facilitating memory reclaim and getting back
+         * under the limit over triggering OOM kills in these cases.
+         */
+        if (unlikely(current->flags & PF_MEMALLOC))
+                goto force;
+
         if (unlikely(task_in_memcg_oom(current)))
                 goto nomem;
...
@@ -4224,7 +4224,7 @@ static void show_migration_types(unsigned char type)
         }
         *p = '\0';
-        printk("(%s) ", tmp);
+        printk(KERN_CONT "(%s) ", tmp);
 }
 /*
@@ -4335,7 +4335,8 @@ void show_free_areas(unsigned int filter)
                         free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
                 show_node(zone);
-                printk("%s"
+                printk(KERN_CONT
+                        "%s"
                         " free:%lukB"
                         " min:%lukB"
                         " low:%lukB"
@@ -4382,8 +4383,8 @@ void show_free_areas(unsigned int filter)
                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
                 printk("lowmem_reserve[]:");
                 for (i = 0; i < MAX_NR_ZONES; i++)
-                        printk(" %ld", zone->lowmem_reserve[i]);
-                printk("\n");
+                        printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+                printk(KERN_CONT "\n");
         }
         for_each_populated_zone(zone) {
@@ -4394,7 +4395,7 @@ void show_free_areas(unsigned int filter)
                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
                         continue;
                 show_node(zone);
-                printk("%s: ", zone->name);
+                printk(KERN_CONT "%s: ", zone->name);
                 spin_lock_irqsave(&zone->lock, flags);
                 for (order = 0; order < MAX_ORDER; order++) {
@@ -4412,11 +4413,12 @@ void show_free_areas(unsigned int filter)
                 }
                 spin_unlock_irqrestore(&zone->lock, flags);
                 for (order = 0; order < MAX_ORDER; order++) {
-                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
+                        printk(KERN_CONT "%lu*%lukB ",
+                                nr[order], K(1UL) << order);
                         if (nr[order])
                                 show_migration_types(types[order]);
                 }
-                printk("= %lukB\n", K(total));
+                printk(KERN_CONT "= %lukB\n", K(total));
         }
         hugetlb_show_meminfo();
...
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
         spin_lock_init(&parent->list_lock);
         parent->free_objects = 0;
         parent->free_touched = 0;
+        parent->num_slabs = 0;
 }
 #define MAKE_LIST(cachep, listp, slab, nodeid) \
@@ -966,7 +967,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
          * guaranteed to be valid until irq is re-enabled, because it will be
          * freed after synchronize_sched().
          */
-        if (force_change)
+        if (old_shared && force_change)
                 synchronize_sched();
 fail:
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
         for_each_kmem_cache_node(cachep, node, n) {
                 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
                 unsigned long active_slabs = 0, num_slabs = 0;
+                unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+                unsigned long num_slabs_full;
                 spin_lock_irqsave(&n->list_lock, flags);
-                list_for_each_entry(page, &n->slabs_full, lru) {
-                        active_objs += cachep->num;
-                        active_slabs++;
-                }
+                num_slabs = n->num_slabs;
                 list_for_each_entry(page, &n->slabs_partial, lru) {
                         active_objs += page->active;
-                        active_slabs++;
+                        num_slabs_partial++;
                 }
                 list_for_each_entry(page, &n->slabs_free, lru)
-                        num_slabs++;
+                        num_slabs_free++;
                 free_objects += n->free_objects;
                 spin_unlock_irqrestore(&n->list_lock, flags);
-                num_slabs += active_slabs;
                 num_objs = num_slabs * cachep->num;
+                active_slabs = num_slabs - num_slabs_free;
+                num_slabs_full = num_slabs -
+                        (num_slabs_partial + num_slabs_free);
+                active_objs += (num_slabs_full * cachep->num);
                 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
                         node, active_slabs, num_slabs, active_objs, num_objs,
                         free_objects);
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache,
                 page = list_entry(p, struct page, lru);
                 list_del(&page->lru);
+                n->num_slabs--;
                 /*
                  * Safe to drop the lock. The slab is no longer linked
                  * to the cache.
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
                 list_add_tail(&page->lru, &(n->slabs_free));
         else
                 fixup_slab_list(cachep, n, page, &list);
+        n->num_slabs++;
         STATS_INC_GROWN(cachep);
         n->free_objects += cachep->num - page->active;
         spin_unlock(&n->list_lock);
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
                         page = list_last_entry(&n->slabs_free, struct page, lru);
                         list_move(&page->lru, list);
+                        n->num_slabs--;
                 }
         }
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
         unsigned long num_objs;
         unsigned long active_slabs = 0;
         unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+        unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+        unsigned long num_slabs_full = 0;
         const char *name;
         char *error = NULL;
         int node;
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
                 check_irq_on();
                 spin_lock_irq(&n->list_lock);
-                list_for_each_entry(page, &n->slabs_full, lru) {
-                        if (page->active != cachep->num && !error)
-                                error = "slabs_full accounting error";
-                        active_objs += cachep->num;
-                        active_slabs++;
-                }
+                num_slabs += n->num_slabs;
                 list_for_each_entry(page, &n->slabs_partial, lru) {
                         if (page->active == cachep->num && !error)
                                 error = "slabs_partial accounting error";
                         if (!page->active && !error)
                                 error = "slabs_partial accounting error";
                         active_objs += page->active;
-                        active_slabs++;
+                        num_slabs_partial++;
                 }
                 list_for_each_entry(page, &n->slabs_free, lru) {
                         if (page->active && !error)
                                 error = "slabs_free accounting error";
-                        num_slabs++;
+                        num_slabs_free++;
                 }
                 free_objects += n->free_objects;
                 if (n->shared)
                         shared_avail += n->shared->avail;
                 spin_unlock_irq(&n->list_lock);
         }
-        num_slabs += active_slabs;
         num_objs = num_slabs * cachep->num;
+        active_slabs = num_slabs - num_slabs_free;
+        num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+        active_objs += (num_slabs_full * cachep->num);
         if (num_objs - active_objs != free_objects && !error)
                 error = "free_objects accounting error";
...
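To see what the rewritten stats code computes, a made-up example in plain C (the numbers are invented): a node holding 10 slabs of 4 objects each, with 2 free and 3 partial slabs whose pages carry 7 in-use objects, should report 8 active slabs and 27 active objects, and the full-slab list never needs to be walked because every object on a full slab is active by definition:

#include <stdio.h>

int main(void)
{
        unsigned long cachep_num = 4;           /* objects per slab */
        unsigned long num_slabs = 10;           /* n->num_slabs */
        unsigned long num_slabs_free = 2;
        unsigned long num_slabs_partial = 3;
        unsigned long active_objs = 7;          /* summed over partial slabs */

        unsigned long num_objs = num_slabs * cachep_num;
        unsigned long active_slabs = num_slabs - num_slabs_free;
        unsigned long num_slabs_full = num_slabs -
                (num_slabs_partial + num_slabs_free);

        active_objs += num_slabs_full * cachep_num;

        /* prints "slabs: 8/10, objs: 27/40" */
        printf("slabs: %lu/%lu, objs: %lu/%lu\n",
               active_slabs, num_slabs, active_objs, num_objs);
        return 0;
}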
@@ -432,6 +432,7 @@ struct kmem_cache_node {
         struct list_head slabs_partial; /* partial list first, better asm code */
         struct list_head slabs_full;
         struct list_head slabs_free;
+        unsigned long num_slabs;
         unsigned long free_objects;
         unsigned int free_limit;
         unsigned int colour_next;       /* Per-node cache coloring */
...
@@ -3043,7 +3043,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                             sc.gfp_mask,
                                             sc.reclaim_idx);
+        current->flags |= PF_MEMALLOC;
         nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+        current->flags &= ~PF_MEMALLOC;
         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
...
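The memcontrol.c and vmscan.c hunks are two halves of one reentrancy guard: memcg reclaim now marks the task with PF_MEMALLOC, and try_charge() lets a marked task exceed the limit rather than recurse back into reclaim. A toy sketch of that pattern in plain C (names and flow simplified, not the kernel code):

#include <stdio.h>

#define PF_MEMALLOC 0x1

static unsigned int current_flags;

static int try_charge(void);

static void reclaim(void)
{
        current_flags |= PF_MEMALLOC;
        try_charge();           /* reclaim itself may need to charge memory */
        current_flags &= ~PF_MEMALLOC;
}

static int try_charge(void)
{
        if (current_flags & PF_MEMALLOC) {
                puts("charge forced for the reclaiming task, no recursion");
                return 0;
        }
        puts("over limit: entering reclaim");
        reclaim();
        return 0;
}

int main(void)
{
        return try_charge();
}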