Commit 31bb5feb authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc mm fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: memcontrol: fix percpu vmstats and vmevents flush
  mm, memcg: do not set reclaim_state on soft limit reclaim
  mailmap: add aliases for Dmitry Safonov
  mm/z3fold.c: fix lock/unlock imbalance in z3fold_page_isolate
  mm, memcg: partially revert "mm/memcontrol.c: keep local VM counters in sync with the hierarchical ones"
  mm/zsmalloc.c: fix build when CONFIG_COMPACTION=n
  mm: memcontrol: flush percpu slab vmstats on kmem offlining
parents e0f14b8c 6c1c2808
......@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
......
......@@ -215,8 +215,9 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
-NR_SLAB_RECLAIMABLE,
-NR_SLAB_UNRECLAIMABLE,
+NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
+NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
+ * memcg_flush_percpu_vmstats() first. */
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
......
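The comment added above records an ordering dependency: later in this merge, memcg_flush_percpu_vmstats() gains a slab-only mode that selects which counters to flush by index range, so NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE must stay adjacent in the enum. A minimal userspace sketch of that pattern (all names here are hypothetical, not the kernel's):

	#include <stdio.h>

	enum stat_item {
		STAT_FILE,
		STAT_SLAB_RECLAIMABLE,		/* start of the slab range */
		STAT_SLAB_UNRECLAIMABLE,	/* end of the slab range */
		STAT_SHMEM,
		NR_STAT_ITEMS,
	};

	static long counters[NR_STAT_ITEMS] = { 1, 42, 7, 3 };

	int main(void)
	{
		/* flushing "only the slab counters" is a walk over the
		 * contiguous sub-range; reordering the enum breaks it */
		for (int i = STAT_SLAB_RECLAIMABLE; i <= STAT_SLAB_UNRECLAIMABLE; i++)
			printf("flush idx %d -> %ld\n", i, counters[i]);
		return 0;
	}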
......@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
/* Update memcg */
__mod_memcg_state(memcg, idx, val);
+/* Update lruvec */
+__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
struct mem_cgroup_per_node *pi;
-/*
- * Batch local counters to keep them in sync with
- * the hierarchical ones.
- */
-__this_cpu_add(pn->lruvec_stat_local->count[idx], x);
for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
atomic_long_add(x, &pi->lruvec_stat[idx]);
x = 0;
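This hunk is the partial revert itself: the local (non-hierarchical) per-cpu lruvec counter is once again bumped by val on every update, while only the hierarchical counters keep the MEMCG_CHARGE_BATCH batching; the removed lines had folded the local update into the batch as well. A rough userspace sketch of the resulting split, modeling the per-cpu counters as plain variables (names and the batch size are illustrative only):

	#include <stdatomic.h>
	#include <stdlib.h>

	#define CHARGE_BATCH	32	/* stand-in for MEMCG_CHARGE_BATCH */

	static long local_count;		/* precise "local" counter */
	static long pending;			/* per-cpu batch for the hierarchy */
	static atomic_long hierarchical;	/* shared hierarchical counter */

	static void mod_state(long val)
	{
		/* updated unconditionally again (the re-added lines above) */
		local_count += val;

		/* hierarchical counters are still only touched in batches */
		pending += val;
		if (labs(pending) > CHARGE_BATCH) {
			atomic_fetch_add(&hierarchical, pending);
			pending = 0;
		}
	}

	int main(void)
	{
		for (int i = 0; i < 100; i++)
			mod_state(1);
		return (int)local_count - 100;	/* 0: the local counter stayed precise */
	}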
......@@ -3260,37 +3258,49 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
}
}
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
{
unsigned long stat[MEMCG_NR_STAT];
struct mem_cgroup *mi;
int node, cpu, i;
+int min_idx, max_idx;
-for (i = 0; i < MEMCG_NR_STAT; i++)
+if (slab_only) {
+	min_idx = NR_SLAB_RECLAIMABLE;
+	max_idx = NR_SLAB_UNRECLAIMABLE;
+} else {
+	min_idx = 0;
+	max_idx = MEMCG_NR_STAT;
+}
+for (i = min_idx; i < max_idx; i++)
stat[i] = 0;
for_each_online_cpu(cpu)
-for (i = 0; i < MEMCG_NR_STAT; i++)
-stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
+for (i = min_idx; i < max_idx; i++)
+stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-for (i = 0; i < MEMCG_NR_STAT; i++)
+for (i = min_idx; i < max_idx; i++)
atomic_long_add(stat[i], &mi->vmstats[i]);
+if (!slab_only)
+max_idx = NR_VM_NODE_STAT_ITEMS;
for_each_node(node) {
struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
struct mem_cgroup_per_node *pi;
-for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+for (i = min_idx; i < max_idx; i++)
stat[i] = 0;
for_each_online_cpu(cpu)
-for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-stat[i] += raw_cpu_read(
-pn->lruvec_stat_cpu->count[i]);
+for (i = min_idx; i < max_idx; i++)
+stat[i] += per_cpu(
+pn->lruvec_stat_cpu->count[i], cpu);
for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+for (i = min_idx; i < max_idx; i++)
atomic_long_add(stat[i], &pi->lruvec_stat[i]);
}
}
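The central bug fixed in this function is visible in the converted loops: inside for_each_online_cpu(), raw_cpu_read() reads the copy belonging to the CPU executing the flush, so the old code added the local CPU's value once per online CPU instead of summing every CPU's contribution; per_cpu(..., cpu) reads the copy of the CPU currently being iterated. A simplified userspace model with per-cpu storage as an array indexed by cpu (sample values are made up):

	#include <stdio.h>

	#define NR_CPUS		4
	#define THIS_CPU	0	/* the CPU running the flush */

	static long percpu_stat[NR_CPUS] = { 5, 7, 1, 3 };

	int main(void)
	{
		long wrong = 0, right = 0;

		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			wrong += percpu_stat[THIS_CPU];	/* old: raw_cpu_read() */
			right += percpu_stat[cpu];	/* new: per_cpu(..., cpu) */
		}

		printf("old-style sum: %ld, fixed sum: %ld\n", wrong, right);
		return 0;	/* prints 20 vs 16 for these sample values */
	}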
......@@ -3306,8 +3316,8 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
for_each_online_cpu(cpu)
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
-events[i] += raw_cpu_read(
-memcg->vmstats_percpu->events[i]);
+events[i] += per_cpu(memcg->vmstats_percpu->events[i],
+cpu);
for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
......@@ -3363,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
if (!parent)
parent = root_mem_cgroup;
+/*
+ * Deactivate and reparent kmem_caches. Then flush percpu
+ * slab statistics to have precise values at the parent and
+ * all ancestor levels. It's required to keep slab stats
+ * accurate after the reparenting of kmem_caches.
+ */
memcg_deactivate_kmem_caches(memcg, parent);
+memcg_flush_percpu_vmstats(memcg, true);
kmemcg_id = memcg->kmemcg_id;
BUG_ON(kmemcg_id < 0);
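The comment added above spells out the ordering: kmem_caches are deactivated and reparented first, and the per-cpu slab counters are then flushed so that the parent and every ancestor already account for the child's accumulated slab pages. A toy model of that flush-to-ancestors step, with made-up types loosely standing in for the memcg node tree:

	#include <stdio.h>

	struct group {
		struct group *parent;
		long slab_stat;		/* hierarchical counter */
		long percpu_pending;	/* delta not yet propagated */
	};

	/* push the pending slab delta into the group and all ancestors,
	 * roughly what memcg_flush_percpu_vmstats(memcg, true) does here */
	static void flush_slab_stats(struct group *g)
	{
		long delta = g->percpu_pending;

		for (struct group *gi = g; gi; gi = gi->parent)
			gi->slab_stat += delta;
		g->percpu_pending = 0;
	}

	int main(void)
	{
		struct group root  = { NULL,  0, 0 };
		struct group child = { &root, 0, 128 };	/* unflushed pages */

		flush_slab_stats(&child);
		printf("root slab stat: %ld\n", root.slab_stat);	/* 128 */
		return 0;
	}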
......@@ -4740,7 +4757,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
* Flush percpu vmstats and vmevents to guarantee the value correctness
* on parent's and all ancestor levels.
*/
-memcg_flush_percpu_vmstats(memcg);
+memcg_flush_percpu_vmstats(memcg, false);
memcg_flush_percpu_vmevents(memcg);
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
......
......@@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#ifdef CONFIG_MEMCG
+/* Only used by soft limit reclaim. Do not reuse for anything else. */
unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
......@@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
};
unsigned long lru_pages;
-set_task_reclaim_state(current, &sc.reclaim_state);
+WARN_ON_ONCE(!current->reclaim_state);
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
......@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
-set_task_reclaim_state(current, NULL);
*nr_scanned = sc.nr_scanned;
return sc.nr_reclaimed;
......
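The vmscan.c change removes the set_task_reclaim_state() pair from mem_cgroup_shrink_node(): soft limit reclaim is only reached from reclaim paths that have already attached a reclaim_state to current, so setting it here and clearing it to NULL on return would wipe out the caller's pointer and its slab accounting; the new WARN_ON_ONCE() documents that expectation instead. A small sketch of the nesting problem, with a plain global standing in for current->reclaim_state:

	#include <stdio.h>
	#include <stddef.h>

	struct reclaim_state { unsigned long reclaimed_slab; };

	static struct reclaim_state *task_reclaim_state; /* ~ current->reclaim_state */

	static void inner_reclaim_buggy(void)
	{
		struct reclaim_state rs = { 0 };

		task_reclaim_state = &rs;	/* overwrites the caller's state */
		/* ... reclaim work ... */
		task_reclaim_state = NULL;	/* and the caller's pointer is gone */
	}

	int main(void)
	{
		struct reclaim_state outer = { 0 };

		task_reclaim_state = &outer;	/* set by the outer reclaim path */
		inner_reclaim_buggy();

		/* slab accounting after this point would be silently dropped */
		printf("reclaim_state after inner call: %p\n", (void *)task_reclaim_state);
		return 0;
	}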
......@@ -1406,6 +1406,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
* should freak out.
*/
WARN(1, "Z3fold is experiencing kref problems\n");
+z3fold_page_unlock(zhdr);
return false;
}
z3fold_page_unlock(zhdr);
......
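The z3fold fix is a lock/unlock balance issue: z3fold_page_isolate() holds the page lock when it reaches the kref sanity check, and before this change the WARN branch returned false while still holding it; the added z3fold_page_unlock() mirrors the unlock already present on the other exit path. The general shape of the pattern, with hypothetical names rather than the z3fold API:

	#include <stdbool.h>
	#include <pthread.h>

	struct page_hdr {
		pthread_mutex_t lock;
		int refcount;
	};

	static bool isolate(struct page_hdr *h)
	{
		pthread_mutex_lock(&h->lock);

		if (h->refcount == 0) {
			/* early exit must drop the lock too (the added line) */
			pthread_mutex_unlock(&h->lock);
			return false;
		}

		/* ... isolation work ... */
		pthread_mutex_unlock(&h->lock);
		return true;
	}

	int main(void)
	{
		struct page_hdr h = { PTHREAD_MUTEX_INITIALIZER, 0 };

		return isolate(&h) ? 1 : 0;
	}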
......@@ -2412,7 +2412,9 @@ struct zs_pool *zs_create_pool(const char *name)
if (!pool->name)
goto err;
+#ifdef CONFIG_COMPACTION
init_waitqueue_head(&pool->migration_wait);
+#endif
if (create_cache(pool))
goto err;
......
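The zsmalloc fix compiles the init_waitqueue_head() call only when CONFIG_COMPACTION is enabled; per the commit subject, the unguarded call broke the build with CONFIG_COMPACTION=n, which suggests the migration_wait member itself is only defined in that configuration. The general shape, with a made-up config symbol and struct:

	#include <stdio.h>

	/* #define MY_FEATURE 1 */	/* toggle to mimic CONFIG_COMPACTION=y/n */

	struct pool {
		const char *name;
	#ifdef MY_FEATURE
		int migration_wait;	/* member only exists with the feature on */
	#endif
	};

	static void pool_init(struct pool *p, const char *name)
	{
		p->name = name;
	#ifdef MY_FEATURE
		p->migration_wait = 0;	/* init guarded by the same symbol */
	#endif
	}

	int main(void)
	{
		struct pool p;

		pool_init(&p, "demo");
		printf("%s\n", p.name);
		return 0;
	}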