Commit f51bdd2e authored by Shaohua Li, committed by Linus Torvalds

mm: fix a vmscan warning

I get the below warning:

  BUG: using smp_processor_id() in preemptible [00000000] code: bash/746
  caller is native_sched_clock+0x37/0x6e
  Pid: 746, comm: bash Tainted: G        W   3.0.0+ #254
  Call Trace:
   [<ffffffff813435c6>] debug_smp_processor_id+0xc2/0xdc
   [<ffffffff8104158d>] native_sched_clock+0x37/0x6e
   [<ffffffff81116219>] try_to_free_mem_cgroup_pages+0x7d/0x270
   [<ffffffff8114f1f8>] mem_cgroup_force_empty+0x24b/0x27a
   [<ffffffff8114ff21>] ? sys_close+0x38/0x138
   [<ffffffff8114ff21>] ? sys_close+0x38/0x138
   [<ffffffff8114f257>] mem_cgroup_force_empty_write+0x17/0x19
   [<ffffffff810c72fb>] cgroup_file_write+0xa8/0xba
   [<ffffffff811522d2>] vfs_write+0xb3/0x138
   [<ffffffff8115241a>] sys_write+0x4a/0x71
   [<ffffffff8114ffd9>] ? sys_close+0xf0/0x138
   [<ffffffff8176deab>] system_call_fastpath+0x16/0x1b

sched_clock() can't be used with preemption enabled.  And we don't need a
fast approach to get the clock here, so let's use the ktime API.
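
A minimal sketch of the ktime-based timing pattern this change amounts to
(the helper name timed_reclaim_ns() is hypothetical; the actual patch
open-codes the calls in mem_cgroup_shrink_node_zone() and
try_to_free_mem_cgroup_pages()):

  #include <linux/ktime.h>

  /*
   * ktime_get() reads the monotonic clock and is safe to call with
   * preemption enabled, unlike sched_clock(), which may consult a
   * per-CPU clock and so trips the smp_processor_id() debug check
   * shown in the trace above.
   */
  static s64 timed_reclaim_ns(void (*reclaim)(void *arg), void *arg)
  {
  	ktime_t start, end;

  	start = ktime_get();
  	reclaim(arg);		/* e.g. shrink_zone() or do_try_to_free_pages() */
  	end = ktime_get();

  	/* elapsed nanoseconds, as accumulated into rec->elapsed */
  	return ktime_to_ns(ktime_sub(end, start));
  }
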
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7e8aa048
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.mem_cgroup = mem,
 		.memcg_record = rec,
 	};
-	unsigned long start, end;
+	ktime_t start, end;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						      sc.may_writepage,
 						      sc.gfp_mask);
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * NOTE: Although we can get the priority field, using it
 	 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	 * the priority and make it zero.
 	 */
 	shrink_zone(0, zone, &sc);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 	*scanned = sc.nr_scanned;
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
-	unsigned long start, end;
+	ktime_t start, end;
 	int nid;
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 	 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 						      sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);