Commit 4cd2bb12 authored by Quanyang Wang, committed by Thomas Gleixner

time/sched_clock: Mark sched_clock_read_begin/retry() as notrace

Since sched_clock_read_begin() and sched_clock_read_retry() are called
by the notrace function sched_clock(), they must not be traceable either,
otherwise ftrace_graph_caller will run into an infinite recursion on the
following path (on arm, for instance):

  ftrace_graph_caller()
    prepare_ftrace_return()
      function_graph_enter()
        ftrace_push_return_trace()
          trace_clock_local()
            sched_clock()
              sched_clock_read_begin/retry()

Fixes: 1b86abc1 ("sched_clock: Expose struct clock_read_data")
Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200929082027.16787-1-quanyang.wang@windriver.com
parent 3650b228
kernel/time/sched_clock.c
@@ -68,13 +68,13 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 	return (cyc * mult) >> shift;
 }
 
-struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 {
 	*seq = raw_read_seqcount_latch(&cd.seq);
 	return cd.read_data + (*seq & 1);
 }
 
-int sched_clock_read_retry(unsigned int seq)
+notrace int sched_clock_read_retry(unsigned int seq)
 {
 	return read_seqcount_latch_retry(&cd.seq, seq);
 }
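
Editor's note (not part of the commit): in the kernel, notrace is a function
attribute that keeps the compiler from emitting the mcount/fentry profiling
call for the marked function, so ftrace never enters it through its
trampoline and the recursion shown above cannot start. A minimal sketch of
the idea, assuming the definition from include/linux/compiler_types.h; the
helper function below is hypothetical and purely illustrative:

  /*
   * Sketch only: this is how the kernel defines notrace. The attribute
   * suppresses the compiler's profiling-call instrumentation, so ftrace
   * cannot hook the function.
   */
  #define notrace __attribute__((__no_instrument_function__))

  /*
   * A function annotated this way can be called from inside the ftrace
   * machinery (e.g. from a clock read on the trace path) without
   * re-entering function_graph_enter().
   */
  notrace static unsigned long long example_fast_clock_read(void)
  {
  	return 0; /* placeholder value, for illustration only */
  }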