Commit 0abb868b authored by Mathieu Poirier, committed by Arnaldo Carvalho de Melo

perf cs-etm: Move tid/pid to traceid_queue

The tid/pid fields of structure cs_etm_queue are CPU dependent and as
such need to be part of the cs_etm_traceid_queue in order to support
CPU-wide trace scenarios.
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Tested-by: Leo Yan <leo.yan@linaro.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suzuki Poulose <suzuki.poulose@arm.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20190524173508.29044-13-mathieu.poirier@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 3c21d7d8
...@@ -62,6 +62,7 @@ struct cs_etm_auxtrace { ...@@ -62,6 +62,7 @@ struct cs_etm_auxtrace {
struct cs_etm_traceid_queue { struct cs_etm_traceid_queue {
u8 trace_chan_id; u8 trace_chan_id;
pid_t pid, tid;
u64 period_instructions; u64 period_instructions;
size_t last_branch_pos; size_t last_branch_pos;
union perf_event *event_buf; union perf_event *event_buf;
...@@ -78,7 +79,6 @@ struct cs_etm_queue { ...@@ -78,7 +79,6 @@ struct cs_etm_queue {
struct cs_etm_decoder *decoder; struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer; struct auxtrace_buffer *buffer;
unsigned int queue_nr; unsigned int queue_nr;
pid_t pid, tid;
u64 offset; u64 offset;
const unsigned char *buf; const unsigned char *buf;
size_t buf_len, buf_used; size_t buf_len, buf_used;
...@@ -159,10 +159,14 @@ static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq, ...@@ -159,10 +159,14 @@ static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
u8 trace_chan_id) u8 trace_chan_id)
{ {
int rc = -ENOMEM; int rc = -ENOMEM;
struct auxtrace_queue *queue;
struct cs_etm_auxtrace *etm = etmq->etm; struct cs_etm_auxtrace *etm = etmq->etm;
cs_etm__clear_packet_queue(&tidq->packet_queue); cs_etm__clear_packet_queue(&tidq->packet_queue);
queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
tidq->tid = queue->tid;
tidq->pid = -1;
tidq->trace_chan_id = trace_chan_id; tidq->trace_chan_id = trace_chan_id;
tidq->packet = zalloc(sizeof(struct cs_etm_packet)); tidq->packet = zalloc(sizeof(struct cs_etm_packet));
...@@ -598,8 +602,6 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, ...@@ -598,8 +602,6 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
queue->priv = etmq; queue->priv = etmq;
etmq->etm = etm; etmq->etm = etm;
etmq->queue_nr = queue_nr; etmq->queue_nr = queue_nr;
etmq->tid = queue->tid;
etmq->pid = -1;
etmq->offset = 0; etmq->offset = 0;
out: out:
...@@ -817,23 +819,19 @@ cs_etm__get_trace(struct cs_etm_queue *etmq) ...@@ -817,23 +819,19 @@ cs_etm__get_trace(struct cs_etm_queue *etmq)
} }
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue) struct auxtrace_queue *queue,
struct cs_etm_traceid_queue *tidq)
{ {
struct cs_etm_traceid_queue *tidq;
struct cs_etm_queue *etmq = queue->priv;
tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
/* CPU-wide tracing isn't supported yet */ /* CPU-wide tracing isn't supported yet */
if (queue->tid == -1) if (queue->tid == -1)
return; return;
if ((!tidq->thread) && (etmq->tid != -1)) if ((!tidq->thread) && (tidq->tid != -1))
tidq->thread = machine__find_thread(etm->machine, -1, tidq->thread = machine__find_thread(etm->machine, -1,
etmq->tid); tidq->tid);
if (tidq->thread) if (tidq->thread)
etmq->pid = tidq->thread->pid_; tidq->pid = tidq->thread->pid_;
} }
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
...@@ -850,8 +848,8 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, ...@@ -850,8 +848,8 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
event->sample.header.size = sizeof(struct perf_event_header); event->sample.header.size = sizeof(struct perf_event_header);
sample.ip = addr; sample.ip = addr;
sample.pid = etmq->pid; sample.pid = tidq->pid;
sample.tid = etmq->tid; sample.tid = tidq->tid;
sample.id = etmq->etm->instructions_id; sample.id = etmq->etm->instructions_id;
sample.stream_id = etmq->etm->instructions_id; sample.stream_id = etmq->etm->instructions_id;
sample.period = period; sample.period = period;
...@@ -909,8 +907,8 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq, ...@@ -909,8 +907,8 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
event->sample.header.size = sizeof(struct perf_event_header); event->sample.header.size = sizeof(struct perf_event_header);
sample.ip = ip; sample.ip = ip;
sample.pid = etmq->pid; sample.pid = tidq->pid;
sample.tid = etmq->tid; sample.tid = tidq->tid;
sample.addr = cs_etm__first_executed_instr(tidq->packet); sample.addr = cs_etm__first_executed_instr(tidq->packet);
sample.id = etmq->etm->branches_id; sample.id = etmq->etm->branches_id;
sample.stream_id = etmq->etm->branches_id; sample.stream_id = etmq->etm->branches_id;
...@@ -1758,9 +1756,19 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, ...@@ -1758,9 +1756,19 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
for (i = 0; i < queues->nr_queues; i++) { for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &etm->queues.queue_array[i]; struct auxtrace_queue *queue = &etm->queues.queue_array[i];
struct cs_etm_queue *etmq = queue->priv; struct cs_etm_queue *etmq = queue->priv;
struct cs_etm_traceid_queue *tidq;
if (!etmq)
continue;
tidq = cs_etm__etmq_get_traceid_queue(etmq,
CS_ETM_PER_THREAD_TRACEID);
if (!tidq)
continue;
if (etmq && ((tid == -1) || (etmq->tid == tid))) { if ((tid == -1) || (tidq->tid == tid)) {
cs_etm__set_pid_tid_cpu(etm, queue); cs_etm__set_pid_tid_cpu(etm, queue, tidq);
cs_etm__run_decoder(etmq); cs_etm__run_decoder(etmq);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment