Commit 050716fa authored by David Mosberger

ia64: perfmon update.

Here is a ChangeLog for the patch:

        - The perfmon core now invokes the sampling module handler
          routine once for each overflowed PMD. When multiple PMDs
          overflow at the same time (within the same PMU interrupt),
          up to 64 distinct calls can happen. A common timestamp
          parameter allows the module to identify entries belonging
          to the same interrupt (a condensed sketch of the new
          dispatch loop follows this ChangeLog).

        - Changed the module ovfl_ctrl arguments to simplify the reset
          field. Now it is a simple boolean.

        - Updated perfmon.h to convert the "set" field from uint to
          ushort. Other structures were updated for a better layout.

        - Updated perfmon_default_smpl.h to reflect the change in
          overflow processing mentioned above.

        - Cleaned up some state-checking code to use switch-case
          instead of if-then with macros. This makes the code more
          readable and easier for gcc to optimize. Thanks to David
          for the suggestion.

        - Added extra safety checks in pfm_context_load() to verify
          that the target task actually exists.

        - The default sampling format module now supports the
          fmt_restart_active callback. Patch from David.
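
For reference, a condensed sketch of the new per-PMD dispatch loop in
pfm_overflow_handler(), simplified from the patch below (statistics,
prefetching, and error paths omitted):

        /* one handler call per overflowed PMD, all sharing one timestamp */
        tstamp   = ia64_get_itc();
        pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;

        for (i = PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>= 1) {
                if ((pmd_mask & 0x1) == 0) continue;

                ovfl_arg.ovfl_pmd      = (unsigned char)i;
                ovfl_arg.ovfl_notify   = ovfl_notify & (1UL << i) ? 1 : 0;
                ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */

                ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr,
                                                       &ovfl_arg, regs, tstamp);

                /* these controls are all-or-nothing: take the union */
                ovfl_ctrl.bits.notify_user     |= ovfl_arg.ovfl_ctrl.bits.notify_user;
                ovfl_ctrl.bits.block_task      |= ovfl_arg.ovfl_ctrl.bits.block_task;
                ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
                ovfl_ctrl.bits.reset_ovfl_pmds |= ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds;
        }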
parent 11d03417
@@ -256,6 +256,8 @@ typedef struct {
/*
* 64-bit software counter structure
*
* the next_reset_type is applied to the next call to pfm_reset_regs()
*/
typedef struct {
unsigned long val; /* virtual 64bit counter value */
@@ -267,7 +269,7 @@ typedef struct {
unsigned long seed; /* seed for random-number generator */
unsigned long mask; /* mask for random-number generator */
unsigned int flags; /* notify/do not notify */
unsigned int reserved; /* for future use */
int next_reset_type;/* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
unsigned long eventid; /* overflow event identifier */
} pfm_counter_t;
@@ -557,7 +559,6 @@ static struct vm_operations_struct pfm_vm_ops={
close: pfm_vm_close
};
#define pfm_wait_task_inactive(t) wait_task_inactive(t)
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -648,7 +649,6 @@ DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
/* forward declaration */
static struct file_operations pfm_file_ops;
@@ -1532,7 +1532,7 @@ pfm_lseek(struct file *file, loff_t offset, int whence)
}
static ssize_t
pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
pfm_context_t *ctx;
pfm_msg_t *msg;
@@ -1628,18 +1628,6 @@ pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
return ret;
}
static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
int oldvar, ret;
oldvar = pfm_debug_var;
pfm_debug_var = pfm_sysctl.debug_pfm_read;
ret = pfm_do_read(filp, buf, size, ppos);
pfm_debug_var = oldvar;
return ret;
}
static ssize_t
pfm_write(struct file *file, const char *ubuf,
size_t size, loff_t *ppos)
@@ -2759,20 +2747,18 @@ pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag));
if (flag == PFM_PMD_NO_RESET) return;
/*
* now restore reset value on sampling overflowed counters
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
if (mask & 0x1) {
if ((mask & 0x1UL) == 0UL) continue;
ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
}
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
}
/*
@@ -2811,16 +2797,16 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
if (mask & 0x1) {
if ((mask & 0x1UL) == 0UL) continue;
val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
pfm_write_soft_counter(ctx, i, val);
}
}
/*
* Now take care of resetting the other registers
@@ -2861,7 +2847,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
if (is_loaded) {
thread = &ctx->ctx_task->thread;
can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
@@ -3569,51 +3555,49 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
struct task_struct *task;
pfm_buffer_fmt_t *fmt;
pfm_ovfl_ctrl_t rst_ctrl;
int is_loaded;
int state, is_system;
int ret = 0;
state = ctx->ctx_state;
fmt = ctx->ctx_buf_fmt;
is_loaded = CTX_IS_LOADED(ctx);
if (is_loaded && CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) goto proceed;
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
/*
* restarting a terminated context is a nop
*/
if (unlikely(CTX_IS_TERMINATED(ctx))) {
switch(state) {
case PFM_CTX_MASKED:
break;
case PFM_CTX_LOADED:
if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
/* fall through */
case PFM_CTX_UNLOADED:
case PFM_CTX_ZOMBIE:
DPRINT(("invalid state=%d\n", state));
return -EBUSY;
case PFM_CTX_TERMINATED:
DPRINT(("context is terminated, nothing to do\n"));
return 0;
default:
DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
return -EINVAL;
}
/*
* LOADED, UNLOADED, ZOMBIE
*/
if (CTX_IS_MASKED(ctx) == 0) return -EBUSY;
proceed:
/*
* In system wide and when the context is loaded, access can only happen
* when the caller is running on the CPU being monitored by the session.
* It does not have to be the owner (ctx_task) of the context per se.
*/
if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
if (is_system && ctx->ctx_cpu != smp_processor_id()) {
DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
return -EBUSY;
}
task = PFM_CTX_TASK(ctx);
/* sanity check */
if (unlikely(task == NULL)) {
printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
return -EINVAL;
}
/*
* this test is always true in system wide mode
*/
if (task == current) {
if (task == current || is_system) {
fmt = ctx->ctx_buf_fmt;
@@ -3625,25 +3609,23 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
prefetch(ctx->ctx_smpl_hdr);
rst_ctrl.stop_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
if (is_loaded)
if (state == PFM_CTX_LOADED)
ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
else
ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else {
rst_ctrl.stop_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
if (rst_ctrl.reset_pmds)
pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, rst_ctrl.reset_pmds);
if (rst_ctrl.bits.reset_ovfl_pmds)
pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
if (rst_ctrl.stop_monitoring == 0) {
if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring for [%d]\n", task->pid));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task);
@@ -3686,7 +3668,6 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
PFM_SET_WORK_PENDING(task, 1);
pfm_set_task_notify(task);
@@ -3707,10 +3688,9 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
pfm_debug_var = pfm_sysctl.debug;
printk(KERN_ERR "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
if (m==0) {
if (m == 0) {
memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
}
@@ -3718,7 +3698,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
return 0;
}
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -3926,6 +3905,7 @@ static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct pt_regs *tregs;
struct task_struct *task = PFM_CTX_TASK(ctx);
if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL;
@@ -3975,7 +3955,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* per-task mode
*/
if (ctx->ctx_task == current) {
if (task == current) {
/* stop monitoring at kernel level */
pfm_clear_psr_up();
@@ -3984,7 +3964,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
ia64_psr(regs)->up = 0;
} else {
tregs = ia64_task_regs(ctx->ctx_task);
tregs = ia64_task_regs(task);
/*
* stop monitoring at the user level
@@ -3995,7 +3975,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* monitoring disabled in kernel at next reschedule
*/
ctx->ctx_saved_psr_up = 0;
printk("pfm_stop: current [%d] task=[%d]\n", current->pid, ctx->ctx_task->pid);
DPRINT(("pfm_stop: current [%d] task=[%d]\n", current->pid, task->pid));
}
return 0;
}
@@ -4106,6 +4086,28 @@ pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs
return ret;
}
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
struct task_struct *g, *t;
int ret = -ESRCH;
read_lock(&tasklist_lock);
do_each_thread (g, t) {
if (t->thread.pfm_context == ctx) {
ret = 0;
break;
}
} while_each_thread (g, t);
read_unlock(&tasklist_lock);
DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
return ret;
}
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
@@ -4316,8 +4318,17 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
* release task, there is now a link with the context
*/
if (ctx->ctx_fl_system == 0 && task != current) pfm_put_task(task);
if (ctx->ctx_fl_system == 0 && task != current) {
pfm_put_task(task);
if (ret == 0) {
ret = pfm_check_task_exist(ctx);
if (ret) {
CTX_UNLOADED(ctx);
ctx->ctx_task = NULL;
}
}
}
return ret;
}
@@ -4334,7 +4345,7 @@ static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
struct task_struct *task = ctx->ctx_task;
struct task_struct *task = PFM_CTX_TASK(ctx);
struct pt_regs *tregs;
DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
@@ -4416,8 +4427,8 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
* cancel user level control
*/
ia64_psr(regs)->sp = 1;
DPRINT(("setting psr.sp for [%d]\n", task->pid));
DPRINT(("setting psr.sp for [%d]\n", task->pid));
}
/*
* save PMDs to context
@@ -4490,7 +4501,7 @@ pfm_exit_thread(struct task_struct *task)
pfm_context_t *ctx;
unsigned long flags;
struct pt_regs *regs = ia64_task_regs(task);
int ret;
int ret, state;
int free_ok = 0;
ctx = PFM_GET_CTX(task);
@@ -4499,16 +4510,16 @@ pfm_exit_thread(struct task_struct *task)
DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
state = ctx->ctx_state;
switch(state) {
case PFM_CTX_UNLOADED:
/*
* come here only if attached
*/
if (unlikely(CTX_IS_UNLOADED(ctx))) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
goto skip_all;
}
if (CTX_IS_LOADED(ctx) || CTX_IS_MASKED(ctx)) {
break;
case PFM_CTX_LOADED:
case PFM_CTX_MASKED:
ret = pfm_context_unload(ctx, NULL, 0, regs);
if (ret) {
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
@@ -4517,8 +4528,8 @@ pfm_exit_thread(struct task_struct *task)
DPRINT(("ctx terminated by [%d]\n", task->pid));
pfm_end_notify_user(ctx);
} else if (CTX_IS_ZOMBIE(ctx)) {
break;
case PFM_CTX_ZOMBIE:
pfm_clear_psr_up();
BUG_ON(ctx->ctx_smpl_hdr);
@@ -4526,11 +4537,15 @@ pfm_exit_thread(struct task_struct *task)
pfm_force_cleanup(ctx, regs);
free_ok = 1;
break;
default:
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
break;
}
{ u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(GET_PMU_OWNER());
}
skip_all:
UNPROTECT_CTX(ctx, flags);
/*
@@ -4660,7 +4675,7 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
/*
* reject any call if perfmon was disabled at initialization time
*/
mask*/
if (PFM_IS_DISABLED()) return -ENOSYS;
if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) {
@@ -4780,6 +4795,8 @@ sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, lon
error_args:
if (args_k) kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
return ret;
}
@@ -4796,22 +4813,22 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg
*/
if (CTX_HAS_SMPL(ctx)) {
rst_ctrl.stop_monitoring = 1;
rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
/* XXX: check return value */
if (fmt->fmt_restart)
ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
} else {
rst_ctrl.stop_monitoring = 0;
rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
rst_ctrl.bits.mask_monitoring = 0;
rst_ctrl.bits.reset_ovfl_pmds = 1;
}
if (ret == 0) {
if (rst_ctrl.reset_pmds != PFM_PMD_NO_RESET)
pfm_reset_regs(ctx, &ovfl_regs, rst_ctrl.reset_pmds);
if (rst_ctrl.stop_monitoring == 0) {
if (rst_ctrl.bits.reset_ovfl_pmds) {
pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
}
if (rst_ctrl.bits.mask_monitoring == 0) {
DPRINT(("resuming monitoring\n"));
if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current);
} else {
@@ -4981,11 +4998,12 @@ pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
msg->pfm_ovfl_msg.msg_active_set = 0;
msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
}
DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n",
@@ -5031,9 +5049,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
pfm_ovfl_arg_t ovfl_arg;
unsigned long mask;
unsigned long old_val;
unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL;
unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
unsigned long tstamp;
pfm_ovfl_ctrl_t ovfl_ctrl;
unsigned int i, j, has_smpl, first_pmd = ~0U;
unsigned int i, has_smpl;
int must_notify = 0;
if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring;
@@ -5043,9 +5062,11 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
*/
if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
tstamp = ia64_get_itc();
mask = pmc0 >> PMU_FIRST_COUNTER;
DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s"
DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
"used_pmds=0x%lx reload_pmcs=0x%lx\n",
pmc0,
task ? task->pid: -1,
@@ -5081,91 +5102,132 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
* check for overflow condition
*/
if (likely(old_val > ctx->ctx_pmds[i].val)) {
ovfl_pmds |= 1UL << i;
/*
* keep track of pmds of interest for samples
*/
if (has_smpl) {
if (first_pmd == ~0U) first_pmd = i;
smpl_pmds |= ctx->ctx_pmds[i].smpl_pmds[0];
}
if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
}
DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx first_pmd=%u smpl_pmds=0x%lx\n",
DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx "
"ovfl_notify=0x%lx\n",
i, ctx->ctx_pmds[i].val, old_val,
ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, first_pmd, smpl_pmds));
ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify));
}
ovfl_ctrl.notify_user = ovfl_notify ? 1 : 0;
ovfl_ctrl.reset_pmds = ovfl_pmds && ovfl_notify == 0UL ? 1 : 0;
ovfl_ctrl.block = ovfl_notify ? 1 : 0;
ovfl_ctrl.stop_monitoring = ovfl_notify ? 1 : 0;
/*
* there was no 64-bit overflow, nothing else to do
*/
if (ovfl_pmds == 0UL) return;
/*
* reset all control bits
*/
ovfl_ctrl.val = 0;
/*
* when a overflow is detected, check for sampling buffer, if present, invoke
* record() callback.
* if a sampling format module exists, then we "cache" the overflow by
* calling the module's handler() routine.
*/
if (ovfl_pmds && has_smpl) {
unsigned long start_cycles;
if (has_smpl) {
unsigned long start_cycles, end_cycles;
unsigned long pmd_mask, smpl_pmds;
int j, k, ret = 0;
int this_cpu = smp_processor_id();
ovfl_arg.ovfl_pmds[0] = ovfl_pmds;
ovfl_arg.ovfl_notify[0] = ovfl_notify;
ovfl_arg.ovfl_ctrl = ovfl_ctrl;
ovfl_arg.smpl_pmds[0] = smpl_pmds;
pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
prefetch(ctx->ctx_smpl_hdr);
ovfl_arg.pmd_value = ctx->ctx_pmds[first_pmd].val;
ovfl_arg.pmd_last_reset = ctx->ctx_pmds[first_pmd].lval;
ovfl_arg.pmd_eventid = ctx->ctx_pmds[first_pmd].eventid;
for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
mask = 1UL << i;
if ((pmd_mask & 0x1) == 0) continue;
ovfl_arg.ovfl_pmd = (unsigned char )i;
ovfl_arg.ovfl_notify = ovfl_notify & mask ? 1 : 0;
ovfl_arg.active_set = 0;
ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */
ovfl_arg.smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
ovfl_arg.pmd_value = ctx->ctx_pmds[i].val;
ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval;
ovfl_arg.pmd_eventid = ctx->ctx_pmds[i].eventid;
/*
* copy values of pmds of interest. Sampling format may copy them
* into sampling buffer.
*/
if (smpl_pmds) {
for(i=0, j=0; smpl_pmds; i++, smpl_pmds >>=1) {
for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
if ((smpl_pmds & 0x1) == 0) continue;
ovfl_arg.smpl_pmds_values[j++] = PMD_IS_COUNTING(i) ? pfm_read_soft_counter(ctx, i) : ia64_get_pmd(i);
ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
}
}
pfm_stats[this_cpu].pfm_smpl_handler_calls++;
start_cycles = ia64_get_itc();
/*
* call custom buffer format record (handler) routine
*/
(*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs);
ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp);
pfm_stats[this_cpu].pfm_smpl_handler_cycles += ia64_get_itc() - start_cycles;
end_cycles = ia64_get_itc();
ovfl_pmds = ovfl_arg.ovfl_pmds[0];
ovfl_notify = ovfl_arg.ovfl_notify[0];
ovfl_ctrl = ovfl_arg.ovfl_ctrl;
/*
* For those controls, we take the union because they have
* an all or nothing behavior.
*/
ovfl_ctrl.bits.notify_user |= ovfl_arg.ovfl_ctrl.bits.notify_user;
ovfl_ctrl.bits.block_task |= ovfl_arg.ovfl_ctrl.bits.block_task;
ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
ovfl_ctrl.bits.reset_ovfl_pmds |= ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds; /* yes or no */
pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
}
/*
* when the module cannot handle the rest of the overflows, we abort right here
*/
if (ret && pmd_mask) {
DPRINT(("current [%d] handler aborts leftover ovfl_pmds=0x%lx\n",
current->pid,
pmd_mask<<PMU_FIRST_COUNTER));
}
} else {
/*
* when no sampling module is used, then the default
* is to notify on overflow if requested by user
*/
ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
}
if (ovfl_pmds && ovfl_ctrl.reset_pmds) {
pfm_reset_regs(ctx, &ovfl_pmds, ovfl_ctrl.reset_pmds);
/*
* if we (still) have some overflowed PMD but no notification is requested
* then we use the short reset period.
*/
if (ovfl_ctrl.bits.reset_ovfl_pmds) {
unsigned long bm = ovfl_pmds;
pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
}
if (ovfl_notify && ovfl_ctrl.notify_user) {
if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
/*
* keep track of what to reset when unblocking
*/
ctx->ctx_ovfl_regs[0] = ovfl_pmds;
if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.block) {
/*
* check for blocking context
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
/*
* set the perfmon specific checking pending work
* set the perfmon specific checking pending work for the task
*/
PFM_SET_WORK_PENDING(task, 1);
@@ -5182,21 +5244,22 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
must_notify = 1;
}
DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx stopped=%d\n",
DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
current->pid,
GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
PFM_GET_WORK_PENDING(task),
ctx->ctx_fl_trap_reason,
ovfl_pmds,
ovfl_notify,
ovfl_ctrl.stop_monitoring ? 1 : 0));
ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
/*
* in case monitoring must be stopped, we toggle the psr bits
*/
if (ovfl_ctrl.stop_monitoring) {
if (ovfl_ctrl.bits.mask_monitoring) {
pfm_mask_monitoring(task);
CTX_MASKED(ctx);
}
/*
* send notification now
*/
@@ -5204,7 +5267,6 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
return;
sanity_check:
printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
smp_processor_id(),
@@ -5312,7 +5374,7 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
static pfm_irq_handler_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
unsigned long m;
unsigned long start_cycles, total_cycles;
unsigned long min, max;
int this_cpu;
int ret;
@@ -5321,19 +5383,22 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
m = ia64_get_itc();
start_cycles = ia64_get_itc();
ret = pfm_do_interrupt_handler(irq, arg, regs);
m = ia64_get_itc() - m;
total_cycles = ia64_get_itc();
/*
* don't measure spurious interrupts
*/
if (ret == 0) {
if (m < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = m;
if (m > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = m;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += m;
if (likely(ret == 0)) {
total_cycles -= start_cycles;
if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
}
PFM_IRQ_HANDLER_RET();
}
@@ -5488,6 +5553,10 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
pfm_set_psr_pp();
ia64_srlz_i();
}
{ unsigned long val;
val = ia64_get_pmc(4);
if ((val & (1UL<<23)) == 0UL) printk("perfmon: PMU off: pmc4=0x%lx\n", val);
}
}
void
@@ -5750,13 +5819,6 @@ pfm_load_regs (struct task_struct *task)
BUG_ON(GET_PMU_OWNER());
t = &task->thread;
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
/*
* possible on unload
*/
@@ -5771,6 +5833,12 @@ pfm_load_regs (struct task_struct *task)
* access, not CPU concurrency.
*/
flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
if (unlikely(CTX_IS_ZOMBIE(ctx))) {
struct pt_regs *regs = ia64_task_regs(task);
@@ -6133,6 +6201,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
if (is_self) task->thread.pmds[i] = pmd_val;
ctx->ctx_pmds[i].val = val;
}
}
@@ -109,21 +109,15 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
}
static int
default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs)
default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
{
pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_entry_t *ent;
void *cur, *last;
unsigned long *e;
unsigned long ovfl_mask;
unsigned long ovfl_notify;
unsigned long stamp;
unsigned int npmds, i;
/*
* some time stamp
*/
stamp = ia64_get_itc();
unsigned char ovfl_pmd;
unsigned char ovfl_notify;
if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
@@ -133,8 +127,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr = (pfm_default_smpl_hdr_t *)buf;
cur = hdr->hdr_cur_pos;
last = hdr->hdr_last_pos;
ovfl_mask = arg->ovfl_pmds[0];
ovfl_notify = arg->ovfl_notify[0];
ovfl_pmd = arg->ovfl_pmd;
ovfl_notify = arg->ovfl_notify;
/*
* check for space against largest possible entry.
@@ -153,12 +147,12 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr->hdr_count++;
DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n",
DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
task->pid,
hdr->hdr_count,
cur, last,
last-cur,
ovfl_mask,
ovfl_pmd,
ovfl_notify, npmds));
/*
@@ -172,7 +166,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
* - this is not necessarily the task controlling the session
*/
ent->pid = current->pid;
ent->cpu = smp_processor_id();
ent->ovfl_pmd = ovfl_pmd;
ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
/*
@@ -180,13 +174,9 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
*/
ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
/*
* which registers overflowed
*/
ent->ovfl_pmds = ovfl_mask;
ent->tstamp = stamp;
ent->cpu = smp_processor_id();
ent->set = arg->active_set;
ent->reserved1 = 0;
/*
* selectively store PMDs in increasing index number
@@ -206,14 +196,14 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
/*
* keep same ovfl_pmds, ovfl_notify
*/
arg->ovfl_ctrl.notify_user = 0;
arg->ovfl_ctrl.block = 0;
arg->ovfl_ctrl.stop_monitoring = 0;
arg->ovfl_ctrl.reset_pmds = 1;
arg->ovfl_ctrl.bits.notify_user = 0;
arg->ovfl_ctrl.bits.block_task = 0;
arg->ovfl_ctrl.bits.mask_monitoring = 0;
arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
return 0;
full:
DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify));
DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
/*
* increment number of buffer overflow.
@@ -222,22 +212,21 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
hdr->hdr_overflows++;
/*
* if no notification is needed, then we just reset the buffer index.
* if no notification is needed, then we saturate the buffer
*/
if (ovfl_notify == 0UL) {
if (ovfl_notify == 0) {
hdr->hdr_count = 0UL;
arg->ovfl_ctrl.notify_user = 0;
arg->ovfl_ctrl.block = 0;
arg->ovfl_ctrl.stop_monitoring = 0;
arg->ovfl_ctrl.reset_pmds = 1;
arg->ovfl_ctrl.bits.notify_user = 0;
arg->ovfl_ctrl.bits.block_task = 0;
arg->ovfl_ctrl.bits.mask_monitoring = 1;
arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
} else {
/* keep same ovfl_pmds, ovfl_notify */
arg->ovfl_ctrl.notify_user = 1;
arg->ovfl_ctrl.block = 1;
arg->ovfl_ctrl.stop_monitoring = 1;
arg->ovfl_ctrl.reset_pmds = 0;
arg->ovfl_ctrl.bits.notify_user = 1;
arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
arg->ovfl_ctrl.bits.mask_monitoring = 1;
arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
}
return 0;
return -1; /* we are full, sorry */
}
static int
@@ -250,8 +239,8 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
hdr->hdr_count = 0UL;
hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
ctrl->stop_monitoring = 0;
ctrl->reset_pmds = PFM_PMD_LONG_RESET;
ctrl->bits.mask_monitoring = 0;
ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
return 0;
}
@@ -272,6 +261,7 @@ static pfm_buffer_fmt_t default_fmt={
.fmt_init = default_init,
.fmt_handler = default_handler,
.fmt_restart = default_restart,
.fmt_restart_active = default_restart,
.fmt_exit = default_exit,
};
@@ -72,10 +72,11 @@ typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type
typedef struct {
pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
unsigned long ctx_flags; /* noblock/block */
unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */
unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
unsigned short ctx_reserved1; /* for future use */
int ctx_fd; /* return arg: unique identification for context */
void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
unsigned long ctx_reserved[11]; /* for future use */
unsigned long ctx_reserved2[11];/* for future use */
} pfarg_context_t;
/*
@@ -83,7 +84,8 @@ typedef struct {
*/
typedef struct {
unsigned int reg_num; /* which register */
unsigned int reg_set; /* event set for this register */
unsigned short reg_set; /* event set for this register */
unsigned short reg_reserved1; /* for future use */
unsigned long reg_value; /* initial pmc/pmd value */
unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
@@ -99,15 +101,16 @@ typedef struct {
unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
unsigned long reserved[3]; /* for future use */
unsigned long reg_reserved2[3]; /* for future use */
} pfarg_reg_t;
typedef struct {
unsigned int dbreg_num; /* which debug register */
unsigned int dbreg_set; /* event set for this register */
unsigned short dbreg_set; /* event set for this register */
unsigned short dbreg_reserved1; /* for future use */
unsigned long dbreg_value; /* value for debug register */
unsigned long dbreg_flags; /* return: dbreg error */
unsigned long dbreg_reserved[1]; /* for future use */
unsigned long dbreg_reserved2[1]; /* for future use */
} pfarg_dbreg_t;
typedef struct {
@@ -118,16 +121,19 @@ typedef struct {
typedef struct {
pid_t load_pid; /* process to load the context into */
unsigned int load_set; /* first event set to load */
unsigned long load_reserved[2]; /* for future use */
unsigned short load_set; /* first event set to load */
unsigned short load_reserved1; /* for future use */
unsigned long load_reserved2[3]; /* for future use */
} pfarg_load_t;
typedef struct {
int msg_type; /* generic message header */
int msg_ctx_fd; /* generic message header */
unsigned long msg_tstamp; /* for perf tuning */
unsigned int msg_active_set; /* active set at the time of overflow */
unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
unsigned short msg_active_set; /* active set at the time of overflow */
unsigned short msg_reserved1; /* for future use */
unsigned int msg_reserved2; /* for future use */
unsigned long msg_tstamp; /* for perf tuning/debug */
} pfm_ovfl_msg_t;
typedef struct {
@@ -192,25 +198,28 @@ extern void pfm_handle_work(void);
#define PFM_PMD_LONG_RESET 1
#define PFM_PMD_SHORT_RESET 2
typedef struct {
typedef union {
unsigned int val;
struct {
unsigned int notify_user:1; /* notify user program of overflow */
unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
unsigned int block:1; /* block monitored task on kernel exit */
unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */
unsigned int reserved:26; /* for future use */
unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
unsigned int block_task:1; /* block monitored task on kernel exit */
unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
unsigned int reserved:28; /* for future use */
} bits;
} pfm_ovfl_ctrl_t;
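
The union layout lets the core clear every control with a single store
before each handler call, and then read back individual bits; for
illustration (following the pattern used in pfm_overflow_handler above):

        pfm_ovfl_ctrl_t ctrl;

        ctrl.val = 0;                  /* clear all control bits at once */
        ctrl.bits.reset_ovfl_pmds = 1; /* request reset of overflowed PMDs */
        ctrl.bits.mask_monitoring = 0; /* keep monitoring active */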
typedef struct {
unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */
unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */
unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */
unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */
unsigned long pmd_eventid; /* eventid associated with 1st pmd which overflowed */
unsigned int active_set; /* event set active at the time of the overflow */
unsigned int reserved1;
unsigned long smpl_pmds[4];
unsigned long smpl_pmds_values[PMU_MAX_PMDS];
unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
unsigned short active_set; /* event set active at the time of the overflow */
pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
unsigned long pmd_last_reset; /* last reset value of the PMD */
unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
unsigned long pmd_value; /* current 64-bit value of the PMD */
unsigned long pmd_eventid; /* eventid associated with PMD */
} pfm_ovfl_arg_t;
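
To illustrate the per-PMD contract, a minimal custom format handler
could look as sketched below; this is hypothetical module code, and
record_sample() is an assumed helper, not part of the patch:

        static int
        my_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
                   struct pt_regs *regs, unsigned long stamp)
        {
                /* 'stamp' is identical for all PMDs of one PMU interrupt */
                record_sample(buf, arg->ovfl_pmd, arg->pmd_value, stamp); /* hypothetical */

                /* report controls back to the perfmon core */
                arg->ovfl_ctrl.val                  = 0;
                arg->ovfl_ctrl.bits.notify_user     = arg->ovfl_notify;
                arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset on return */
                return 0;
        }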
@@ -223,7 +232,7 @@ typedef struct _pfm_buffer_fmt_t {
int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs);
int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
@@ -16,7 +9
*/
typedef struct {
unsigned long buf_size; /* size of the buffer in bytes */
unsigned long reserved[3]; /* for future use */
unsigned int flags; /* buffer specific flags */
unsigned int res1; /* for future use */
unsigned long reserved[2]; /* for future use */
} pfm_default_smpl_arg_t;
/*
@@ -46,28 +48,27 @@ typedef struct {
/*
* Entry header in the sampling buffer. The header is directly followed
* with the PMDs saved in increasing index order: PMD4, PMD5, .... How
* many PMDs are present depends on how the session was programmed.
* with the values of the PMD registers of interest saved in increasing
* index order: PMD4, PMD5, and so on. How many PMDs are present depends
* on how the session was programmed.
*
* XXX: in this version of the entry, only up to 64 registers can be
* recorded. This should be enough for quite some time. Always check
* sampling format before parsing entries!
* In the case where multiple counters overflow at the same time, multiple
* entries are written consecutively.
*
* In the case where multiple counters overflow at the same time, the
* last_reset_value member indicates the initial value of the
* overflowed PMD with the smallest index. For instance, if PMD2 and
* PMD5 have overflowed, the last_reset_value member contains the
* initial value of PMD2.
* last_reset_value member indicates the initial value of the overflowed PMD.
*/
typedef struct {
int pid; /* current process at PMU interrupt point */
int cpu; /* cpu on which the overflow occurred */
unsigned long last_reset_val; /* initial value of 1st overflowed PMD */
int pid; /* active process at PMU interrupt point */
unsigned char reserved1[3]; /* reserved for future use */
unsigned char ovfl_pmd; /* index of overflowed PMD */
unsigned long last_reset_val; /* initial value of overflowed PMD */
unsigned long ip; /* where the overflow interrupt happened */
unsigned long ovfl_pmds; /* which PMDS registers overflowed (64 max) */
unsigned long tstamp; /* ar.itc on the CPU that took the overflow */
unsigned int set; /* event set active when overflow occurred */
unsigned int reserved1; /* for future use */
unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
unsigned short cpu; /* cpu on which the overflow occurred */
unsigned short set; /* event set active when overflow occurred */
unsigned int reserved2; /* for future use */
} pfm_default_smpl_entry_t;
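
As an illustration, a user-level consumer could walk the buffer as
sketched below; this is hypothetical code (assumes <stdio.h>), where
npmds, the number of PMD values recorded after each entry header, is
known from how the session was programmed:

        void
        walk_buffer(pfm_default_smpl_hdr_t *hdr, unsigned int npmds)
        {
                pfm_default_smpl_entry_t *ent;
                unsigned long *pmds;
                unsigned long k;
                void *pos = (void *)(hdr + 1); /* first entry follows the header */

                for (k = 0; k < hdr->hdr_count; k++) {
                        ent  = (pfm_default_smpl_entry_t *)pos;
                        pmds = (unsigned long *)(ent + 1); /* PMD4, PMD5, ... in index order */
                        printf("pid=%d pmd=%d ip=0x%lx tstamp=0x%lx\n",
                               ent->pid, (int)ent->ovfl_pmd, ent->ip, ent->tstamp);
                        pos  = pmds + npmds; /* skip the recorded PMD values */
                }
        }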
#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long)) */