Commit 3b4e81bd authored by Stéphane Eranian's avatar Stéphane Eranian Committed by David Mosberger

[PATCH] ia64: switch to perfmon2

This patch contains a major rewrite of the perfmon subsystem,
bringing it to version 2.0. This version is NOT compatible with the
existing perfmon-1.x interface which was in 2.5 and is still in 2.4
kernels. The new codebase brings many new features, including the
ability to attach to already running tasks, the ability to follow
clone2, and the ability to write your own sampling buffer format as
a kernel module. It is also much more robust than its 1.x
counterpart. This version supports the Itanium, McKinley and
Madison PMUs. This is beta-quality code and extensions to the
interface are planned.
parent 5a5bed27
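Custom sampling-buffer formats are plugged into perfmon-2 through the
pfm_register_buffer_fmt()/pfm_unregister_buffer_fmt() interface exported by
this patch. The skeleton below is only a sketch of such a format module,
modeled on the perfmon_default_smpl.c file added further down; the format
name, the UUID bytes and the do-nothing overflow handler are illustrative
placeholders, not part of the patch.

/*
 * Sketch of a custom perfmon-2 sampling buffer format module.
 * Modeled on perfmon_default_smpl.c; name and UUID are made up.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/perfmon.h>

MODULE_LICENSE("GPL");

/* hypothetical UUID; a real format must choose its own unique value */
#define EXAMPLE_SMPL_UUID { \
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, \
	0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 }

/* called on every PMU overflow to record a sample into 'buf' */
static int
example_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
		struct pt_regs *regs)
{
	/* record a sample here, then tell perfmon how to proceed */
	arg->ovfl_ctrl.notify_user     = 0;
	arg->ovfl_ctrl.block           = 0;
	arg->ovfl_ctrl.stop_monitoring = 0;
	arg->ovfl_ctrl.reset_pmds      = 1;
	return 0;
}

static pfm_buffer_fmt_t example_fmt = {
	.fmt_name    = "example_format",
	.fmt_uuid    = EXAMPLE_SMPL_UUID,
	.fmt_handler = example_handler,
	/* fmt_validate/getsize/init/restart/exit left out of this sketch */
};

static int __init example_fmt_init(void)
{
	return pfm_register_buffer_fmt(&example_fmt);
}

static void __exit example_fmt_exit(void)
{
	pfm_unregister_buffer_fmt(example_fmt.fmt_uuid);
}

module_init(example_fmt_init);
module_exit(example_fmt_exit);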
......@@ -5,8 +5,8 @@
extra-y := head.o init_task.o
obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
sys_ia64.o time.o traps.o unaligned.o unwind.o
ivt.o machvec.o pal.o perfmon.o perfmon_default_smpl.o process.o ptrace.o sal.o \
semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o unwind.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_FSYS) += fsys.o
......
......@@ -376,7 +376,8 @@ GLOBAL_ENTRY(fsys_bubble_down)
* - r29: psr
*/
# define PSR_PRESERVED_BITS (IA64_PSR_UP | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK \
| IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_RT | IA64_PSR_IC)
| IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_RT \
| IA64_PSR_IC)
/*
* Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. The rest we have
* to synthesize.
......
......@@ -162,8 +162,11 @@ EXPORT_SYMBOL(machvec_noop);
EXPORT_SYMBOL(zero_page_memmap_ptr);
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
EXPORT_SYMBOL(pfm_register_buffer_fmt);
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
EXPORT_SYMBOL(pfm_mod_read_pmds);
EXPORT_SYMBOL(pfm_mod_write_pmcs);
#endif
#ifdef CONFIG_NUMA
......
This diff is collapsed.
/*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* This file implements the default sampling buffer format
* for the Linux/ia64 perfmon-2 subsystem.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/delay.h>
#include <linux/smp.h>
#include <asm/perfmon.h>
#include <asm/perfmon_default_smpl.h>
MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("perfmon default sampling format");
MODULE_LICENSE("GPL");
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "debug");
MODULE_PARM(debug_ovfl, "i");
MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
#define DEFAULT_DEBUG 1
#ifdef DEFAULT_DEBUG
#define DPRINT(a) \
do { \
if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#define DPRINT_ovfl(a) \
do { \
if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
} while (0)
#else
#define DPRINT(a)
#define DPRINT_ovfl(a)
#endif
static int debug, debug_ovfl;
static int
default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
{
pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t*)data;
int ret = 0;
if (data == NULL) {
DPRINT(("[%d] no argument passed\n", task->pid));
return -EINVAL;
}
DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu));
/*
* must hold at least the buffer header + one minimally sized entry
*/
if (arg->buf_size < PFM_DEFAULT_SMPL_MIN_BUF_SIZE) return -EINVAL;
DPRINT(("buf_size=%lu\n", arg->buf_size));
return ret;
}
static int
default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
{
pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
/*
* size has been validated in default_validate
*/
*size = arg->buf_size;
return 0;
}
static int
default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
{
pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
hdr = (pfm_default_smpl_hdr_t *)buf;
hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
hdr->hdr_buf_size = arg->buf_size;
hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
hdr->hdr_last_pos = (void *)((unsigned long)buf)+arg->buf_size;
hdr->hdr_overflows = 0UL;
hdr->hdr_count = 0UL;
DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u\n",
task->pid,
buf,
hdr->hdr_buf_size,
sizeof(*hdr),
hdr->hdr_version));
return 0;
}
static int
default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs)
{
pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_entry_t *ent;
void *cur, *last;
unsigned long *e;
unsigned long ovfl_mask;
unsigned long ovfl_notify;
unsigned long stamp;
unsigned int npmds, i;
/*
* some time stamp
*/
stamp = ia64_get_itc();
if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
return -EINVAL;
}
hdr = (pfm_default_smpl_hdr_t *)buf;
cur = hdr->hdr_cur_pos;
last = hdr->hdr_last_pos;
ovfl_mask = arg->ovfl_pmds[0];
ovfl_notify = arg->ovfl_notify[0];
/*
* check for space against the largest possible entry.
* We may waste space at the end of the buffer.
*/
if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
npmds = hweight64(arg->smpl_pmds[0]);
ent = (pfm_default_smpl_entry_t *)cur;
prefetch(arg->smpl_pmds_values);
/* position for first pmd */
e = (unsigned long *)(ent+1);
hdr->hdr_count++;
DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n",
task->pid,
hdr->hdr_count,
cur, last,
last-cur,
ovfl_mask,
ovfl_notify, npmds));
/*
* current = task running at the time of the overflow.
*
* per-task mode:
* - this is usually the task being monitored.
* Under certain conditions, it might be a different task
*
* system-wide:
* - this is not necessarily the task controlling the session
*/
ent->pid = current->pid;
ent->cpu = smp_processor_id();
ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
/*
* where did the fault happen (includes slot number)
*/
ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
/*
* which registers overflowed
*/
ent->ovfl_pmds = ovfl_mask;
ent->tstamp = stamp;
ent->set = arg->active_set;
ent->reserved1 = 0;
/*
* selectively store PMDs in increasing index number
*/
if (npmds) {
unsigned long *val = arg->smpl_pmds_values;
for(i=0; i < npmds; i++) {
*e++ = *val++;
}
}
/*
* update position for next entry
*/
hdr->hdr_cur_pos = cur + sizeof(*ent) + (npmds << 3);
/*
* keep same ovfl_pmds, ovfl_notify
*/
arg->ovfl_ctrl.notify_user = 0;
arg->ovfl_ctrl.block = 0;
arg->ovfl_ctrl.stop_monitoring = 0;
arg->ovfl_ctrl.reset_pmds = 1;
return 0;
full:
DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify));
/*
* increment number of buffer overflows.
* important to detect duplicate sets of samples.
*/
hdr->hdr_overflows++;
/*
* if no notification is needed, then we just reset the buffer index.
*/
if (ovfl_notify == 0UL) {
hdr->hdr_count = 0UL;
arg->ovfl_ctrl.notify_user = 0;
arg->ovfl_ctrl.block = 0;
arg->ovfl_ctrl.stop_monitoring = 0;
arg->ovfl_ctrl.reset_pmds = 1;
} else {
/* keep same ovfl_pmds, ovfl_notify */
arg->ovfl_ctrl.notify_user = 1;
arg->ovfl_ctrl.block = 1;
arg->ovfl_ctrl.stop_monitoring = 1;
arg->ovfl_ctrl.reset_pmds = 0;
}
return 0;
}
static int
default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
pfm_default_smpl_hdr_t *hdr;
hdr = (pfm_default_smpl_hdr_t *)buf;
hdr->hdr_count = 0UL;
hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
ctrl->stop_monitoring = 0;
ctrl->reset_pmds = PFM_PMD_LONG_RESET;
return 0;
}
static int
default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
{
DPRINT(("[%d] exit(%p)\n", task->pid, buf));
return 0;
}
static pfm_buffer_fmt_t default_fmt={
.fmt_name = "default_format",
.fmt_uuid = PFM_DEFAULT_SMPL_UUID,
.fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
.fmt_validate = default_validate,
.fmt_getsize = default_get_size,
.fmt_init = default_init,
.fmt_handler = default_handler,
.fmt_restart = default_restart,
.fmt_exit = default_exit,
};
static int __init
pfm_default_smpl_init_module(void)
{
int ret;
ret = pfm_register_buffer_fmt(&default_fmt);
if (ret == 0) {
printk("perfmon_default_smpl: %s v%u.%u registered\n",
default_fmt.fmt_name,
PFM_DEFAULT_SMPL_VERSION_MAJ,
PFM_DEFAULT_SMPL_VERSION_MIN);
} else {
printk("perfmon_default_smpl: %s cannot register ret=%d\n",
default_fmt.fmt_name,
ret);
}
return ret;
}
static void __exit
pfm_default_smpl_cleanup_module(void)
{
int ret;
ret = pfm_unregister_buffer_fmt(default_fmt.fmt_uuid);
printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
}
module_init(pfm_default_smpl_init_module);
module_exit(pfm_default_smpl_cleanup_module);
/*
* This file contains the architected PMU register description tables
* This file contains the generic PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined (CONFIG_MCKINLEY)
......@@ -39,10 +41,13 @@ static pfm_reg_desc_t pmd_gen_desc[PMU_MAX_PMDS]={
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.disabled = 1,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 8,
.num_dbrs = 8,
.pmd_desc = pfm_gen_pmd_desc,
.pmc_desc = pfm_gen_pmc_desc
.pmu_name = "Generic",
.pmu_family = 0xff, /* any */
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 0, /* does not use */
.num_dbrs = 0, /* does not use */
.pmd_desc = pfm_gen_pmd_desc,
.pmc_desc = pfm_gen_pmc_desc
};
......@@ -2,7 +2,7 @@
* This file contains the Itanium PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
......@@ -12,8 +12,8 @@
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
......@@ -59,52 +59,53 @@ static pfm_reg_desc_t pfm_ita_pmd_desc[PMU_MAX_PMDS]={
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.disabled = 1,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 8,
.num_dbrs = 8,
.pmd_desc = pfm_ita_pmd_desc,
.pmc_desc = pfm_ita_pmc_desc
.pmu_name = "Itanium",
.pmu_family = 0x7,
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.pmd_desc = pfm_ita_pmd_desc,
.pmc_desc = pfm_ita_pmc_desc,
.num_ibrs = 8,
.num_dbrs = 8,
.use_rr_dbregs = 1 /* debug registers are used for range restrictions */
};
static int
pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
pfm_context_t *ctx = task->thread.pfm_context;
int ret;
/*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (data) debug registers if pmc11.pt bit is cleared
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 11 && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
if (ret) return ret;
}
return 0;
......
......@@ -2,7 +2,7 @@
* This file contains the McKinley PMU register description tables
* and pmc checker used by perfmon.c.
*
* Copyright (C) 2002 Hewlett Packard Co
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
......@@ -12,9 +12,8 @@
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
static int pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
......@@ -22,17 +21,17 @@ static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0000000000800000UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_reserved, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG , 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_CONFIG , 0, 0x0db60db60db60db6UL, 0x2492UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_reserved, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
......@@ -62,20 +61,22 @@ static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.disabled = 1,
.ovfl_val = (1UL << 47) - 1,
.num_ibrs = 8,
.num_dbrs = 8,
.pmd_desc = pfm_mck_pmd_desc,
.pmc_desc = pfm_mck_pmc_desc
.pmu_name = "Itanium 2",
.pmu_family = 0x1f,
.enabled = 0,
.ovfl_val = (1UL << 47) - 1,
.pmd_desc = pfm_mck_pmd_desc,
.pmc_desc = pfm_mck_pmc_desc,
.num_ibrs = 8,
.num_dbrs = 8,
.use_rr_dbregs = 1 /* debug registers are used for range restrictions */
};
/*
* PMC reserved fields must have their power-up values preserved
*/
static int
pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
pfm_mck_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
unsigned long tmp1, tmp2, ival = *val;
......@@ -87,52 +88,51 @@ pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigned long *val
*val = tmp1 | tmp2;
DBprintk(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
return 0;
}
static int
pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
struct thread_struct *th = &task->thread;
pfm_context_t *ctx = task->thread.pfm_context;
int ret = 0, check_case1 = 0;
struct thread_struct *th = &task->thread;
unsigned long val8 = 0, val14 = 0, val13 = 0;
/* first preserve the reserved fields */
pfm_mck_reserved(task, cnum, val, regs);
pfm_mck_reserved(cnum, val, regs);
/*
* we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
* we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
* before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && (*val & (0xfUL << 45)) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are (fl_using_dbreg==0) to avoid picking up stale information.
/*
* we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
* before they are (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 14 && ((*val & 0x2222) != 0x2222) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
/*
/*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
if (ret) return ret;
}
......@@ -141,17 +141,17 @@ pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *va
case 4: *val |= 1UL << 23; /* force power enable bit */
break;
case 8: val8 = *val;
val13 = th->pmc[13];
val14 = th->pmc[14];
val13 = th->pmcs[13];
val14 = th->pmcs[14];
check_case1 = 1;
break;
case 13: val8 = th->pmc[8];
case 13: val8 = th->pmcs[8];
val13 = *val;
val14 = th->pmc[14];
val14 = th->pmcs[14];
check_case1 = 1;
break;
case 14: val8 = th->pmc[13];
val13 = th->pmc[13];
case 14: val8 = th->pmcs[13];
val13 = th->pmcs[13];
val14 = *val;
check_case1 = 1;
break;
......@@ -165,7 +165,7 @@ pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *va
&& ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
if (ret) printk(KERN_DEBUG "perfmon: failure check_case1\n");
if (ret) printk("perfmon: failure check_case1\n");
}
return ret ? -EINVAL : 0;
......
......@@ -148,8 +148,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
#endif
#ifdef CONFIG_PERFMON
if (current->thread.pfm_ovfl_block_reset)
pfm_ovfl_block_reset();
if (current->thread.pfm_needs_checking)
pfm_handle_work();
#endif
/* deal with pending signal delivery */
......@@ -387,16 +387,8 @@ copy_thread (int nr, unsigned long clone_flags,
#endif
#ifdef CONFIG_PERFMON
/*
* reset notifiers and owner check (may not have a perfmon context)
*/
atomic_set(&p->thread.pfm_notifiers_check, 0);
atomic_set(&p->thread.pfm_owners_check, 0);
/* clear list of sampling buffer to free for new task */
p->thread.pfm_smpl_buf_list = NULL;
if (current->thread.pfm_context)
retval = pfm_inherit(p, child_ptregs);
pfm_inherit(p, child_ptregs);
#endif
return retval;
}
......@@ -609,34 +601,6 @@ flush_thread (void)
ia64_drop_fpu(current);
}
#ifdef CONFIG_PERFMON
/*
* by the time we get here, the task is detached from the tasklist. This is important
* because it means that no other tasks can ever find it as a notified task, therfore there
* is no race condition between this code and let's say a pfm_context_create().
* Conversely, the pfm_cleanup_notifiers() cannot try to access a task's pfm context if this
* other task is in the middle of its own pfm_context_exit() because it would already be out of
* the task list. Note that this case is very unlikely between a direct child and its parents
* (if it is the notified process) because of the way the exit is notified via SIGCHLD.
*/
void
release_thread (struct task_struct *task)
{
if (task->thread.pfm_context)
pfm_context_exit(task);
if (atomic_read(&task->thread.pfm_notifiers_check) > 0)
pfm_cleanup_notifiers(task);
if (atomic_read(&task->thread.pfm_owners_check) > 0)
pfm_cleanup_owners(task);
if (task->thread.pfm_smpl_buf_list)
pfm_cleanup_smpl_buf(task);
}
#endif
/*
* Clean up state associated with current thread. This is called when
* the thread calls exit().
......@@ -648,7 +612,7 @@ exit_thread (void)
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */
if (current->thread.pfm_context)
pfm_flush_regs(current);
pfm_exit_thread(current);
/* free debug register resources */
if (current->thread.flags & IA64_THREAD_DBG_VALID)
......
......@@ -189,24 +189,15 @@ copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
err |= __put_user(from->si_addr, &to->si_addr);
err |= __put_user(from->si_imm, &to->si_imm);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
case __SI_PROF >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
if (from->si_code == PROF_OVFL) {
err |= __put_user(from->si_pfm_ovfl[0], &to->si_pfm_ovfl[0]);
err |= __put_user(from->si_pfm_ovfl[1], &to->si_pfm_ovfl[1]);
err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]);
err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]);
}
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
default:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
......@@ -243,10 +234,6 @@ copy_siginfo_from_user (siginfo_t *to, siginfo_t *from)
to->si_code |= __SI_POLL;
break;
case SIGPROF:
to->si_code |= __SI_PROF;
break;
default:
break;
}
......
......@@ -72,7 +72,7 @@
#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH)
#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
......
This diff is collapsed.
/*
* Copyright (C) 2002-2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* This file implements the default sampling buffer format
* for Linux/ia64 perfmon subsystem.
*/
#ifndef __PERFMON_DEFAULT_SMPL_H__
#define __PERFMON_DEFAULT_SMPL_H__ 1
#define PFM_DEFAULT_SMPL_UUID { \
0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
/*
* format specific parameters (passed at context creation)
*/
typedef struct {
unsigned long buf_size; /* size of the buffer in bytes */
unsigned long reserved[3]; /* for future use */
} pfm_default_smpl_arg_t;
/*
* combined context+format specific structure. Can be passed
* to PFM_CONTEXT_CREATE
*/
typedef struct {
pfarg_context_t ctx_arg;
pfm_default_smpl_arg_t buf_arg;
} pfm_default_smpl_ctx_arg_t;
/*
* This header is at the beginning of the sampling buffer returned to the user.
* It is directly followed by the first record.
*/
typedef struct {
unsigned long hdr_count; /* how many valid entries */
void *hdr_cur_pos; /* current position in the buffer */
void *hdr_last_pos; /* first byte beyond buffer */
unsigned long hdr_overflows; /* how many times the buffer overflowed */
unsigned long hdr_buf_size; /* how many bytes in the buffer */
unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
unsigned int hdr_reserved1; /* for future use */
unsigned long hdr_reserved[10]; /* for future use */
} pfm_default_smpl_hdr_t;
/*
* Entry header in the sampling buffer. The header is directly followed
* by the PMDs saved in increasing index order: PMD4, PMD5, .... How
* many PMDs are present depends on how the session was programmed.
*
* XXX: in this version of the entry, only up to 64 registers can be
* recorded. This should be enough for quite some time. Always check
* sampling format before parsing entries!
*
* In the case where multiple counters overflow at the same time, the
* last_reset_value member indicates the initial value of the
* overflowed PMD with the smallest index. For instance, if PMD2 and
* PMD5 have overflowed, the last_reset_value member contains the
* initial value of PMD2.
*/
typedef struct {
int pid; /* current process at PMU interrupt point */
int cpu; /* cpu on which the overflow occurred */
unsigned long last_reset_val; /* initial value of 1st overflowed PMD */
unsigned long ip; /* where the overflow interrupt happened */
unsigned long ovfl_pmds; /* which PMD registers overflowed (64 max) */
unsigned long tstamp; /* ar.itc on the CPU that took the overflow */
unsigned int set; /* event set active when overflow occurred */
unsigned int reserved1; /* for future use */
} pfm_default_smpl_entry_t;
#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by the data structures (bits in an unsigned long) */
#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
#endif /* __PERFMON_DEFAULT_SMPL_H__ */
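A monitoring tool that has created a context with this format and mapped
the resulting buffer could walk it roughly as follows. This is only a
sketch derived from the structures above: walk_buffer() is a hypothetical
helper, the include path is a guess, and npmds (the number of PMD values
recorded per entry) must be known to the tool from the smpl_pmds bitmask
it programmed, since the entry itself does not store that count.

/*
 * Sketch: walk a perfmon-2 default-format sampling buffer from user level.
 * 'buf' points at the start of the mapped buffer, 'npmds' is the number
 * of PMD values saved after each entry header.
 */
#include <stdio.h>
#include <perfmon_default_smpl.h>	/* hypothetical include path */

static void
walk_buffer(void *buf, unsigned int npmds)
{
	pfm_default_smpl_hdr_t *hdr = buf;
	pfm_default_smpl_entry_t *ent;
	unsigned long *pmds, i, j;
	char *pos = (char *)(hdr + 1);	/* first entry follows the header */

	for (i = 0; i < hdr->hdr_count; i++) {
		ent  = (pfm_default_smpl_entry_t *)pos;
		pmds = (unsigned long *)(ent + 1);	/* PMDs follow the entry */

		printf("pid=%d cpu=%d ip=0x%lx ovfl_pmds=0x%lx set=%u\n",
		       ent->pid, ent->cpu, ent->ip, ent->ovfl_pmds, ent->set);
		for (j = 0; j < npmds; j++)
			printf("\tpmd value %lu = 0x%lx\n", j, pmds[j]);

		/* entries are variable-sized: header + npmds 64-bit PMD values */
		pos += sizeof(*ent) + npmds * sizeof(unsigned long);
	}
}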
......@@ -258,20 +258,14 @@ struct thread_struct {
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
__u64 pmc[IA64_NUM_PMC_REGS];
__u64 pmd[IA64_NUM_PMD_REGS];
unsigned long pfm_ovfl_block_reset;/* non-zero if we need to block or reset regs on ovfl */
void *pfm_context; /* pointer to detailed PMU context */
atomic_t pfm_notifiers_check; /* when >0, will cleanup ctx_notify_task in tasklist */
atomic_t pfm_owners_check; /* when >0, will cleanup ctx_owner in tasklist */
void *pfm_smpl_buf_list; /* list of sampling buffers to vfree */
# define INIT_THREAD_PM .pmc = {0, }, \
.pmd = {0, }, \
.pfm_ovfl_block_reset = 0, \
.pfm_context = NULL, \
.pfm_notifiers_check = { 0 }, \
.pfm_owners_check = { 0 }, \
.pfm_smpl_buf_list = NULL,
__u64 pmcs[IA64_NUM_PMC_REGS];
__u64 pmds[IA64_NUM_PMD_REGS];
void *pfm_context; /* pointer to detailed PMU context */
unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM .pmcs = {0UL, }, \
.pmds = {0UL, }, \
.pfm_context = NULL, \
.pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
#endif
......@@ -326,11 +320,7 @@ struct task_struct;
* parent of DEAD_TASK has collected the exit status of the task via
* wait().
*/
#ifdef CONFIG_PERFMON
extern void release_thread (struct task_struct *task);
#else
# define release_thread(dead_task)
#endif
#define release_thread(dead_task)
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
......
......@@ -69,13 +69,6 @@ typedef struct siginfo {
long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
int _fd;
} _sigpoll;
/* SIGPROF */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
unsigned long _pfm_ovfl_counters[4]; /* which PMU counter overflowed */
} _sigprof;
} _sifields;
} siginfo_t;
......@@ -94,14 +87,6 @@ typedef struct siginfo {
#define __ISR_VALID_BIT 0
#define __ISR_VALID (1 << __ISR_VALID_BIT)
/*
* si_code values
* Positive values for kernel-generated signals.
*/
#ifdef __KERNEL__
#define __SI_PROF (6 << 16)
#endif
/*
* SIGILL si_codes
*/
......@@ -137,11 +122,6 @@ typedef struct siginfo {
#undef NSIGTRAP
#define NSIGTRAP 4
/*
* SIGPROF si_codes
*/
#define PROF_OVFL (__SI_PROF|1) /* some counters overflowed */
#ifdef __KERNEL__
#include <linux/string.h>
......@@ -151,8 +131,8 @@ copy_siginfo (siginfo_t *to, siginfo_t *from)
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
else
/* _sigprof is currently the largest know union member */
memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigprof));
/* _sigchld is currently the largest known union member */
memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
}
extern int copy_siginfo_from_user(siginfo_t *to, siginfo_t *from);
......