Commit 5752ba41 authored by Stéphane Eranian, committed by David Mosberger

[PATCH] ia64: perfmon stack consumption fix

This patch moves the pfm_ovfl_arg_t structure from the kernel stack
into the pfm_context_t structure. This minimizes kernel stack space
consumption.
Signed-off-by: Stephane Eranian <eranian@hpl.hp.com>
Signed-off-by: David Mosberger <davidm@hpl.hp.com>
parent 8800eead
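
The change itself is a standard stack-relief pattern: a large scratch structure that used to be a local variable in the overflow path becomes a field of the long-lived context object, and the handler reaches it through a pointer. Below is a minimal sketch of that pattern; only the names pfm_ovfl_arg_t, pfm_context_t, ctx_ovfl_arg, and ovfl_arg come from the patch, while the struct contents and handler bodies are invented for illustration.

	/* Sketch of the stack-to-context move; layouts are illustrative,
	 * not the real perfmon definitions. */

	typedef struct {
		unsigned long	smpl_pmds_values[64];	/* the bulk of the structure */
		unsigned char	ovfl_pmd;
		unsigned char	ovfl_notify;
	} pfm_ovfl_arg_t;

	typedef struct pfm_context {
		/* ... other per-context state ... */
		pfm_ovfl_arg_t	ctx_ovfl_arg;	/* scratch area owned by the context */
	} pfm_context_t;

	/* Before: the whole structure is carved out of the handler's stack frame. */
	static void overflow_handler_before(pfm_context_t *ctx)
	{
		pfm_ovfl_arg_t ovfl_arg;	/* hundreds of bytes of kernel stack */

		ovfl_arg.ovfl_pmd = 0;		/* fill in, then pass &ovfl_arg on */
	}

	/* After: only a pointer sits on the stack; the data lives in the context. */
	static void overflow_handler_after(pfm_context_t *ctx)
	{
		pfm_ovfl_arg_t *ovfl_arg = &ctx->ctx_ovfl_arg;

		ovfl_arg->ovfl_pmd = 0;		/* same fills, through the pointer */
	}

The trade-off is safe here provided that, as in perfmon, a context's overflow handling is serialized, so the embedded scratch area can never be in use by two overflows at once; without that exclusion the per-context copy would need its own locking.
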
@@ -311,6 +311,7 @@ typedef struct pfm_context {
 	unsigned int		ctx_cpu;	/* cpu to which perfmon is applied (system wide) */
 	int			ctx_fd;		/* file descriptor used my this context */
+	pfm_ovfl_arg_t		ctx_ovfl_arg;	/* argument to custom buffer format handler */
 	pfm_buffer_fmt_t	*ctx_buf_fmt;	/* buffer format callbacks */
 	void			*ctx_smpl_hdr;	/* points to sampling buffer header kernel vaddr */
@@ -5160,7 +5161,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
 static void
 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
 {
-	pfm_ovfl_arg_t ovfl_arg;
+	pfm_ovfl_arg_t *ovfl_arg;
 	unsigned long mask;
 	unsigned long old_val, ovfl_val, new_val;
 	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
@@ -5247,7 +5248,8 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		int j, k, ret = 0;
 		int this_cpu = smp_processor_id();

 		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
+		ovfl_arg = &ctx->ctx_ovfl_arg;

 		prefetch(ctx->ctx_smpl_hdr);
@@ -5257,15 +5259,15 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		if ((pmd_mask & 0x1) == 0) continue;

-		ovfl_arg.ovfl_pmd      = (unsigned char )i;
-		ovfl_arg.ovfl_notify   = ovfl_notify & mask ? 1 : 0;
-		ovfl_arg.active_set    = 0;
-		ovfl_arg.ovfl_ctrl.val = 0; /* module must fill in all fields */
-		ovfl_arg.smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
+		ovfl_arg->ovfl_pmd      = (unsigned char )i;
+		ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
+		ovfl_arg->active_set    = 0;
+		ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
+		ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

-		ovfl_arg.pmd_value      = ctx->ctx_pmds[i].val;
-		ovfl_arg.pmd_last_reset = ctx->ctx_pmds[i].lval;
-		ovfl_arg.pmd_eventid    = ctx->ctx_pmds[i].eventid;
+		ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
+		ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
+		ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

 		/*
 		 * copy values of pmds of interest. Sampling format may copy them
@@ -5274,8 +5276,8 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		if (smpl_pmds) {
 			for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
 				if ((smpl_pmds & 0x1) == 0) continue;
-				ovfl_arg.smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
-				DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg.smpl_pmds_values[k-1]));
+				ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
+				DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
 			}
 		}
@@ -5286,7 +5288,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		/*
 		 * call custom buffer format record (handler) routine
 		 */
-		ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs, tstamp);
+		ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

 		end_cycles = ia64_get_itc();
@@ -5294,13 +5296,13 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		 * For those controls, we take the union because they have
 		 * an all or nothing behavior.
 		 */
-		ovfl_ctrl.bits.notify_user     |= ovfl_arg.ovfl_ctrl.bits.notify_user;
-		ovfl_ctrl.bits.block_task      |= ovfl_arg.ovfl_ctrl.bits.block_task;
-		ovfl_ctrl.bits.mask_monitoring |= ovfl_arg.ovfl_ctrl.bits.mask_monitoring;
+		ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
+		ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
+		ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
 		/*
 		 * build the bitmask of pmds to reset now
 		 */
-		if (ovfl_arg.ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
+		if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

 		pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
 	}
...