Commit 90489a72 authored by Linus Torvalds

Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
 "The main kernel changes were:

   - add support for Intel's "adaptive PEBS v4" - which embeds LBR data
     in PEBS records and can thus batch up and reduce the IRQ (NMI) rate
     significantly - reducing overhead and making call-graph profiling
     less intrusive.

   - add Intel CPU core and uncore support updates for Tremont, Icelake,

   - extend the x86 PMU constraints scheduler with 'constraint ranges'
     to better support Icelake hw constraints,

   - make x86 call-chain support work without CONFIG_FRAME_POINTER (i.e.
     with the ORC unwinder)

   - misc other changes

  Tooling changes:

   - updates to the main tools: 'perf record', 'perf trace', 'perf
     stat'

   - updated Intel and S/390 vendor events

   - libtraceevent updates

   - misc other updates and fixes"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (69 commits)
  perf/x86: Make perf callchains work without CONFIG_FRAME_POINTER
  watchdog: Fix typo in comment
  perf/x86/intel: Add Tremont core PMU support
  perf/x86/intel/uncore: Add Intel Icelake uncore support
  perf/x86/msr: Add Icelake support
  perf/x86/intel/rapl: Add Icelake support
  perf/x86/intel/cstate: Add Icelake support
  perf/x86/intel: Add Icelake support
  perf/x86: Support constraint ranges
  perf/x86/lbr: Avoid reading the LBRs when adaptive PEBS handles them
  perf/x86/intel: Support adaptive PEBS v4
  perf/x86/intel/ds: Extract code of event update in short period
  perf/x86/intel: Extract memory code PEBS parser for reuse
  perf/x86: Support outputting XMM registers
  perf/x86/intel: Force resched when TFA sysctl is modified
  perf/core: Add perf_pmu_resched() as global function
  perf/headers: Fix stale comment for struct perf_addr_filter
  perf/core: Make perf_swevent_init_cpu() static
  perf/x86: Add sanity checks to x86_schedule_events()
  perf/x86: Optimize x86_schedule_events()
  ...
parents 007dc78f d15d3568
......@@ -560,6 +560,21 @@ int x86_pmu_hw_config(struct perf_event *event)
return -EINVAL;
}
/* sample_regs_user never support XMM registers */
if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
return -EINVAL;
/*
* Besides the general purpose registers, XMM registers may
* be collected in PEBS on some platforms, e.g. Icelake
*/
if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
if (x86_pmu.pebs_no_xmm_regs)
return -EINVAL;
if (!event->attr.precise_ip)
return -EINVAL;
}
return x86_setup_perfctr(event);
}
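
For context, the checks above mean XMM register values can only be sampled via sample_regs_intr on a precise (PEBS) event, and only where the PMU supports them. A minimal, hypothetical userspace sketch of an attribute setup that passes these checks (not code from this series; the XMM0 index of 32 comes from the uapi perf_regs.h hunk further down in this diff):

/* Hypothetical usage sketch: sample both 64-bit halves of XMM0 on a precise
 * event.  Using sample_regs_user, or a non-precise event, would be rejected
 * by the checks in x86_pmu_hw_config() above. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_xmm_sampling_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.sample_period = 100003;
	attr.precise_ip = 1;					/* PEBS */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR;
	attr.sample_regs_intr = 0x3ULL << 32;			/* PERF_REG_X86_XMM0 and its high half */
	attr.exclude_kernel = 1;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}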
......@@ -661,6 +676,10 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
struct pmu *x86_get_pmu(void)
{
return &pmu;
}
/*
* Event scheduler state:
*
......@@ -849,18 +868,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
struct event_constraint *c;
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
int i, wmin, wmax, unsched = 0;
int n0, i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
/*
* Compute the number of events already present; see x86_pmu_add(),
* validate_group() and x86_pmu_commit_txn(). For the former two
* cpuc->n_events hasn't been updated yet, while for the latter
* cpuc->n_txn contains the number of events added in the current
* transaction.
*/
n0 = cpuc->n_events;
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
n0 -= cpuc->n_txn;
if (x86_pmu.start_scheduling)
x86_pmu.start_scheduling(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
cpuc->event_constraint[i] = NULL;
c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
cpuc->event_constraint[i] = c;
c = cpuc->event_constraint[i];
/*
* Previously scheduled events should have a cached constraint,
* while new events should not have one.
*/
WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));
/*
* Request constraints for new events; or for those events that
* have a dynamic constraint -- for those the constraint can
* change due to external factors (sibling state, allow_tfa).
*/
if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
cpuc->event_constraint[i] = c;
}
wmin = min(wmin, c->weight);
wmax = max(wmax, c->weight);
......@@ -925,25 +969,20 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
e->hw.flags |= PERF_X86_EVENT_COMMITTED;
if (x86_pmu.commit_scheduling)
x86_pmu.commit_scheduling(cpuc, i, assign[i]);
}
} else {
for (i = 0; i < n; i++) {
for (i = n0; i < n; i++) {
e = cpuc->event_list[i];
/*
* do not put_constraint() on comitted events,
* because they are good to go
*/
if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
continue;
/*
* release events that failed scheduling
*/
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
cpuc->event_constraint[i] = NULL;
}
}
......@@ -1372,11 +1411,6 @@ static void x86_pmu_del(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
/*
* event is descheduled
*/
event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
/*
* If we're called during a txn, we only need to undo x86_pmu.add.
* The events never got scheduled and ->cancel_txn will truncate
......@@ -1413,6 +1447,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
perf_event_update_userpage(event);
......@@ -2024,7 +2059,7 @@ static int validate_event(struct perf_event *event)
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
if (!c || !c->weight)
ret = -EINVAL;
......@@ -2072,8 +2107,7 @@ static int validate_group(struct perf_event *event)
if (n < 0)
goto out;
fake_cpuc->n_events = n;
fake_cpuc->n_events = 0;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out:
......@@ -2348,6 +2382,15 @@ void arch_perf_update_userpage(struct perf_event *event,
cyc2ns_read_end();
}
/*
* Determine whether the regs were taken from an irq/exception handler rather
* than from perf_arch_fetch_caller_regs().
*/
static bool perf_hw_regs(struct pt_regs *regs)
{
return regs->flags & X86_EFLAGS_FIXED;
}
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
......@@ -2359,11 +2402,15 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
return;
}
if (perf_callchain_store(entry, regs->ip))
return;
if (perf_hw_regs(regs)) {
if (perf_callchain_store(entry, regs->ip))
return;
unwind_start(&state, current, regs, NULL);
} else {
unwind_start(&state, current, NULL, (void *)regs->sp);
}
for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
for (; !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
......
......@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
......
......@@ -488,6 +488,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
* be 'new'. Conversely, a new event can get installed through the
* context switch path for the first time.
*/
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
cpuc->lbr_pebs_users++;
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
......@@ -507,8 +509,11 @@ void intel_pmu_lbr_del(struct perf_event *event)
task_ctx->lbr_callstack_users--;
}
if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
cpuc->lbr_pebs_users--;
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
}
......@@ -658,7 +663,13 @@ void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!cpuc->lbr_users)
/*
* Don't read when all LBRs users are using adaptive PEBS.
*
* This could be smarter and actually check the event,
* but this simple approach seems to work for now.
*/
if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
return;
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
......@@ -1080,6 +1091,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
}
}
void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
u64 info = lbr->lbr[i].info;
struct perf_branch_entry *e = &cpuc->lbr_entries[i];
e->from = lbr->lbr[i].from;
e->to = lbr->lbr[i].to;
e->mispred = !!(info & LBR_INFO_MISPRED);
e->predicted = !(info & LBR_INFO_MISPRED);
e->in_tx = !!(info & LBR_INFO_IN_TX);
e->abort = !!(info & LBR_INFO_ABORT);
e->cycles = info & LBR_INFO_CYCLES;
e->reserved = 0;
}
intel_pmu_lbr_filter(cpuc);
}
/*
* Map interface branch filters onto LBR filters
*/
......
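intel_pmu_store_pebs_lbrs() above is the glue that feeds LBR data embedded in an adaptive PEBS record into the normal branch-stack machinery, which is also why intel_pmu_lbr_read() can now skip the MSR reads when every LBR user is a PEBS-baseline event. From the consumer side nothing changes; a hedged userspace sketch of a branch-stack request that would take this path on adaptive-PEBS hardware (illustrative only, not part of this series):

/* Hypothetical sketch: a precise event sampling the branch stack.  On
 * adaptive PEBS v4 hardware this series delivers the LBR entries inside the
 * PEBS record and copies them out via intel_pmu_store_pebs_lbrs(). */
#include <linux/perf_event.h>
#include <string.h>

static void init_branch_sampling_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_INSTRUCTIONS;
	attr->sample_period = 100003;
	attr->precise_ip = 2;				/* PEBS */
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
}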
......@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
{},
};
......
......@@ -1367,6 +1367,11 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
.pci_init = skx_uncore_pci_init,
};
static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
.cpu_init = icl_uncore_cpu_init,
.pci_init = skl_uncore_pci_init,
};
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
......@@ -1393,6 +1398,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
{},
};
......
......@@ -512,6 +512,7 @@ int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
......
......@@ -34,6 +34,8 @@
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
......@@ -93,6 +95,12 @@
#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)
/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG 0x396
#define ICL_UNC_NUM_CBO_MASK 0xf
#define ICL_UNC_CBO_0_PER_CTR0 0x702
#define ICL_UNC_CBO_MSR_OFFSET 0x8
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
......@@ -280,6 +288,70 @@ void skl_uncore_cpu_init(void)
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
static struct intel_uncore_type icl_uncore_cbox = {
.name = "cbox",
.num_counters = 4,
.perf_ctr_bits = 44,
.perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
.event_mask = SNB_UNC_RAW_EVENT_MASK,
.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
.ops = &skl_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
};
static struct uncore_event_desc icl_uncore_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
{ /* end: all zeroes */ },
};
static struct attribute *icl_uncore_clock_formats_attr[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group icl_uncore_clock_format_group = {
.name = "format",
.attrs = icl_uncore_clock_formats_attr,
};
static struct intel_uncore_type icl_uncore_clockbox = {
.name = "clock",
.num_counters = 1,
.num_boxes = 1,
.fixed_ctr_bits = 48,
.fixed_ctr = SNB_UNC_FIXED_CTR,
.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
.single_fixed = 1,
.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
.format_group = &icl_uncore_clock_format_group,
.ops = &skl_uncore_msr_ops,
.event_descs = icl_uncore_events,
};
static struct intel_uncore_type *icl_msr_uncores[] = {
&icl_uncore_cbox,
&snb_uncore_arb,
&icl_uncore_clockbox,
NULL,
};
static int icl_get_cbox_num(void)
{
u64 num_boxes;
rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
return num_boxes & ICL_UNC_NUM_CBO_MASK;
}
void icl_uncore_cpu_init(void)
{
uncore_msr_uncores = icl_msr_uncores;
icl_uncore_cbox.num_boxes = icl_get_cbox_num();
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
enum {
SNB_PCI_UNCORE_IMC,
};
......@@ -668,6 +740,18 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* end: all zeroes */ },
};
static const struct pci_device_id icl_uncore_pci_ids[] = {
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
.name = "snb_uncore",
.id_table = snb_uncore_pci_ids,
......@@ -693,6 +777,11 @@ static struct pci_driver skl_uncore_pci_driver = {
.id_table = skl_uncore_pci_ids,
};
static struct pci_driver icl_uncore_pci_driver = {
.name = "icl_uncore",
.id_table = icl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
__u32 pci_id;
struct pci_driver *driver;
......@@ -732,6 +821,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
{ /* end marker */ }
};
......
......@@ -89,6 +89,7 @@ static bool test_intel(int idx)
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
case INTEL_FAM6_ICELAKE_MOBILE:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
......
......@@ -49,28 +49,33 @@ struct event_constraint {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
int overlap;
int flags;
u64 code;
u64 cmask;
int weight;
int overlap;
int flags;
unsigned int size;
};
static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
/*
* struct hw_perf_event.flags flags
*/
#define PERF_X86_EVENT_PEBS_LDLAT 0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST 0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW 0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW 0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW 0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_LD_HSW 0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
struct amd_nb {
int nb_id; /* NorthBridge id */
......@@ -116,6 +121,24 @@ struct amd_nb {
(1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15))
#define PEBS_XMM_REGS \
((1ULL << PERF_REG_X86_XMM0) | \
(1ULL << PERF_REG_X86_XMM1) | \
(1ULL << PERF_REG_X86_XMM2) | \
(1ULL << PERF_REG_X86_XMM3) | \
(1ULL << PERF_REG_X86_XMM4) | \
(1ULL << PERF_REG_X86_XMM5) | \
(1ULL << PERF_REG_X86_XMM6) | \
(1ULL << PERF_REG_X86_XMM7) | \
(1ULL << PERF_REG_X86_XMM8) | \
(1ULL << PERF_REG_X86_XMM9) | \
(1ULL << PERF_REG_X86_XMM10) | \
(1ULL << PERF_REG_X86_XMM11) | \
(1ULL << PERF_REG_X86_XMM12) | \
(1ULL << PERF_REG_X86_XMM13) | \
(1ULL << PERF_REG_X86_XMM14) | \
(1ULL << PERF_REG_X86_XMM15))
/*
* Per register state.
*/
......@@ -207,10 +230,16 @@ struct cpu_hw_events {
int n_pebs;
int n_large_pebs;
/* Current super set of events hardware configuration */
u64 pebs_data_cfg;
u64 active_pebs_data_cfg;
int pebs_record_size;
/*
* Intel LBR bits
*/
int lbr_users;
int lbr_pebs_users;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
......@@ -257,18 +286,29 @@ struct cpu_hw_events {
void *kfree_on_online[X86_PERF_KFREE_MAX];
};
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
{ .idxmsk64 = (n) }, \
.code = (c), \
.size = (e) - (c), \
.cmask = (m), \
.weight = (w), \
.overlap = (o), \
.flags = f, \
}
#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
/*
* The constraint_match() function only works for 'simple' event codes
* and not for extended (AMD64_EVENTSEL_EVENT) events codes.
*/
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
0, PERF_X86_EVENT_EXCL)
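
A short worked example of the new range constraints (the event codes here are made up purely for illustration): with the event-select byte as the mask, a single constraint now covers every code from .code to .code + .size.

/* Sketch: a range constraint accepting event codes 0xa8 through 0xab on
 * counters 0-3. */
static struct event_constraint example_range =
	EVENT_CONSTRAINT_RANGE(0xa8, 0xab, 0xf, ARCH_PERFMON_EVENTSEL_EVENT);

/*
 * constraint_match() (added above) evaluates
 *     ((ecode & cmask) - code) <= size
 * in unsigned arithmetic, so 0xa9 matches (0xa9 - 0xa8 = 1 <= 3) while 0xb0
 * (difference 8) and 0xa0 (which underflows to a huge value) do not.
 */
static bool example_event_matches(u64 config)
{
	return constraint_match(&example_range, config);
}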
......@@ -303,6 +343,12 @@ struct cpu_hw_events {
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on a range of Event codes
*/
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \
EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
......@@ -350,6 +396,9 @@ struct cpu_hw_events {
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \
EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
......@@ -366,6 +415,11 @@ struct cpu_hw_events {
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
__EVENT_CONSTRAINT_RANGE(code, end, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
__EVENT_CONSTRAINT(code, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
......@@ -473,6 +527,7 @@ union perf_capabilities {
* values > 32bit.
*/
u64 full_width_write:1;
u64 pebs_baseline:1;
};
u64 capabilities;
};
......@@ -613,14 +668,16 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1,
pebs_no_tlb :1,
pebs_no_isolation :1;
pebs_no_isolation :1,
pebs_no_xmm_regs :1;
int pebs_record_size;
int pebs_buffer_size;
int max_pebs_events;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
int max_pebs_events;
unsigned long large_pebs_flags;
u64 rtm_abort_event;
/*
* Intel LBR
......@@ -714,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \
.event_str_ht = ht, \
}
struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
......@@ -941,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[];
extern struct event_constraint intel_skl_pebs_event_constraints[];
extern struct event_constraint intel_icl_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
......@@ -959,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
......
......@@ -8,7 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
#define MAX_FIXED_PEBS_EVENTS 3
#define MAX_FIXED_PEBS_EVENTS 4
/*
* A debug store configuration.
......
......@@ -116,6 +116,7 @@
#define LBR_INFO_CYCLES 0xffff
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_PEBS_DATA_CFG 0x000003f2
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
......
......@@ -7,7 +7,7 @@
*/
#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 3
#define INTEL_PMC_MAX_FIXED 4
#define INTEL_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
......@@ -32,6 +32,8 @@
#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
......@@ -87,6 +89,12 @@
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7
#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
......@@ -176,6 +184,41 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* Adaptive PEBS v4
*/
struct pebs_basic {
u64 format_size;
u64 ip;
u64 applicable_counters;
u64 tsc;
};
struct pebs_meminfo {
u64 address;
u64 aux;
u64 latency;
u64 tsx_tuning;
};
struct pebs_gprs {
u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
u64 r8, r9, r10, r11, r12, r13, r14, r15;
};
struct pebs_xmm {
u64 xmm[16*2]; /* two entries for each register */
};
struct pebs_lbr_entry {
u64 from, to, info;
};
struct pebs_lbr {
struct pebs_lbr_entry lbr[0]; /* Variable length */
};
/*
* IBS cpuid feature detection
*/
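
The structures above make up the variable-length adaptive PEBS record: pebs_basic is always present, followed (in this order) by meminfo, GPRs, XMM state and a run of LBR entries, depending on what was requested in MSR_PEBS_DATA_CFG. Below is a hedged sketch of walking one record; it assumes, based on how the rest of this series appears to use these fields (the ds.c diff is collapsed here), that the low bits of pebs_basic.format_size echo the PEBS_DATACFG_* selection and that the LBR entry count (minus one) sits at PEBS_DATACFG_LBR_SHIFT.

/* Sketch only: walk one adaptive PEBS record and return a pointer just past
 * it.  The layout order follows the struct definitions above; the
 * format_size interpretation is an assumption, not spelled out in this hunk. */
static void *walk_adaptive_pebs_record(void *record)
{
	struct pebs_basic *basic = record;
	u64 cfg = basic->format_size;
	void *next = basic + 1;

	if (cfg & PEBS_DATACFG_MEMINFO) {
		struct pebs_meminfo *mi = next;
		next = mi + 1;			/* mi->address, mi->latency, ... */
	}
	if (cfg & PEBS_DATACFG_GP) {
		struct pebs_gprs *gprs = next;
		next = gprs + 1;		/* gprs->ip, gprs->sp, ... */
	}
	if (cfg & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next;
		next = xmm + 1;			/* two u64 halves per XMM register */
	}
	if (cfg & PEBS_DATACFG_LBRS) {
		struct pebs_lbr *lbr = next;
		/* assumed encoding: the field holds (number of LBR entries - 1) */
		unsigned int nr = ((cfg >> PEBS_DATACFG_LBR_SHIFT) & 0xff) + 1;

		next = &lbr->lbr[nr];
	}
	return next;
}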
......@@ -248,6 +291,11 @@ extern void perf_events_lapic_init(void);
#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
struct x86_perf_regs {
struct pt_regs regs;
u64 *xmm_regs;
};
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
......@@ -260,14 +308,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
*/
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ip = (__ip); \
(regs)->bp = caller_frame_pointer(); \
(regs)->sp = (unsigned long)__builtin_frame_address(0); \
(regs)->cs = __KERNEL_CS; \
regs->flags = 0; \
asm volatile( \
_ASM_MOV "%%"_ASM_SP ", %0\n" \
: "=m" ((regs)->sp) \
:: "memory" \
); \
}
struct perf_guest_switch_msr {
......
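Note how this ties into the callchain hunk near the top of this diff: regs fabricated in-line by the macro above keep flags at 0, so perf_hw_regs() does not see X86_EFLAGS_FIXED and the unwinder is started from the captured sp rather than treating the regs as real interrupt state. Restated as a small kernel-side sketch (illustration only; the real decision lives in perf_callchain_kernel() above):

/* Illustration: the two unwind entry points picked by perf_callchain_kernel().
 * X86_EFLAGS_FIXED (bit 1) is always set in genuinely saved EFLAGS, while
 * perf_arch_fetch_caller_regs() deliberately leaves flags == 0. */
#include <asm/unwind.h>

static void start_unwind_for_perf(struct unwind_state *state,
				  struct task_struct *task,
				  struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_FIXED)		/* real irq/exception regs */
		unwind_start(state, task, regs, NULL);
	else						/* fabricated in-line regs */
		unwind_start(state, task, NULL, (void *)regs->sp);
}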
......@@ -98,19 +98,6 @@ struct stack_frame_ia32 {
u32 return_address;
};
static inline unsigned long caller_frame_pointer(void)
{
struct stack_frame *frame;
frame = __builtin_frame_address(0);
#ifdef CONFIG_FRAME_POINTER
frame = frame->next_frame;
#endif
return (unsigned long)frame;
}
void show_opcodes(struct pt_regs *regs, const char *loglvl);
void show_ip(struct pt_regs *regs, const char *loglvl);
#endif /* _ASM_X86_STACKTRACE_H */
......@@ -27,8 +27,29 @@ enum perf_event_x86_regs {
PERF_REG_X86_R13,
PERF_REG_X86_R14,
PERF_REG_X86_R15,
/* These are the limits for the GPRs. */
PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
/* These all need two bits set because they are 128bit */
PERF_REG_X86_XMM0 = 32,
PERF_REG_X86_XMM1 = 34,
PERF_REG_X86_XMM2 = 36,
PERF_REG_X86_XMM3 = 38,
PERF_REG_X86_XMM4 = 40,
PERF_REG_X86_XMM5 = 42,
PERF_REG_X86_XMM6 = 44,
PERF_REG_X86_XMM7 = 46,
PERF_REG_X86_XMM8 = 48,
PERF_REG_X86_XMM9 = 50,
PERF_REG_X86_XMM10 = 52,
PERF_REG_X86_XMM11 = 54,
PERF_REG_X86_XMM12 = 56,
PERF_REG_X86_XMM13 = 58,
PERF_REG_X86_XMM14 = 60,
PERF_REG_X86_XMM15 = 62,
/* These include both GPRs and XMMX registers */
PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
};
#endif /* _ASM_X86_PERF_REGS_H */
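
Each XMM register is 128 bits wide, which is why the enum above strides by two: one sample-register index per 64-bit half, and a consumer sets two mask bits per register it wants. A hypothetical userspace helper that reassembles one register from the packed PERF_SAMPLE_REGS_INTR payload (the slot index depends on which mask bits were requested):

/* Sketch: rebuild a 128-bit XMM value from two consecutive u64 slots of a
 * regs-intr sample.  'vals' is the packed array of sampled register values,
 * in ascending order of the bits set in sample_regs_intr. */
#include <stdint.h>

struct xmm_value {
	uint64_t lo;
	uint64_t hi;
};

static struct xmm_value read_xmm(const uint64_t *vals, unsigned int first_slot)
{
	struct xmm_value v = {
		.lo = vals[first_slot],		/* e.g. the PERF_REG_X86_XMM0 slot */
		.hi = vals[first_slot + 1],	/* e.g. the PERF_REG_X86_XMM0 + 1 slot */
	};

	return v;
}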
......@@ -59,18 +59,34 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
struct x86_perf_regs *perf_regs;
if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
perf_regs = container_of(regs, struct x86_perf_regs, regs);
if (!perf_regs->xmm_regs)
return 0;
return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
}
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}
#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
#ifdef CONFIG_X86_32
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
(1ULL << PERF_REG_X86_R9) | \
(1ULL << PERF_REG_X86_R10) | \
(1ULL << PERF_REG_X86_R11) | \
(1ULL << PERF_REG_X86_R12) | \
(1ULL << PERF_REG_X86_R13) | \
(1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15))
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
......@@ -96,10 +112,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
if (mask & REG_NOSUPPORT)
if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
......
......@@ -463,7 +463,7 @@ enum perf_addr_filter_action_t {
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
* @inode: object file's inode for file-based filters
* @path: object file's path for file-based filters
* @offset: filter range offset
* @size: filter range size (size==0 means single address trigger)
* @action: filter/start/stop
......@@ -887,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
......@@ -1054,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
#endif
/*
* Take a snapshot of the regs. Skip ip and frame pointer to
* the nth caller. We only need a few of the regs:
* When generating a perf sample in-line, instead of from an interrupt /
* exception, we lack a pt_regs. This is typically used from software events
* like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
*
* We typically don't need a full set, but (for x86) do require:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
* - bp for callchains
* - eflags, for future purposes, just in case
* - sp for PERF_SAMPLE_CALLCHAIN
* - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
*
* NOTE: assumes @regs is otherwise already 0 filled; this is important for
* things like PERF_SAMPLE_REGS_INTR.
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
......
......@@ -2478,6 +2478,16 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_pmu_enable(cpuctx->ctx.pmu);
}
void perf_pmu_resched(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
perf_ctx_lock(cpuctx, task_ctx);
ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
perf_ctx_unlock(cpuctx, task_ctx);
}
/*
* Cross CPU call to install and enable a performance event
*
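
perf_pmu_resched() gives a PMU driver a way to force every active context to be rescheduled when its scheduling constraints change behind perf's back; its first user in this series is the 'Force resched when TFA sysctl is modified' change from the shortlog (that diff is collapsed here). A hedged, hypothetical sketch of the pattern, not the actual intel.c code:

/* Hypothetical driver knob whose value changes which counters events may use.
 * After flipping it, force a reschedule so stale constraint decisions are
 * thrown away and events are re-placed under the new rules. */
static bool my_constraint_knob;

static void my_driver_set_knob(bool val)
{
	if (val == my_constraint_knob)
		return;

	my_constraint_knob = val;

	/* x86_get_pmu() is the helper added near the top of this diff. */
	perf_pmu_resched(x86_get_pmu());
}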
......@@ -11917,7 +11927,7 @@ static void __init perf_event_init_all_cpus(void)
}
}
void perf_swevent_init_cpu(unsigned int cpu)
static void perf_swevent_init_cpu(unsigned int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
......
......@@ -590,7 +590,7 @@ static void lockup_detector_reconfigure(void)
* Create the watchdog thread infrastructure and configure the detector(s).
*
* The threads are not unparked as watchdog_allowed_mask is empty. When
* the threads are sucessfully initialized, take the proper locks and
* the threads are successfully initialized, take the proper locks and
* unpark the threads in the watchdog_cpumask if the watchdog is enabled.
*/
static __init void lockup_detector_setup(void)
......
......@@ -67,6 +67,7 @@ FEATURE_TESTS_BASIC := \
sdt \
setns \
libaio \
libzstd \
disassembler-four-args
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
......@@ -120,6 +121,7 @@ FEATURE_DISPLAY ?= \
get_cpuid \
bpf \
libaio \
libzstd \
disassembler-four-args
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
......
......@@ -62,7 +62,8 @@ FILES= \
test-clang.bin \
test-llvm.bin \
test-llvm-version.bin \
test-libaio.bin
test-libaio.bin \
test-libzstd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
......@@ -301,6 +302,9 @@ $(OUTPUT)test-clang.bin:
$(OUTPUT)test-libaio.bin:
$(BUILD) -lrt
$(OUTPUT)test-libzstd.bin:
$(BUILD) -lzstd
###############################
clean:
......
......@@ -182,6 +182,10 @@
# include "test-disassembler-four-args.c"
#undef main
#define main main_test_zstd
# include "test-libzstd.c"
#undef main
int main(int argc, char *argv[])
{
main_test_libpython();
......@@ -224,6 +228,7 @@ int main(int argc, char *argv[])
main_test_libaio();
main_test_reallocarray();
main_test_disassembler_four_args();
main_test_libzstd();
return 0;
}
// SPDX-License-Identifier: GPL-2.0
#include <zstd.h>
int main(void)
{
ZSTD_CStream *cstream;
cstream = ZSTD_createCStream();
ZSTD_freeCStream(cstream);
return 0;
}
......@@ -92,8 +92,8 @@ struct tep_handle {
void tep_free_event(struct tep_event *event);
void tep_free_format_field(struct tep_format_field *field);
unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data);
#endif /* _PARSE_EVENTS_INT_H */
......@@ -269,7 +269,7 @@ void tep_print_plugins(struct trace_seq *s,
}
static void
load_plugin(struct tep_handle *pevent, const char *path,
load_plugin(struct tep_handle *tep, const char *path,
const char *file, void *data)
{
struct tep_plugin_list **plugin_list = data;
......@@ -316,7 +316,7 @@ load_plugin(struct tep_handle *pevent, const char *path,
*plugin_list = list;
pr_stat("registering plugin: %s", plugin);
func(pevent);
func(tep);
return;
out_free:
......@@ -324,9 +324,9 @@ load_plugin(struct tep_handle *pevent, const char *path,
}
static void
load_plugins_dir(struct tep_handle *pevent, const char *suffix,
load_plugins_dir(struct tep_handle *tep, const char *suffix,
const char *path,
void (*load_plugin)(struct tep_handle *pevent,
void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
......@@ -359,15 +359,15 @@ load_plugins_dir(struct tep_handle *pevent, const char *suffix,
if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
continue;
load_plugin(pevent, path, name, data);
load_plugin(tep, path, name, data);
}
closedir(dir);
}
static void
load_plugins(struct tep_handle *pevent, const char *suffix,
void (*load_plugin)(struct tep_handle *pevent,
load_plugins(struct tep_handle *tep, const char *suffix,
void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
......@@ -378,7 +378,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
char *envdir;
int ret;
if (pevent->flags & TEP_DISABLE_PLUGINS)
if (tep->flags & TEP_DISABLE_PLUGINS)
return;
/*
......@@ -386,8 +386,8 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
* check that first.
*/
#ifdef PLUGIN_DIR
if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS))
load_plugins_dir(pevent, suffix, PLUGIN_DIR,
if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
load_plugins_dir(tep, suffix, PLUGIN_DIR,
load_plugin, data);
#endif
......@@ -397,7 +397,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
*/
envdir = getenv("TRACEEVENT_PLUGIN_DIR");
if (envdir)
load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
load_plugins_dir(tep, suffix, envdir, load_plugin, data);
/*
* Now let the home directory override the environment
......@@ -413,22 +413,22 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
return;
}
load_plugins_dir(pevent, suffix, path, load_plugin, data);
load_plugins_dir(tep, suffix, path, load_plugin, data);
free(path);
}
struct tep_plugin_list*
tep_load_plugins(struct tep_handle *pevent)
tep_load_plugins(struct tep_handle *tep)
{
struct tep_plugin_list *list = NULL;
load_plugins(pevent, ".so", load_plugin, &list);
load_plugins(tep, ".so", load_plugin, &list);
return list;
}
void
tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent)
tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
{
tep_plugin_unload_func func;
struct tep_plugin_list *list;
......@@ -438,7 +438,7 @@ tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *peven
plugin_list = list->next;
func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
if (func)
func(pevent);
func(tep);
dlclose(list->handle);
free(list->name);
free(list);
......
......@@ -727,3 +727,52 @@ int kbuffer_start_of_data(struct kbuffer *kbuf)
{
return kbuf->start;
}
/**
* kbuffer_raw_get - get raw buffer info
* @kbuf: The kbuffer
* @subbuf: Start of mapped subbuffer
* @info: Info descriptor to fill in
*
* For debugging. This can return internals of the ring buffer.
* Expects to have info->next set to what it will read.
* The type, length and timestamp delta will be filled in, and
* @info->next will be updated to the next element.
* The @subbuf is used to know if the info is passed the end of
* data and NULL will be returned if it is.
*/
struct kbuffer_raw_info *
kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
{
unsigned long long flags;
unsigned long long delta;
unsigned int type_len;
unsigned int size;
int start;
int length;
void *ptr = info->next;
if (!kbuf || !subbuf)
return NULL;
if (kbuf->flags & KBUFFER_FL_LONG_8)
start = 16;
else
start = 12;
flags = read_long(kbuf, subbuf + 8);
size = (unsigned int)flags & COMMIT_MASK;
if (ptr < subbuf || ptr >= subbuf + start + size)
return NULL;
type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
info->next = ptr + length;
info->type = type_len;
info->delta = delta;
info->length = length;
return info;
}
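
A small usage sketch for the new debugging helper, assuming the subbuffer has already been mapped and loaded into the kbuffer with the existing API (e.g. kbuffer_load_subbuffer()):

/* Sketch: dump the raw type/length/delta of every entry in one mapped
 * subbuffer.  info.next must be primed to the first byte of event data,
 * which kbuffer_start_of_data() locates within the subbuffer. */
#include <stdio.h>
#include "kbuffer.h"

static void dump_raw_entries(struct kbuffer *kbuf, void *subbuf)
{
	struct kbuffer_raw_info info;

	info.next = (char *)subbuf + kbuffer_start_of_data(kbuf);

	while (kbuffer_raw_get(kbuf, subbuf, &info)) {
		printf("type_len=%d length=%d delta=%llu\n",
		       info.type, info.length, info.delta);
	}
}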
......@@ -65,4 +65,17 @@ int kbuffer_subbuffer_size(struct kbuffer *kbuf);
void kbuffer_set_old_format(struct kbuffer *kbuf);
int kbuffer_start_of_data(struct kbuffer *kbuf);
/* Debugging */
struct kbuffer_raw_info {
int type;
int length;
unsigned long long delta;
void *next;
};
/* Read raw data */
struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf,
struct kbuffer_raw_info *info);
#endif /* _K_BUFFER_H */
......@@ -25,9 +25,9 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
return val ? (long long) le16toh(*val) : 0;
}
int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
tep_register_print_function(pevent,
tep_register_print_function(tep,
process___le16_to_cpup,
TEP_FUNC_ARG_INT,
"__le16_to_cpup",
......@@ -36,8 +36,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
tep_unregister_print_function(pevent, process___le16_to_cpup,
tep_unregister_print_function(tep, process___le16_to_cpup,
"__le16_to_cpup");
}
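
Most of the libtraceevent changes in this pull are a mechanical rename of the handle parameter from 'pevent' to 'tep'. For orientation, a minimal plugin skeleton in the new spelling; the "myapp"/"my_event" names and the handler body are placeholders, not anything touched by this series:

/* Hypothetical plugin skeleton using the renamed 'tep' handle parameter. */
#include "event-parse.h"
#include "trace-seq.h"

static int my_event_handler(struct trace_seq *s, struct tep_record *record,
			    struct tep_event *event, void *context)
{
	return trace_seq_printf(s, "my_event on cpu %d", record->cpu);
}

int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
	tep_register_event_handler(tep, -1, "myapp", "my_event",
				   my_event_handler, NULL);
	return 0;
}

void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
	tep_unregister_event_handler(tep, -1, "myapp", "my_event",
				     my_event_handler, NULL);
}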
......@@ -126,7 +126,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
static int function_handler(struct trace_seq *s, struct tep_record *record,
struct tep_event *event, void *context)
{
struct tep_handle *pevent = event->pevent;
struct tep_handle *tep = event->tep;
unsigned long long function;
unsigned long long pfunction;
const char *func;
......@@ -136,12 +136,12 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
if (tep_get_field_val(s, event, "ip", record, &function, 1))
return trace_seq_putc(s, '!');
func = tep_find_function(pevent, function);
func = tep_find_function(tep, function);
if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
return trace_seq_putc(s, '!');
parent = tep_find_function(pevent, pfunction);
parent = tep_find_function(tep, pfunction);
if (parent && ftrace_indent->set)
index = add_and_get_index(parent, func, record->cpu);
......@@ -164,9 +164,9 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
return 0;
}
int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
tep_register_event_handler(pevent, -1, "ftrace", "function",
tep_register_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
tep_plugin_add_options("ftrace", plugin_options);
......@@ -174,11 +174,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
int i, x;
tep_unregister_event_handler(pevent, -1, "ftrace", "function",
tep_unregister_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
for (i = 0; i <= cpus; i++) {
......
......@@ -87,17 +87,17 @@ static int drv_bss_info_changed(struct trace_seq *s,
return 0;
}
int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
tep_register_event_handler(pevent, -1, "mac80211",
tep_register_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
return 0;
}
void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
tep_unregister_event_handler(pevent, -1, "mac80211",
tep_unregister_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
}