Commit a6a0683b authored by Viresh Kumar

arch: x86: Remove CONFIG_OPROFILE support

The "oprofile" user-space tools don't use the kernel OPROFILE support
any more, and haven't in a long time. User-space has been converted to
the perf interfaces.

Remove oprofile's old architecture-specific support.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Robert Richter <rric@kernel.org>
Acked-by: William Cohen <wcohen@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
parent 2083fecd
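For context on the conversion mentioned in the commit message: the profiling role that oprofile's kernel side used to play is covered by the perf_events subsystem, which user space reaches through the perf_event_open() syscall. The snippet below is a minimal, illustrative sketch (it is not part of this commit) that counts CPU cycles for the calling thread; the choice of PERF_COUNT_HW_CPU_CYCLES, the placeholder workload, and the minimal error handling are assumptions made only for the example.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* glibc provides no wrapper for perf_event_open(), so invoke the syscall directly */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* illustrative event choice */
	attr.disabled = 1;			/* create stopped, enable explicitly below */
	attr.exclude_kernel = 1;		/* count user-space cycles only */

	/* pid == 0, cpu == -1: measure the calling thread on any CPU */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* ... workload to be measured goes here ... */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

Sampling-style profiling, the job of the NMI-driven code deleted below, is handled through the same interface by setting a sample_period and reading the mmap()ed ring buffer, or more commonly by just using the perf record / perf report tools.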
@@ -206,7 +206,6 @@ config X86
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_NMI
select HAVE_OPROFILE
select HAVE_OPTPROBES
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
......
@@ -229,9 +229,6 @@ core-y += arch/x86/
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
# must be linked after kernel/
drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/x86/power/
......
@@ -9,7 +9,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
......
@@ -3,7 +3,7 @@
* local apic based NMI watchdog for various CPUs.
*
* This file also handles reservation of performance counters for coordination
* with other users (like oprofile).
* with other users.
*
* Note that these events normally don't tick when the CPU idles. This means
* the frequency varies with CPU load.
@@ -105,15 +105,6 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
int reserve_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o nmi_timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
op_model_ppro.o op_model_p4.o
/**
* @file backtrace.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
* @author David Smith
*/
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_COMPAT
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame_ia32 bufhead[2];
struct stack_frame_ia32 *fp;
unsigned long bytes;
bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
if (bytes != 0)
return NULL;
fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
oprofile_add_trace(bufhead[0].return_address);
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
if (head >= fp)
return NULL;
return fp;
}
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
struct stack_frame_ia32 *head;
/* User process is IA32 */
if (!current || user_64bit_mode(regs))
return 0;
head = (struct stack_frame_ia32 *) regs->bp;
while (depth-- && head)
head = dump_user_backtrace_32(head);
return 1;
}
#else
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
return 0;
}
#endif /* CONFIG_COMPAT */
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
/* Also check accessibility of one struct frame_head beyond: */
struct stack_frame bufhead[2];
unsigned long bytes;
bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
if (bytes != 0)
return NULL;
oprofile_add_trace(bufhead[0].return_address);
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
if (head >= bufhead[0].next_frame)
return NULL;
return bufhead[0].next_frame;
}
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
if (!user_mode(regs)) {
struct unwind_state state;
unsigned long addr;
if (!depth)
return;
oprofile_add_trace(regs->ip);
if (!--depth)
return;
for (unwind_start(&state, current, regs, NULL);
!unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr)
break;
oprofile_add_trace(addr);
if (!--depth)
break;
}
return;
}
if (x86_backtrace_32(regs, depth))
return;
while (depth-- && head)
head = dump_user_backtrace(head);
}
/**
* @file init.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
/*
* We support CPUs that have performance counters like the Pentium Pro
* with the NMI mode driver.
*/
#ifdef CONFIG_X86_LOCAL_APIC
extern int op_nmi_init(struct oprofile_operations *ops);
extern void op_nmi_exit(void);
#else
static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
static void op_nmi_exit(void) { }
#endif
extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = x86_backtrace;
return op_nmi_init(ops);
}
void oprofile_arch_exit(void)
{
op_nmi_exit();
}
This diff is collapsed.
/**
* @file op_counter.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
*/
#ifndef OP_COUNTER_H
#define OP_COUNTER_H
#define OP_MAX_COUNTER 32
/* Per-perfctr configuration as set via
* oprofilefs.
*/
struct op_counter_config {
unsigned long count;
unsigned long enabled;
unsigned long event;
unsigned long kernel;
unsigned long user;
unsigned long unit_mask;
unsigned long extra;
};
extern struct op_counter_config counter_config[];
#endif /* OP_COUNTER_H */
This diff is collapsed.
This diff is collapsed.
/*
* @file op_model_ppro.h
* Family 6 perfmon and architectural perfmon MSR operations
*
* @remark Copyright 2002 OProfile authors
* @remark Copyright 2008 Intel Corporation
* @remark Read the file COPYING
*
* @author John Levon
* @author Philippe Elie
* @author Graydon Hoare
* @author Andi Kleen
* @author Robert Richter <robert.richter@amd.com>
*/
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include "op_x86_model.h"
#include "op_counter.h"
static int num_counters = 2;
static int counter_width = 32;
#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
static u64 reset_value[OP_MAX_COUNTER];
static void ppro_shutdown(struct op_msrs const * const msrs)
{
int i;
for (i = 0; i < num_counters; ++i) {
if (!msrs->counters[i].addr)
continue;
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
}
}
static int ppro_fill_in_addresses(struct op_msrs * const msrs)
{
int i;
for (i = 0; i < num_counters; i++) {
if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
goto fail;
if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
goto fail;
}
/* both registers must be reserved */
msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
continue;
fail:
if (!counter_config[i].enabled)
continue;
op_x86_warn_reserved(i);
ppro_shutdown(msrs);
return -EBUSY;
}
return 0;
}
static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs)
{
u64 val;
int i;
if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
union cpuid10_eax eax;
eax.full = cpuid_eax(0xa);
/*
* For Core2 (family 6, model 15), don't reset the
* counter width:
*/
if (!(eax.split.version_id == 0 &&
__this_cpu_read(cpu_info.x86) == 6 &&
__this_cpu_read(cpu_info.x86_model) == 15)) {
if (counter_width < eax.split.bit_width)
counter_width = eax.split.bit_width;
}
}
/* clear all counters */
for (i = 0; i < num_counters; ++i) {
if (!msrs->controls[i].addr)
continue;
rdmsrl(msrs->controls[i].addr, val);
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
op_x86_warn_in_use(i);
val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val);
/*
* avoid a false detection of ctr overflows in NMI *
* handler
*/
wrmsrl(msrs->counters[i].addr, -1LL);
}
/* enable active counters */
for (i = 0; i < num_counters; ++i) {
if (counter_config[i].enabled && msrs->counters[i].addr) {
reset_value[i] = counter_config[i].count;
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
rdmsrl(msrs->controls[i].addr, val);
val &= model->reserved;
val |= op_x86_get_ctrl(model, &counter_config[i]);
wrmsrl(msrs->controls[i].addr, val);
} else {
reset_value[i] = 0;
}
}
}
static int ppro_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
u64 val;
int i;
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
rdmsrl(msrs->counters[i].addr, val);
if (val & (1ULL << (counter_width - 1)))
continue;
oprofile_add_sample(regs, i);
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
}
/* Only P6 based Pentium M need to re-unmask the apic vector but it
* doesn't hurt other P6 variant */
apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
/* We can't work out if we really handled an interrupt. We
* might have caught a *second* counter just after overflowing
* the interrupt for this counter then arrives
* and we don't find a counter that's overflowed, so we
* would return 0 and get dazed + confused. Instead we always
* assume we found an overflow. This sucks.
*/
return 1;
}
static void ppro_start(struct op_msrs const * const msrs)
{
u64 val;
int i;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
rdmsrl(msrs->controls[i].addr, val);
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
}
}
static void ppro_stop(struct op_msrs const * const msrs)
{
u64 val;
int i;
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
rdmsrl(msrs->controls[i].addr, val);
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(msrs->controls[i].addr, val);
}
}
struct op_x86_model_spec op_ppro_spec = {
.num_counters = 2,
.num_controls = 2,
.reserved = MSR_PPRO_EVENTSEL_RESERVED,
.fill_in_addresses = &ppro_fill_in_addresses,
.setup_ctrs = &ppro_setup_ctrs,
.check_ctrs = &ppro_check_ctrs,
.start = &ppro_start,
.stop = &ppro_stop,
.shutdown = &ppro_shutdown
};
/*
* Architectural performance monitoring.
*
* Newer Intel CPUs (Core1+) have support for architectural
* events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
* The advantage of this is that it can be done without knowing about
* the specific CPU.
*/
static void arch_perfmon_setup_counters(void)
{
union cpuid10_eax eax;
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
}
num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
}
static int arch_perfmon_init(struct oprofile_operations *ignore)
{
arch_perfmon_setup_counters();
return 0;
}
struct op_x86_model_spec op_arch_perfmon_spec = {
.reserved = MSR_PPRO_EVENTSEL_RESERVED,
.init = &arch_perfmon_init,
/* num_counters/num_controls filled in at runtime */
.fill_in_addresses = &ppro_fill_in_addresses,
/* user space does the cpuid check for available events */
.setup_ctrs = &ppro_setup_ctrs,
.check_ctrs = &ppro_check_ctrs,
.start = &ppro_start,
.stop = &ppro_stop,
.shutdown = &ppro_shutdown
};
/**
* @file op_x86_model.h
* interface to x86 model-specific MSR operations
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author Graydon Hoare
* @author Robert Richter <robert.richter@amd.com>
*/
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
#include <asm/types.h>
#include <asm/perf_event.h>
struct op_msr {
unsigned long addr;
u64 saved;
};
struct op_msrs {
struct op_msr *counters;
struct op_msr *controls;
struct op_msr *multiplex;
};
struct pt_regs;
struct oprofile_operations;
/* The model vtable abstracts the differences between
* various x86 CPU models' perfctr support.
*/
struct op_x86_model_spec {
unsigned int num_counters;
unsigned int num_controls;
unsigned int num_virt_counters;
u64 reserved;
u16 event_mask;
int (*init)(struct oprofile_operations *ops);
int (*fill_in_addresses)(struct op_msrs * const msrs);
void (*setup_ctrs)(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs);
int (*check_ctrs)(struct pt_regs * const regs,
struct op_msrs const * const msrs);
void (*start)(struct op_msrs const * const msrs);
void (*stop)(struct op_msrs const * const msrs);
void (*shutdown)(struct op_msrs const * const msrs);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
void (*switch_ctrl)(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs);
#endif
};
struct op_counter_config;
static inline void op_x86_warn_in_use(int counter)
{
/*
* The warning indicates an already running counter. If
* oprofile doesn't collect data, then try using a different
* performance counter on your platform to monitor the desired
* event. Delete counter #%d from the desired event by editing
* the /usr/share/oprofile/%s/<cpu>/events file. If the event
* cannot be monitored by any other counter, contact your
* hardware or BIOS vendor.
*/
pr_warn("oprofile: counter #%d on cpu #%d may already be used\n",
counter, smp_processor_id());
}
static inline void op_x86_warn_reserved(int counter)
{
pr_warn("oprofile: counter #%d is already reserved\n", counter);
}
extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
struct op_counter_config *counter_config);
extern int op_x86_phys_to_virt(int phys);
extern int op_x86_virt_to_phys(int virt);
extern struct op_x86_model_spec op_ppro_spec;
extern struct op_x86_model_spec op_p4_spec;
extern struct op_x86_model_spec op_p4_ht2_spec;
extern struct op_x86_model_spec op_amd_spec;
extern struct op_x86_model_spec op_arch_perfmon_spec;
#endif /* OP_X86_MODEL_H */