Commit 69e1242e authored by Mathieu Desnoyers, committed by Greg Kroah-Hartman

lttng wrappers

Implement wrappers for compatibility with older kernel versions and
kernels which had the old libringbuffer patchset applied.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 848afbd7
#ifndef _LTT_WRAPPER_FTRACE_H
#define _LTT_WRAPPER_FTRACE_H
/*
 * wrapper/ftrace.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * wrapper around register_ftrace_function_probe() and
 * unregister_ftrace_function_probe(). Using KALLSYMS to get their addresses
 * when available, else we need to have a kernel that exports these functions
 * to GPL modules.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/ftrace.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include "kallsyms.h"

static inline
int wrapper_register_ftrace_function_probe(char *glob,
                struct ftrace_probe_ops *ops, void *data)
{
        int (*register_ftrace_function_probe_sym)(char *glob,
                        struct ftrace_probe_ops *ops, void *data);

        register_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("register_ftrace_function_probe");
        if (register_ftrace_function_probe_sym) {
                return register_ftrace_function_probe_sym(glob, ops, data);
        } else {
                printk(KERN_WARNING "LTTng: register_ftrace_function_probe symbol lookup failed.\n");
                return -EINVAL;
        }
}

static inline
void wrapper_unregister_ftrace_function_probe(char *glob,
                struct ftrace_probe_ops *ops, void *data)
{
        void (*unregister_ftrace_function_probe_sym)(char *glob,
                        struct ftrace_probe_ops *ops, void *data);

        unregister_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("unregister_ftrace_function_probe");
        if (unregister_ftrace_function_probe_sym) {
                unregister_ftrace_function_probe_sym(glob, ops, data);
        } else {
                printk(KERN_WARNING "LTTng: unregister_ftrace_function_probe symbol lookup failed.\n");
                WARN_ON(1);
        }
}

#else

static inline
int wrapper_register_ftrace_function_probe(char *glob,
                struct ftrace_probe_ops *ops, void *data)
{
        return register_ftrace_function_probe(glob, ops, data);
}

static inline
void wrapper_unregister_ftrace_function_probe(char *glob,
                struct ftrace_probe_ops *ops, void *data)
{
        unregister_ftrace_function_probe(glob, ops, data);
}

#endif
#endif /* _LTT_WRAPPER_FTRACE_H */
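As a usage sketch (not part of this commit), a probe module would hand its own ftrace_probe_ops to the wrapper. The callback signature below matches kernels of this era; the probe body, the names, the include path, and the "schedule*" glob are all hypothetical:

#include <linux/ftrace.h>
#include <linux/module.h>
#include "wrapper/ftrace.h"

/* Hypothetical probe: runs for every traced function matching the glob. */
static void example_probe(unsigned long ip, unsigned long parent_ip,
                void **data)
{
        /* record an event here */
}

static struct ftrace_probe_ops example_ops = {
        .func = example_probe,
};

static int __init example_init(void)
{
        int ret;

        /* register_ftrace_function_probe() returns the number of matches. */
        ret = wrapper_register_ftrace_function_probe("schedule*",
                        &example_ops, NULL);
        return ret < 0 ? ret : 0;
}

static void __exit example_exit(void)
{
        wrapper_unregister_ftrace_function_probe("schedule*",
                        &example_ops, NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");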
/*
* wrapper/inline_memcpy.h
*
* Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
#define inline_memcpy memcpy
#endif
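For illustration only: the ring buffer backend copies event payloads through inline_memcpy(), and an architecture (or a patched kernel) can supply its own inlined copy by defining it before this header is included. The override below is hypothetical, simply mapping to the compiler builtin:

/* Hypothetical arch override, defined before including inline_memcpy.h. */
#define __HAVE_ARCH_INLINE_MEMCPY
#define inline_memcpy(dest, src, n)  __builtin_memcpy(dest, src, n)

#include "wrapper/inline_memcpy.h"   /* fallback is skipped, override kept */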
#ifndef _LTT_WRAPPER_KALLSYMS_H
#define _LTT_WRAPPER_KALLSYMS_H
/*
 * wrapper/kallsyms.h
 *
 * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
 *
 * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
 * arches where the address of the start of the function body is different
 * from the pointer which can be used to call the function, e.g. ARM THUMB2.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/kallsyms.h>

static inline
unsigned long kallsyms_lookup_funcptr(const char *name)
{
        unsigned long addr;

        addr = kallsyms_lookup_name(name);
#ifdef CONFIG_ARM
#ifdef CONFIG_THUMB2_KERNEL
        if (addr)
                addr |= 1; /* set bit 0 in address for thumb mode */
#endif
#endif
        return addr;
}
#endif /* _LTT_WRAPPER_KALLSYMS_H */
#ifndef _LTT_WRAPPER_PERF_H
#define _LTT_WRAPPER_PERF_H
/*
 * wrapper/perf.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/version.h>
#include <linux/perf_event.h>
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static inline struct perf_event *
wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback)
{
        return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
}
#else
static inline struct perf_event *
wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback)
{
        return perf_event_create_kernel_counter(attr, cpu, task, callback);
}
#endif
#endif /* _LTT_WRAPPER_PERF_H */
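As a usage sketch (the helper name and event choice are hypothetical, not from this commit), a caller can create a pinned per-CPU hardware cycle counter through the wrapper. Passing a NULL handler requests no overflow callback, which also sidesteps the handler-signature differences between kernel versions:

#include <linux/perf_event.h>
#include "wrapper/perf.h"

/* Hypothetical: create a pinned cycle counter on 'cpu', counting all tasks. */
static struct perf_event *example_create_cycle_counter(int cpu)
{
        static struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
                .size = sizeof(struct perf_event_attr),
                .pinned = 1,
        };

        return wrapper_perf_event_create_kernel_counter(&attr, cpu,
                        NULL /* all tasks */, NULL /* no overflow handler */);
}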
#ifndef _LTTNG_WRAPPER_POLL_H
#define _LTTNG_WRAPPER_POLL_H
/*
 * wrapper/poll.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/poll.h>

/*
 * No-op on kernels that do not provide poll_wait_set_exclusive() (i.e.
 * mainline): exclusive poll wakeups are simply not used there.
 */
#define poll_wait_set_exclusive(poll_table)
#endif /* _LTTNG_WRAPPER_POLL_H */
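A usage sketch (the handler and wait queue are hypothetical): a poll() file operation calls the wrapper right before poll_wait(), so patched kernels get exclusive wakeups while mainline kernels compile the call away:

#include <linux/fs.h>
#include <linux/poll.h>
#include "wrapper/poll.h"

static DECLARE_WAIT_QUEUE_HEAD(example_wait);   /* hypothetical wait queue */

static unsigned int example_poll(struct file *filp, poll_table *wait)
{
        unsigned int mask = 0;

        poll_wait_set_exclusive(wait);  /* no-op on mainline kernels */
        poll_wait(filp, &example_wait, wait);
        /* ... set POLLIN | POLLRDNORM in mask when data is ready ... */
        return mask;
}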
#include "../../lib/ringbuffer/api.h"
#include "../../lib/ringbuffer/backend.h"
#include "../../wrapper/inline_memcpy.h"
#include "../../lib/ringbuffer/backend_internal.h"
#include "../../lib/ringbuffer/backend_types.h"
#include "../../lib/ringbuffer/config.h"
#include "../../lib/ringbuffer/frontend.h"
#include "../../lib/ringbuffer/frontend_api.h"
#include "../../lib/ringbuffer/frontend_internal.h"
#include "../../lib/ringbuffer/frontend_types.h"
#include "../../lib/ringbuffer/iterator.h"
#include "../../lib/ringbuffer/nohz.h"
#include "../../lib/ringbuffer/vatomic.h"
#include "../../lib/ringbuffer/vfs.h"
#ifndef _LTT_WRAPPER_SPINLOCK_H
#define _LTT_WRAPPER_SPINLOCK_H
/*
 * wrapper/spinlock.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))

#include <linux/string.h>

#define raw_spin_lock_init(lock) \
        do { \
                raw_spinlock_t __lock = __RAW_SPIN_LOCK_UNLOCKED; \
                /* copy the whole lock, not just a pointer's worth */ \
                memcpy(lock, &__lock, sizeof(__lock)); \
        } while (0)

#define raw_spin_is_locked(lock) __raw_spin_is_locked(lock)

#endif
#endif /* _LTT_WRAPPER_SPINLOCK_H */
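A minimal sketch of what these compatibility macros allow (the lock and function are hypothetical): code written against the post-2.6.33 raw_spinlock_t naming also builds on older kernels, where the names map onto the old __raw_spin_* API:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include "wrapper/spinlock.h"

static raw_spinlock_t example_lock;     /* hypothetical lock */

static void example_init_lock(void)
{
        raw_spin_lock_init(&example_lock);
        WARN_ON(raw_spin_is_locked(&example_lock));     /* freshly unlocked */
}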
/*
 * wrapper/splice.c
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
 * available, else we need to have a kernel that exports this function to GPL
 * modules.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/splice.h>
#include "kallsyms.h"

static
ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
                struct splice_pipe_desc *spd);

ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
                struct splice_pipe_desc *spd)
{
        /* Cache the looked-up symbol across calls. */
        if (!splice_to_pipe_sym)
                splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
        if (splice_to_pipe_sym) {
                return splice_to_pipe_sym(pipe, spd);
        } else {
                printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
                return -ENOSYS;
        }
}

#else

#include <linux/fs.h>
#include <linux/splice.h>

ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
                struct splice_pipe_desc *spd)
{
        return splice_to_pipe(pipe, spd);
}

#endif
#ifndef _LTT_WRAPPER_SPLICE_H
#define _LTT_WRAPPER_SPLICE_H
/*
 * wrapper/splice.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
 * available, else we need to have a kernel that exports this function to GPL
 * modules.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/splice.h>

ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
                struct splice_pipe_desc *spd);
#ifndef PIPE_DEF_BUFFERS
#define PIPE_DEF_BUFFERS 16
#endif
#endif /* _LTT_WRAPPER_SPLICE_H */
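A usage sketch of the wrapper (everything here is hypothetical and simplified; real callers fill one entry per sub-buffer page, and the splice_pipe_desc layout varies somewhat across kernel versions): spliced pages are described by a pages/partial pair, and spd_release is invoked for pages the pipe did not consume:

#include <linux/fs.h>
#include <linux/splice.h>
#include "wrapper/splice.h"

/* Hypothetical release callback: pages stay owned by the ring buffer. */
static void example_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
}

/* Hypothetical: splice one page fragment into 'pipe'. */
static ssize_t example_splice_one_page(struct pipe_inode_info *pipe,
                struct page *page, unsigned int offset, unsigned int len,
                const struct pipe_buf_operations *buf_ops)
{
        struct page *pages[PIPE_DEF_BUFFERS];
        struct partial_page partial[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .nr_pages = 1,
                .flags = 0,
                .ops = buf_ops,
                .spd_release = example_spd_release,
        };

        pages[0] = page;
        partial[0].offset = offset;
        partial[0].len = len;
        return wrapper_splice_to_pipe(pipe, &spd);
}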
/*
 * wrapper/trace-clock.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * Maps the LTTng trace clock onto the LTTng 0.x trace clock when the kernel
 * provides one, or onto the mainline monotonic clock otherwise. This wrapper
 * depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#ifndef _LTT_TRACE_CLOCK_H
#define _LTT_TRACE_CLOCK_H
#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
static inline u64 trace_clock_monotonic_wrapper(void)
{
        ktime_t ktime;

        /*
         * Refuse to trace from NMIs with this wrapper, because an NMI could
         * nest over the xtime write seqlock and deadlock.
         */
        if (in_nmi())
                return (u64) -EIO;

        ktime = ktime_get();
        return (u64) ktime.tv64;
}
static inline u32 trace_clock_read32(void)
{
        return (u32) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_read64(void)
{
        return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_frequency(void)
{
        return (u64) NSEC_PER_SEC;
}

static inline u32 trace_clock_freq_scale(void)
{
        return 1;
}
static inline int get_trace_clock(void)
{
        printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
        printk(KERN_WARNING " * NMIs will not be traced,\n");
        printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
        printk(KERN_WARNING " LTTng trace clocks.\n");
        printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
        printk(KERN_WARNING "in the near future.\n");
        return 0;
}

static inline void put_trace_clock(void)
{
}
#endif /* CONFIG_HAVE_TRACE_CLOCK */
#endif /* _LTT_TRACE_CLOCK_H */
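A usage sketch (the function name is hypothetical, and real code takes the clock reference once per tracing session rather than per read): get_trace_clock() pins the clock source, then trace_clock_read64() yields a monotonic nanosecond timestamp on this fallback:

#include "wrapper/trace-clock.h"

static void example_timestamp_event(void)
{
        u64 ts;

        if (get_trace_clock())
                return;
        ts = trace_clock_read64();      /* ns, per trace_clock_frequency() */
        /* ... store ts in an event header ... */
        put_trace_clock();
}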
#ifndef _LTT_WRAPPER_UUID_H
#define _LTT_WRAPPER_UUID_H
/*
 * wrapper/uuid.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
#include <linux/uuid.h>
#else

#include <linux/random.h>

typedef struct {
        __u8 b[16];
} uuid_le;

static inline
void uuid_le_gen(uuid_le *u)
{
        generate_random_uuid(u->b);
}

#endif
#endif /* _LTT_WRAPPER_UUID_H */
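A usage sketch (names are hypothetical): both branches expose the same uuid_le type and uuid_le_gen() call, so a trace session can generate its UUID identically on old and new kernels:

#include <linux/kernel.h>
#include "wrapper/uuid.h"

static void example_session_uuid(void)
{
        uuid_le uuid;
        int i;

        uuid_le_gen(&uuid);
        for (i = 0; i < 16; i++)
                printk(KERN_DEBUG "uuid byte %d: %02x\n", i, uuid.b[i]);
}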
#ifndef _LTT_WRAPPER_VMALLOC_H
#define _LTT_WRAPPER_VMALLOC_H
/*
 * wrapper/vmalloc.h
 *
 * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
 *
 * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
 * available, else we need to have a kernel that exports this function to GPL
 * modules.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include "kallsyms.h"

static inline
void wrapper_vmalloc_sync_all(void)
{
        void (*vmalloc_sync_all_sym)(void);

        vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
        if (vmalloc_sync_all_sym) {
                vmalloc_sync_all_sym();
        } else {
#ifdef CONFIG_X86
                /*
                 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
                 * trigger recursive page faults.
                 */
                printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
                printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
        }
}

#else

#include <linux/vmalloc.h>

static inline
void wrapper_vmalloc_sync_all(void)
{
        vmalloc_sync_all();
}

#endif
#endif /* _LTT_WRAPPER_VMALLOC_H */
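A usage sketch (the allocation helper is hypothetical), following the rationale in the comment above: after vmalloc'ing tracing buffers, syncing the kernel page tables keeps the page fault handler and NMI tracing from faulting on the new mappings:

#include <linux/vmalloc.h>
#include "wrapper/vmalloc.h"

static void *example_alloc_trace_buffer(size_t len)
{
        void *buf = vmalloc(len);

        if (buf)
                wrapper_vmalloc_sync_all();     /* sync PTEs into all pgds (x86) */
        return buf;
}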