Commit e3f77734 authored by Nathan Scott

[XFS] Change pagebuf to use the same ktrace implementation as XFS, instead of reinventing that wheel.

SGI Modid: 2.5.x-xfs:slinx:162159a
parent 272a8d2c
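
For readers unfamiliar with the ktrace facility this commit adopts, below is a minimal usage sketch assembled from the calls visible in the diff (ktrace_alloc(), ktrace_enter(), and the ktrace_t handle). The example_* names are hypothetical, and ktrace_free() is an assumption about the support/ktrace.h API rather than something shown in this commit:

#include <linux/errno.h>
#include <support/ktrace.h>
#include <support/kmem.h>	/* KM_SLEEP */

/* Hypothetical per-subsystem trace buffer, mirroring pagebuf_trace_buf. */
static ktrace_t *example_trace_buf;

static int example_trace_init(void)
{
	/* Allocate a 4096-entry circular trace buffer; KM_SLEEP may block. */
	example_trace_buf = ktrace_alloc(4096, KM_SLEEP);
	return example_trace_buf ? 0 : -ENOMEM;
}

static void example_trace(void *obj, char *id, void *data)
{
	/* Each ktrace entry holds exactly 16 pointer-sized values; pad
	 * unused slots with NULL, as pagebuf_trace() does below. */
	ktrace_enter(example_trace_buf,
		     obj, id, data,
		     (void *)__builtin_return_address(0),
		     NULL, NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL, NULL);
}

static void example_trace_exit(void)
{
	/* ktrace_free() is assumed to pair with ktrace_alloc(). */
	ktrace_free(example_trace_buf);
}

Note also the pb_params.debug.val gate in the new pagebuf_trace() below: tracing is compiled in under PAGEBUF_TRACE but can still be toggled at runtime through the /proc/sys/vm/pagebuf sysctl added in this diff.
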
@@ -69,10 +69,6 @@
#include <pagebuf/page_buf.h>
#ifndef STATIC
#define STATIC static
#endif
/*
* State flag for unwritten extent buffers.
*
@@ -58,11 +58,13 @@
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/percpu.h>
#include <support/ktrace.h>
#include <support/debug.h>
#include <support/kmem.h>
#include "page_buf_internal.h"
#include "page_buf.h"
#define BBSHIFT 9
#define BN_ALIGN_MASK ((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1)
@@ -72,50 +74,7 @@
#endif
/*
* Debug code
*/
#ifdef PAGEBUF_TRACE
static spinlock_t pb_trace_lock = SPIN_LOCK_UNLOCKED;
struct pagebuf_trace_buf pb_trace;
EXPORT_SYMBOL(pb_trace);
EXPORT_SYMBOL(pb_trace_func);
#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1))
void
pb_trace_func(
page_buf_t *pb,
int event,
void *misc,
void *ra)
{
int j;
unsigned long flags;
if (!pb_params.debug.val) return;
if (ra == NULL) ra = (void *)__builtin_return_address(0);
spin_lock_irqsave(&pb_trace_lock, flags);
j = pb_trace.start;
pb_trace.start = CIRC_INC(j);
spin_unlock_irqrestore(&pb_trace_lock, flags);
pb_trace.buf[j].pb = (unsigned long) pb;
pb_trace.buf[j].event = event;
pb_trace.buf[j].flags = pb->pb_flags;
pb_trace.buf[j].hold = pb->pb_hold.counter;
pb_trace.buf[j].lock_value = pb->pb_sema.count.counter;
pb_trace.buf[j].task = (void *)current;
pb_trace.buf[j].misc = misc;
pb_trace.buf[j].ra = ra;
pb_trace.buf[j].offset = pb->pb_file_offset;
pb_trace.buf[j].size = pb->pb_buffer_length;
}
#endif /* PAGEBUF_TRACE */
/*
* File wide globals
*/
STATIC kmem_cache_t *pagebuf_cache;
@@ -129,7 +88,20 @@ STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
* /proc/sys/vm/pagebuf
*/
pagebuf_param_t pb_params = {
typedef struct pb_sysctl_val {
int min;
int val;
int max;
} pb_sysctl_val_t;
struct {
pb_sysctl_val_t flush_interval; /* interval between runs of the
* delwri flush daemon. */
pb_sysctl_val_t age_buffer; /* time for buffer to age before
* we flush it. */
pb_sysctl_val_t stats_clear; /* clear the pagebuf stats */
pb_sysctl_val_t debug; /* debug tracing on or off */
} pb_params = {
/* MIN DFLT MAX */
.flush_interval = { HZ/2, HZ, 30*HZ },
.age_buffer = { 1*HZ, 15*HZ, 300*HZ },
@@ -137,12 +109,79 @@ pagebuf_param_t pb_params = {
.debug = { 0, 0, 1 },
};
enum {
PB_FLUSH_INT = 1,
PB_FLUSH_AGE = 2,
PB_STATS_CLEAR = 3,
PB_DEBUG = 4,
};
/*
* Pagebuf statistics variables
*/
struct pbstats {
u_int32_t pb_get;
u_int32_t pb_create;
u_int32_t pb_get_locked;
u_int32_t pb_get_locked_waited;
u_int32_t pb_busy_locked;
u_int32_t pb_miss_locked;
u_int32_t pb_page_retries;
u_int32_t pb_page_found;
u_int32_t pb_get_read;
} pbstats;
DEFINE_PER_CPU(struct pbstats, pbstats);
/* We don't disable preempt, not too worried about poking the
* wrong cpu's stat for now */
#define PB_STATS_INC(count) (__get_cpu_var(pbstats).count++)
/*
* Pagebuf debugging
*/
#ifdef PAGEBUF_TRACE
void
pagebuf_trace(
page_buf_t *pb,
char *id,
void *data,
void *ra)
{
if (!pb_params.debug.val)
return;
ktrace_enter(pagebuf_trace_buf,
pb, id,
(void *)(unsigned long)pb->pb_flags,
(void *)(unsigned long)pb->pb_hold.counter,
(void *)(unsigned long)pb->pb_sema.count.counter,
(void *)current,
data, ra,
(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
(void *)(unsigned long)pb->pb_buffer_length,
NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *pagebuf_trace_buf;
EXPORT_SYMBOL(pagebuf_trace_buf);
#define PAGEBUF_TRACE_SIZE 4096
#define PB_TRACE(pb, id, data) \
pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define PB_TRACE(pb, id, data) do { } while (0)
#endif
#ifdef PAGEBUF_LOCK_TRACKING
# define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid)
# define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1)
# define PB_GET_OWNER(pb) ((pb)->pb_last_holder)
#else
# define PB_SET_OWNER(pb) do { } while (0)
# define PB_CLEAR_OWNER(pb) do { } while (0)
# define PB_GET_OWNER(pb) do { } while (0)
#endif
/*
* Pagebuf allocation / freeing.
*/
@@ -198,11 +237,11 @@ typedef struct a_list {
void *vm_addr;
struct a_list *next;
} a_list_t;
STATIC a_list_t *as_free_head;
STATIC int as_list_len;
STATIC spinlock_t as_lock = SPIN_LOCK_UNLOCKED;
/*
* Try to batch vunmaps because they are costly.
*/
@@ -246,7 +285,6 @@ purge_addresses(void)
}
}
/*
* Internal pagebuf object manipulation
*/
@@ -285,7 +323,7 @@ _pagebuf_initialize(
init_waitqueue_head(&pb->pb_waiters);
PB_STATS_INC(pb_create);
PB_TRACE(pb, PB_TRACE_REC(get), target);
PB_TRACE(pb, "initialize", target);
}
/*
@@ -352,7 +390,7 @@ _pagebuf_free_object(
{
page_buf_flags_t pb_flags = pb->pb_flags;
PB_TRACE(pb, PB_TRACE_REC(free_obj), 0);
PB_TRACE(pb, "free_object", 0);
pb->pb_flags |= PBF_FREED;
if (hash) {
@@ -563,7 +601,7 @@ _pagebuf_lookup_pages(
}
}
PB_TRACE(pb, PB_TRACE_REC(look_pg), good_pages);
PB_TRACE(pb, "lookup_pages", (long)good_pages);
return rval;
}
@@ -654,7 +692,7 @@ _pagebuf_find( /* find buffer for block */
if (not_locked) {
if (!(flags & PBF_TRYLOCK)) {
/* wait for buffer ownership */
PB_TRACE(pb, PB_TRACE_REC(get_lk), 0);
PB_TRACE(pb, "get_lock", 0);
pagebuf_lock(pb);
PB_STATS_INC(pb_get_locked_waited);
} else {
@@ -681,7 +719,7 @@ _pagebuf_find( /* find buffer for block */
_PBF_ALL_PAGES_MAPPED | \
_PBF_ADDR_ALLOCATED | \
_PBF_MEM_ALLOCATED;
PB_TRACE(pb, PB_TRACE_REC(got_lk), 0);
PB_TRACE(pb, "got_lock", 0);
PB_STATS_INC(pb_get_locked);
return (pb);
}
@@ -756,7 +794,7 @@ pagebuf_get( /* allocate a buffer */
if (flags & PBF_READ) {
if (PBF_NOT_DONE(pb)) {
PB_TRACE(pb, PB_TRACE_REC(get_read), flags);
PB_TRACE(pb, "get_read", (unsigned long)flags);
PB_STATS_INC(pb_get_read);
pagebuf_iostart(pb, flags);
} else if (flags & PBF_ASYNC) {
@@ -774,7 +812,7 @@ pagebuf_get( /* allocate a buffer */
}
}
PB_TRACE(pb, PB_TRACE_REC(get_obj), flags);
PB_TRACE(pb, "get_done", (unsigned long)flags);
return (pb);
}
@@ -939,7 +977,7 @@ pagebuf_get_no_daddr(
PB_CLEAR_OWNER(pb);
up(&pb->pb_sema); /* Return unlocked pagebuf */
PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem);
PB_TRACE(pb, "no_daddr", rmem);
return pb;
}
@@ -958,7 +996,7 @@ pagebuf_hold(
page_buf_t *pb)
{
atomic_inc(&pb->pb_hold);
PB_TRACE(pb, PB_TRACE_REC(hold), 0);
PB_TRACE(pb, "hold", 0);
}
/*
@@ -993,7 +1031,7 @@ pagebuf_rele(
{
pb_hash_t *h;
PB_TRACE(pb, PB_TRACE_REC(rele), pb->pb_relse);
PB_TRACE(pb, "rele", pb->pb_relse);
if (pb->pb_flags & _PBF_LOCKABLE) {
h = pb_hash(pb);
spin_lock(&h->pb_hash_lock);
@@ -1064,7 +1102,7 @@ pagebuf_cond_lock( /* lock buffer, if not locked */
if (locked) {
PB_SET_OWNER(pb);
}
PB_TRACE(pb, PB_TRACE_REC(condlck), locked);
PB_TRACE(pb, "cond_lock", (long)locked);
return(locked ? 0 : -EBUSY);
}
@@ -1095,12 +1133,12 @@ pagebuf_lock(
{
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_TRACE(pb, PB_TRACE_REC(lock), 0);
PB_TRACE(pb, "lock", 0);
if (atomic_read(&pb->pb_io_remaining))
blk_run_queues();
down(&pb->pb_sema);
PB_SET_OWNER(pb);
PB_TRACE(pb, PB_TRACE_REC(locked), 0);
PB_TRACE(pb, "locked", 0);
return 0;
}
@@ -1118,7 +1156,7 @@ pagebuf_unlock( /* unlock buffer */
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_CLEAR_OWNER(pb);
up(&pb->pb_sema);
PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
PB_TRACE(pb, "unlock", 0);
}
@@ -1145,7 +1183,7 @@ pagebuf_pin(
page_buf_t *pb)
{
atomic_inc(&pb->pb_pin_count);
PB_TRACE(pb, PB_TRACE_REC(pin), pb->pb_pin_count.counter);
PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
}
/*
@@ -1162,7 +1200,7 @@ pagebuf_unpin(
if (atomic_dec_and_test(&pb->pb_pin_count)) {
wake_up_all(&pb->pb_waiters);
}
PB_TRACE(pb, PB_TRACE_REC(unpin), pb->pb_pin_count.counter);
PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
}
int
@@ -1241,7 +1279,7 @@ pagebuf_iodone(
pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
}
PB_TRACE(pb, PB_TRACE_REC(done), pb->pb_iodone);
PB_TRACE(pb, "iodone", pb->pb_iodone);
if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
if (schedule) {
@@ -1267,7 +1305,7 @@ pagebuf_ioerror( /* mark/clear buffer error flag */
unsigned int error) /* error to store (0 if none) */
{
pb->pb_error = error;
PB_TRACE(pb, PB_TRACE_REC(ioerror), error);
PB_TRACE(pb, "ioerror", (unsigned long)error);
}
/*
@@ -1291,7 +1329,7 @@ pagebuf_iostart( /* start I/O on a buffer */
{
int status = 0;
PB_TRACE(pb, PB_TRACE_REC(iostart), flags);
PB_TRACE(pb, "iostart", (unsigned long)flags);
if (flags & PBF_DELWRI) {
pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
@@ -1519,7 +1557,7 @@ int
pagebuf_iorequest( /* start real I/O */
page_buf_t *pb) /* buffer to convey to device */
{
PB_TRACE(pb, PB_TRACE_REC(ioreq), 0);
PB_TRACE(pb, "iorequest", 0);
if (pb->pb_flags & PBF_DELWRI) {
pagebuf_delwri_queue(pb, 1);
@@ -1555,11 +1593,11 @@ int
pagebuf_iowait(
page_buf_t *pb)
{
PB_TRACE(pb, PB_TRACE_REC(iowait), 0);
PB_TRACE(pb, "iowait", 0);
if (atomic_read(&pb->pb_io_remaining))
blk_run_queues();
down(&pb->pb_iodonesema);
PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error);
PB_TRACE(pb, "iowaited", (long)pb->pb_error);
return pb->pb_error;
}
@@ -1650,7 +1688,7 @@ pagebuf_delwri_queue(
page_buf_t *pb,
int unlock)
{
PB_TRACE(pb, PB_TRACE_REC(delwri_q), unlock);
PB_TRACE(pb, "delwri_q", (long)unlock);
spin_lock(&pbd_delwrite_lock);
/* If already in the queue, dequeue and place at tail */
if (!list_empty(&pb->pb_list)) {
@@ -1673,7 +1711,7 @@ void
pagebuf_delwri_dequeue(
page_buf_t *pb)
{
PB_TRACE(pb, PB_TRACE_REC(delwri_uq), 0);
PB_TRACE(pb, "delwri_uq", 0);
spin_lock(&pbd_delwrite_lock);
list_del_init(&pb->pb_list);
pb->pb_flags &= ~PBF_DELWRI;
@@ -1740,7 +1778,7 @@ pagebuf_daemon(
list_for_each_safe(curr, next, &pbd_delwrite_queue) {
pb = list_entry(curr, page_buf_t, pb_list);
PB_TRACE(pb, PB_TRACE_REC(walkq1), pagebuf_ispin(pb));
PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
if ((pb->pb_flags & PBF_DELWRI) && !pagebuf_ispin(pb) &&
(((pb->pb_flags & _PBF_LOCKABLE) == 0) ||
@@ -1813,7 +1851,7 @@ pagebuf_delwri_flush(
continue;
}
PB_TRACE(pb, PB_TRACE_REC(walkq2), pagebuf_ispin(pb));
PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
if (pagebuf_ispin(pb)) {
pincount++;
continue;
@@ -2035,11 +2073,7 @@ pagebuf_init(void)
}
#ifdef PAGEBUF_TRACE
pb_trace.buf = (pagebuf_trace_t *)kmalloc(
PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t), GFP_KERNEL);
memset(pb_trace.buf, 0, PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t));
pb_trace.start = 0;
pb_trace.end = PB_TRACE_BUFSIZE - 1;
pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
#endif
pagebuf_daemon_start();
@@ -321,4 +321,16 @@ extern void pagebuf_delwri_dequeue(
extern int pagebuf_init(void);
extern void pagebuf_terminate(void);
#ifdef PAGEBUF_TRACE
extern ktrace_t *pagebuf_trace_buf;
extern void pagebuf_trace(
page_buf_t *, /* buffer being traced */
char *, /* description of operation */
void *, /* arbitrary diagnostic value */
void *); /* return address */
#else
# define pagebuf_trace(pb, id, ptr, ra) do { } while (0)
#endif
#endif /* __PAGE_BUF_H__ */
@@ -44,6 +44,10 @@
extern void icmn_err(int, char *, va_list);
extern void cmn_err(int, char *, ...);
#ifndef STATIC
# define STATIC static
#endif
#ifdef DEBUG
# ifdef lint
# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */
@@ -179,6 +179,7 @@ ktrace_enter(
void *val15)
{
static lock_t wrap_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
int index;
ktrace_entry_t *ktep;
@@ -187,11 +188,11 @@
/*
* Grab an entry by pushing the index up to the next one.
*/
spin_lock(&wrap_lock);
spin_lock_irqsave(&wrap_lock, flags);
index = ktp->kt_index;
if (++ktp->kt_index == ktp->kt_nentries)
ktp->kt_index = 0;
spin_unlock(&wrap_lock);
spin_unlock_irqrestore(&wrap_lock, flags);
if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
ktp->kt_rollover = 1;
@@ -231,18 +231,11 @@ static inline void xfs_buf_relse(page_buf_t *bp)
pagebuf_rele(bp);
}
#define xfs_bpin(bp) pagebuf_pin(bp)
#define xfs_bunpin(bp) pagebuf_unpin(bp)
#ifdef PAGEBUF_TRACE
# define PB_DEFINE_TRACES
# include <pagebuf/page_buf_trace.h>
# define xfs_buftrace(id, bp) PB_TRACE(bp, PB_TRACE_REC(external), (void *)id)
#else
# define xfs_buftrace(id, bp) do { } while (0)
#endif
#define xfs_buftrace(id, bp) \
pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
#define xfs_biodone(pb) \
pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)