Commit dcf092b4 authored by Christoph Hellwig

[XFS] merge page_buf_private_t into page_buf_t

SGI Modid: 2.5.x-xfs:slinx:134949a
parent 6ff86498
......@@ -104,7 +104,7 @@ pb_trace_func(
pb_trace.buf[j].event = event;
pb_trace.buf[j].flags = pb->pb_flags;
pb_trace.buf[j].hold = pb->pb_hold.counter;
pb_trace.buf[j].lock_value = PBP(pb)->pb_sema.count.counter;
pb_trace.buf[j].lock_value = pb->pb_sema.count.counter;
pb_trace.buf[j].task = (void *)current;
pb_trace.buf[j].misc = misc;
pb_trace.buf[j].ra = ra;
......@@ -271,12 +271,12 @@ _pagebuf_initialize(
*/
flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
memset(pb, 0, sizeof(page_buf_private_t));
memset(pb, 0, sizeof(page_buf_t));
atomic_set(&pb->pb_hold, 1);
init_MUTEX_LOCKED(&pb->pb_iodonesema);
INIT_LIST_HEAD(&pb->pb_list);
INIT_LIST_HEAD(&pb->pb_hash_list);
init_MUTEX_LOCKED(&PBP(pb)->pb_sema); /* held, no waiters */
init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
PB_SET_OWNER(pb);
pb->pb_target = target;
pb->pb_file_offset = range_base;
......@@ -288,8 +288,8 @@ _pagebuf_initialize(
pb->pb_buffer_length = pb->pb_count_desired = range_length;
pb->pb_flags = flags | PBF_NONE;
pb->pb_bn = PAGE_BUF_DADDR_NULL;
atomic_set(&PBP(pb)->pb_pin_count, 0);
init_waitqueue_head(&PBP(pb)->pb_waiters);
atomic_set(&pb->pb_pin_count, 0);
init_waitqueue_head(&pb->pb_waiters);
PB_STATS_INC(pbstats.pb_create);
PB_TRACE(pb, PB_TRACE_REC(get), target);
......@@ -656,7 +656,7 @@ _pagebuf_find( /* find buffer for block */
* if this does not work then we need to drop the
* spinlock and do a hard attempt on the semaphore.
*/
not_locked = down_trylock(&PBP(pb)->pb_sema);
not_locked = down_trylock(&pb->pb_sema);
if (not_locked) {
if (!(flags & PBF_TRYLOCK)) {
/* wait for buffer ownership */
......@@ -951,7 +951,7 @@ pagebuf_get_no_daddr(
/* otherwise pagebuf_free just ignores it */
pb->pb_flags |= _PBF_MEM_ALLOCATED;
PB_CLEAR_OWNER(pb);
up(&PBP(pb)->pb_sema); /* Return unlocked pagebuf */
up(&pb->pb_sema); /* Return unlocked pagebuf */
PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem);
......@@ -1069,8 +1069,8 @@ void
pagebuf_pin(
page_buf_t *pb)
{
atomic_inc(&PBP(pb)->pb_pin_count);
PB_TRACE(pb, PB_TRACE_REC(pin), PBP(pb)->pb_pin_count.counter);
atomic_inc(&pb->pb_pin_count);
PB_TRACE(pb, PB_TRACE_REC(pin), pb->pb_pin_count.counter);
}
/*
......@@ -1084,17 +1084,17 @@ void
pagebuf_unpin(
page_buf_t *pb)
{
if (atomic_dec_and_test(&PBP(pb)->pb_pin_count)) {
wake_up_all(&PBP(pb)->pb_waiters);
if (atomic_dec_and_test(&pb->pb_pin_count)) {
wake_up_all(&pb->pb_waiters);
}
PB_TRACE(pb, PB_TRACE_REC(unpin), PBP(pb)->pb_pin_count.counter);
PB_TRACE(pb, PB_TRACE_REC(unpin), pb->pb_pin_count.counter);
}
int
pagebuf_ispin(
page_buf_t *pb)
{
return atomic_read(&PBP(pb)->pb_pin_count);
return atomic_read(&pb->pb_pin_count);
}
/*
......@@ -1110,19 +1110,19 @@ _pagebuf_wait_unpin(
{
DECLARE_WAITQUEUE (wait, current);
if (atomic_read(&PBP(pb)->pb_pin_count) == 0)
if (atomic_read(&pb->pb_pin_count) == 0)
return;
add_wait_queue(&PBP(pb)->pb_waiters, &wait);
add_wait_queue(&pb->pb_waiters, &wait);
for (;;) {
current->state = TASK_UNINTERRUPTIBLE;
if (atomic_read(&PBP(pb)->pb_pin_count) == 0) {
if (atomic_read(&pb->pb_pin_count) == 0) {
break;
}
pagebuf_run_queues(pb);
schedule();
}
remove_wait_queue(&PBP(pb)->pb_waiters, &wait);
remove_wait_queue(&pb->pb_waiters, &wait);
current->state = TASK_RUNNING;
}
......@@ -1566,7 +1566,7 @@ pagebuf_delwri_queue(
}
list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
PBP(pb)->pb_flushtime = jiffies + pb_params.p_un.age_buffer;
pb->pb_flushtime = jiffies + pb_params.p_un.age_buffer;
spin_unlock(&pbd_delwrite_lock);
if (unlock && (pb->pb_flags & _PBF_LOCKABLE)) {
......@@ -1649,8 +1649,8 @@ pagebuf_daemon(
(((pb->pb_flags & _PBF_LOCKABLE) == 0) ||
!pagebuf_cond_lock(pb))) {
if (!force_flush && time_before(jiffies,
PBP(pb)->pb_flushtime)) {
if (!force_flush &&
time_before(jiffies, pb->pb_flushtime)) {
pagebuf_unlock(pb);
break;
}
......@@ -1913,8 +1913,7 @@ pagebuf_init(void)
"fs/pagebuf/stat", 0, 0, pagebuf_readstats, NULL);
#endif
pagebuf_cache = kmem_cache_create("page_buf_t",
sizeof(page_buf_private_t), 0,
pagebuf_cache = kmem_cache_create("page_buf_t", sizeof(page_buf_t), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (pagebuf_cache == NULL) {
printk("pagebuf: couldn't init pagebuf cache\n");
......
......@@ -195,6 +195,10 @@ typedef int (*page_buf_bdstrat_t)(struct page_buf_s *);
#define PB_PAGES 4
typedef struct page_buf_s {
struct semaphore pb_sema; /* semaphore for lockables */
unsigned long pb_flushtime; /* time to flush pagebuf */
atomic_t pb_pin_count; /* pin count */
wait_queue_head_t pb_waiters; /* unpin waiters */
struct list_head pb_list;
page_buf_flags_t pb_flags; /* status flags */
struct list_head pb_hash_list;
......@@ -221,6 +225,9 @@ typedef struct page_buf_s {
unsigned char pb_hash_index; /* hash table index */
struct page **pb_pages; /* array of page pointers */
struct page *pb_page_array[PB_PAGES]; /* inline pages */
#ifdef PAGEBUF_LOCK_TRACKING
int pb_last_holder;
#endif
} page_buf_t;
......
......@@ -48,24 +48,10 @@
#define page_has_buffers(page) ((page)->buffers)
#endif
typedef struct page_buf_private_s {
page_buf_t pb_common; /* public part of structure */
struct semaphore pb_sema; /* semaphore for lockables */
unsigned long pb_flushtime; /* time to flush pagebuf */
atomic_t pb_pin_count; /* pin count */
wait_queue_head_t pb_waiters; /* unpin waiters */
#ifdef PAGEBUF_LOCK_TRACKING
int pb_last_holder;
#endif
} page_buf_private_t;
#define PBC(pb) (&((pb)->pb_common))
#define PBP(pb) ((page_buf_private_t *) (pb))
#ifdef PAGEBUF_LOCK_TRACKING
#define PB_SET_OWNER(pb) (PBP(pb)->pb_last_holder = current->pid)
#define PB_CLEAR_OWNER(pb) (PBP(pb)->pb_last_holder = -1)
#define PB_GET_OWNER(pb) (PBP(pb)->pb_last_holder)
#define PB_SET_OWNER(pb) (pb->pb_last_holder = current->pid)
#define PB_CLEAR_OWNER(pb) (pb->pb_last_holder = -1)
#define PB_GET_OWNER(pb) (pb->pb_last_holder)
#else
#define PB_SET_OWNER(pb)
#define PB_CLEAR_OWNER(pb)
......
......@@ -75,7 +75,7 @@ pagebuf_cond_lock( /* lock buffer, if not locked */
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
locked = down_trylock(&PBP(pb)->pb_sema) == 0;
locked = down_trylock(&pb->pb_sema) == 0;
if (locked) {
PB_SET_OWNER(pb);
}
......@@ -95,7 +95,7 @@ pagebuf_lock_value(
page_buf_t *pb)
{
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
return(atomic_read(&PBP(pb)->pb_sema.count));
return(atomic_read(&pb->pb_sema.count));
}
/*
......@@ -114,7 +114,7 @@ pagebuf_lock(
PB_TRACE(pb, PB_TRACE_REC(lock), 0);
pagebuf_run_queues(pb);
down(&PBP(pb)->pb_sema);
down(&pb->pb_sema);
PB_SET_OWNER(pb);
PB_TRACE(pb, PB_TRACE_REC(locked), 0);
return 0;
......@@ -133,6 +133,6 @@ pagebuf_unlock( /* unlock buffer */
{
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_CLEAR_OWNER(pb);
up(&PBP(pb)->pb_sema);
up(&pb->pb_sema);
PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
}
......@@ -1793,7 +1793,7 @@ kdbm_pb_flags(int argc, const char **argv, const char **envp, struct pt_regs *re
static int
kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
{
page_buf_private_t bp;
page_buf_t bp;
unsigned long addr;
long offset=0;
int nextarg;
......@@ -1808,43 +1808,43 @@ kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
return diag;
kdb_printf("page_buf_t at 0x%lx\n", addr);
kdb_printf(" pb_flags %s\n", pb_flags(bp.pb_common.pb_flags));
kdb_printf(" pb_flags %s\n", pb_flags(bp.pb_flags));
kdb_printf(" pb_target 0x%p pb_hold %d pb_next 0x%p pb_prev 0x%p\n",
bp.pb_common.pb_target, bp.pb_common.pb_hold.counter,
bp.pb_common.pb_list.next, bp.pb_common.pb_list.prev);
bp.pb_target, bp.pb_hold.counter,
bp.pb_list.next, bp.pb_list.prev);
kdb_printf(" pb_hash_index %d pb_hash_next 0x%p pb_hash_prev 0x%p\n",
bp.pb_common.pb_hash_index,
bp.pb_common.pb_hash_list.next,
bp.pb_common.pb_hash_list.prev);
bp.pb_hash_index,
bp.pb_hash_list.next,
bp.pb_hash_list.prev);
kdb_printf(" pb_file_offset 0x%llx pb_buffer_length 0x%llx pb_addr 0x%p\n",
(unsigned long long) bp.pb_common.pb_file_offset,
(unsigned long long) bp.pb_common.pb_buffer_length,
bp.pb_common.pb_addr);
(unsigned long long) bp.pb_file_offset,
(unsigned long long) bp.pb_buffer_length,
bp.pb_addr);
kdb_printf(" pb_bn 0x%Lx pb_count_desired 0x%lx\n",
bp.pb_common.pb_bn,
(unsigned long) bp.pb_common.pb_count_desired);
bp.pb_bn,
(unsigned long) bp.pb_count_desired);
kdb_printf(" pb_io_remaining %d pb_error %u\n",
bp.pb_common.pb_io_remaining.counter,
bp.pb_common.pb_error);
bp.pb_io_remaining.counter,
bp.pb_error);
kdb_printf(" pb_page_count %u pb_offset 0x%x pb_pages 0x%p\n",
bp.pb_common.pb_page_count, bp.pb_common.pb_offset,
bp.pb_common.pb_pages);
bp.pb_page_count, bp.pb_offset,
bp.pb_pages);
#ifdef PAGEBUF_LOCK_TRACKING
kdb_printf(" pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d) last holder %d\n",
bp.pb_common.pb_iodonesema.count.counter,
bp.pb_common.pb_iodonesema.sleepers,
bp.pb_iodonesema.count.counter,
bp.pb_iodonesema.sleepers,
bp.pb_sema.count.counter, bp.pb_sema.sleepers,
bp.pb_pin_count.counter, bp.pb_last_holder);
#else
kdb_printf(" pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d)\n",
bp.pb_common.pb_iodonesema.count.counter,
bp.pb_common.pb_iodonesema.sleepers,
bp.pb_iodonesema.count.counter,
bp.pb_iodonesema.sleepers,
bp.pb_sema.count.counter, bp.pb_sema.sleepers,
bp.pb_pin_count.counter);
#endif
if (bp.pb_common.pb_fspriv || bp.pb_common.pb_fspriv2) {
if (bp.pb_fspriv || bp.pb_fspriv2) {
kdb_printf( "pb_fspriv 0x%p pb_fspriv2 0x%p\n",
bp.pb_common.pb_fspriv, bp.pb_common.pb_fspriv2);
bp.pb_fspriv, bp.pb_fspriv2);
}
return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment