Commit 704db23b authored by Linus Torvalds

Merge penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/hlist

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
Parents: 356a8fd1 18bc0cec
fs/dcache.c
@@ -47,16 +47,8 @@ static kmem_cache_t *dentry_cache;
static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
static struct list_head *dentry_hashtable;
static struct hlist_head *dentry_hashtable;
static LIST_HEAD(dentry_unused);
static int max_dentries;
static void * hashtable_end;
static inline int is_bucket(void * addr)
{
return ((addr < (void *)dentry_hashtable)
|| (addr > hashtable_end) ? 0 : 1);
}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
@@ -292,6 +284,7 @@ struct dentry * d_find_alias(struct inode *inode)
while (next != head) {
tmp = next;
next = tmp->next;
prefetch(next);
alias = list_entry(tmp, struct dentry, d_alias);
if (!d_unhashed(alias)) {
if (alias->d_flags & DCACHE_DISCONNECTED)
@@ -378,6 +371,7 @@ static void prune_dcache(int count)
if (tmp == &dentry_unused)
break;
list_del_init(tmp);
prefetch(dentry_unused.prev);
dentry_stat.nr_unused--;
dentry = list_entry(tmp, struct dentry, d_lru);
@@ -603,15 +597,15 @@ void shrink_dcache_parent(struct dentry * parent)
* done under dcache_lock.
*
*/
void shrink_dcache_anon(struct list_head *head)
void shrink_dcache_anon(struct hlist_head *head)
{
struct list_head *lp;
struct hlist_node *lp;
int found;
do {
found = 0;
spin_lock(&dcache_lock);
list_for_each(lp, head) {
struct dentry *this = list_entry(lp, struct dentry, d_hash);
hlist_for_each(lp, head) {
struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
list_del(&this->d_lru);
/* don't add non zero d_count dentries
@@ -727,7 +721,7 @@ struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
dentry->d_mounted = 0;
dentry->d_cookie = NULL;
dentry->d_bucket = NULL;
INIT_LIST_HEAD(&dentry->d_hash);
INIT_HLIST_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
INIT_LIST_HEAD(&dentry->d_alias);
@@ -797,7 +791,7 @@ struct dentry * d_alloc_root(struct inode * root_inode)
return res;
}
static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
static inline struct hlist_head * d_hash(struct dentry * parent, unsigned long hash)
{
hash += (unsigned long) parent / L1_CACHE_BYTES;
hash = hash ^ (hash >> D_HASHBITS);
@@ -860,7 +854,7 @@ struct dentry * d_alloc_anon(struct inode *inode)
res->d_flags |= DCACHE_DISCONNECTED;
res->d_vfs_flags &= ~DCACHE_UNHASHED;
list_add(&res->d_alias, &inode->i_dentry);
list_add(&res->d_hash, &inode->i_sb->s_anon);
hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
spin_unlock(&res->d_lock);
}
inode = NULL; /* don't drop reference */
@@ -947,21 +941,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
struct list_head *head = d_hash(parent,hash);
struct hlist_head *head = d_hash(parent,hash);
struct dentry *found = NULL;
struct list_head *tmp;
int lookup_count = 0;
struct hlist_node *node;
rcu_read_lock();
/* lookup is terminated when flow reaches any bucket head */
for(tmp = head->next; !is_bucket(tmp); tmp = tmp->next) {
hlist_for_each (node, head) {
struct dentry *dentry;
unsigned long move_count;
struct qstr * qstr;
prefetch(node->next);
smp_read_barrier_depends();
dentry = list_entry(tmp, struct dentry, d_hash);
dentry = hlist_entry(node, struct dentry, d_hash);
/* if lookup ends up in a different bucket
* due to concurrent rename, fail it
@@ -969,12 +963,6 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
if (unlikely(dentry->d_bucket != head))
break;
/* to avoid race if dentry keep coming back to original
* bucket due to double moves
*/
if (unlikely(++lookup_count > max_dentries))
break;
/*
* We must take a snapshot of d_move_count followed by
* read memory barrier before any search key comparison
@@ -1034,7 +1022,8 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
unsigned long dent_addr = (unsigned long) dentry;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = 0x0F;
struct list_head *base, *lhp;
struct hlist_head *base;
struct hlist_node *lhp;
if (dent_addr < min_addr)
goto out;
@@ -1050,12 +1039,13 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
goto out;
spin_lock(&dcache_lock);
lhp = base = d_hash(dparent, dentry->d_name.hash);
while ((lhp = lhp->next) != base) {
base = d_hash(dparent, dentry->d_name.hash);
hlist_for_each(lhp,base) {
prefetch(lhp->next);
/* read_barrier_depends() not required for d_hash list
* as it is parsed under dcache_lock
*/
if (dentry == list_entry(lhp, struct dentry, d_hash)) {
if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
__dget_locked(dentry);
spin_unlock(&dcache_lock);
return 1;
@@ -1116,12 +1106,11 @@ void d_delete(struct dentry * dentry)
void d_rehash(struct dentry * entry)
{
struct list_head *list = d_hash(entry->d_parent, entry->d_name.hash);
struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);
spin_lock(&dcache_lock);
if (!list_empty(&entry->d_hash) && !d_unhashed(entry)) BUG();
entry->d_vfs_flags &= ~DCACHE_UNHASHED;
entry->d_bucket = list;
list_add_rcu(&entry->d_hash, list);
hlist_add_head_rcu(&entry->d_hash, list);
spin_unlock(&dcache_lock);
}
@@ -1174,10 +1163,6 @@ static inline void switch_names(struct dentry * dentry, struct dentry * target)
* We could be nicer about the deleted file, and let it show
* up under the name it got deleted rather than the name that
* deleted it.
*
* Careful with the hash switch. The hash switch depends on
* the fact that any list-entry can be a head of the list.
* Think about it.
*/
/**
@@ -1200,8 +1185,8 @@ void d_move(struct dentry * dentry, struct dentry * target)
/* Move the dentry to the target hash queue, if on different bucket */
if (dentry->d_bucket != target->d_bucket) {
dentry->d_bucket = target->d_bucket;
list_del_rcu(&dentry->d_hash);
list_add_rcu(&dentry->d_hash, &target->d_hash);
hlist_del_rcu(&dentry->d_hash);
hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
}
/* Unhash the target: dput() will then get rid of it */
@@ -1284,6 +1269,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
continue;
}
parent = dentry->d_parent;
prefetch(parent);
namelen = dentry->d_name.len;
buflen -= namelen + 1;
if (buflen < 0)
@@ -1503,7 +1489,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
static void __init dcache_init(unsigned long mempages)
{
struct list_head *d;
struct hlist_head *d;
unsigned long order;
unsigned int nr_hash;
int i;
@@ -1524,15 +1510,12 @@ static void __init dcache_init(unsigned long mempages)
if (!dentry_cache)
panic("Cannot create dentry cache");
/* approximate maximum number of dentries in one hash bucket */
max_dentries = (mempages * (PAGE_SIZE / sizeof(struct dentry)));
set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
#if PAGE_SHIFT < 13
mempages >>= (13 - PAGE_SHIFT);
#endif
mempages *= sizeof(struct list_head);
mempages *= sizeof(struct hlist_head);
for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
;
@@ -1540,7 +1523,7 @@ static void __init dcache_init(unsigned long mempages)
unsigned long tmp;
nr_hash = (1UL << order) * PAGE_SIZE /
sizeof(struct list_head);
sizeof(struct hlist_head);
d_hash_mask = (nr_hash - 1);
tmp = nr_hash;
@@ -1548,7 +1531,7 @@ static void __init dcache_init(unsigned long mempages)
while ((tmp >>= 1UL) != 0UL)
d_hash_shift++;
dentry_hashtable = (struct list_head *)
dentry_hashtable = (struct hlist_head *)
__get_free_pages(GFP_ATOMIC, order);
} while (dentry_hashtable == NULL && --order >= 0);
@@ -1558,12 +1541,10 @@ static void __init dcache_init(unsigned long mempages)
if (!dentry_hashtable)
panic("Failed to allocate dcache hash table\n");
hashtable_end = dentry_hashtable + nr_hash;
d = dentry_hashtable;
i = nr_hash;
do {
INIT_LIST_HEAD(d);
INIT_HLIST_HEAD(d);
d++;
i--;
} while (i);
......
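The fs/dcache.c hunks above do two things at once: every d_hash chain walk becomes a NULL-terminated hlist traversal, which is why the is_bucket() sentinel test and the max_dentries loop bound can simply be deleted, and dcache_init() now sizes the table by sizeof(struct hlist_head), so the same page allocation holds twice as many buckets. A minimal userspace sketch of that sizing arithmetic, assuming 4 KiB pages and an arbitrary allocation order (illustrative only, not kernel code):

/* One hash bucket shrinks from two pointers (list_head) to one
 * (hlist_head), doubling the bucket count for a fixed allocation. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

int main(void)
{
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long order = 5;	/* assumed allocation order */
	unsigned long bytes = (1UL << order) * page_size;

	printf("list_head:  %lu bytes -> %lu buckets\n",
	       (unsigned long)sizeof(struct list_head),
	       bytes / (unsigned long)sizeof(struct list_head));
	printf("hlist_head: %lu bytes -> %lu buckets\n",
	       (unsigned long)sizeof(struct hlist_head),
	       bytes / (unsigned long)sizeof(struct hlist_head));
	return 0;
}

On a 64-bit build this prints 8192 buckets for list_head against 16384 for hlist_head, which is the hash-density win the commit is after.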
fs/fs-writeback.c
@@ -90,7 +90,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* Only add valid (hashed) inodes to the superblock's
* dirty list. Add blockdev inodes as well.
*/
if (list_empty(&inode->i_hash) && !S_ISBLK(inode->i_mode))
if (hlist_unhashed(&inode->i_hash) && !S_ISBLK(inode->i_mode))
goto out;
/*
......
fs/hugetlbfs/inode.c
@@ -189,7 +189,7 @@ void truncate_hugepages(struct address_space *mapping, loff_t lstart)
static void hugetlbfs_delete_inode(struct inode *inode)
{
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
list_del_init(&inode->i_list);
inode->i_state |= I_FREEING;
inodes_stat.nr_inodes--;
@@ -208,7 +208,7 @@ static void hugetlbfs_forget_inode(struct inode *inode)
{
struct super_block *super_block = inode->i_sb;
if (list_empty(&inode->i_hash))
if (hlist_unhashed(&inode->i_hash))
goto out_truncate;
if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
@@ -223,7 +223,7 @@ static void hugetlbfs_forget_inode(struct inode *inode)
/* write_inode_now() ? */
inodes_stat.nr_unused--;
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
out_truncate:
list_del_init(&inode->i_list);
inode->i_state |= I_FREEING;
......
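The fs-writeback.c and hugetlbfs hunks above translate list_empty(&inode->i_hash) to hlist_unhashed() and list_del_init() to hlist_del_init(). The pairing works because an unhashed hlist_node is marked by a NULL pprev, and hlist_del_init() restores that state after removal. A self-contained sketch, with the primitives copied from the include/linux/list.h hunk of this commit and an illustrative main():

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

static int hlist_unhashed(struct hlist_node *h) { return !h->pprev; }

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {			/* no-op when already unhashed */
		struct hlist_node *next = n->next;
		struct hlist_node **pprev = n->pprev;
		*pprev = next;
		if (next)
			next->pprev = pprev;
		INIT_HLIST_NODE(n);	/* pprev back to NULL */
	}
}

int main(void)
{
	struct hlist_head hash;
	struct hlist_node i_hash;

	INIT_HLIST_HEAD(&hash);
	INIT_HLIST_NODE(&i_hash);
	assert(hlist_unhashed(&i_hash));	/* like list_empty(&inode->i_hash) */
	hlist_add_head(&i_hash, &hash);
	assert(!hlist_unhashed(&i_hash));	/* hashed: pprev points into the chain */
	hlist_del_init(&i_hash);
	assert(hlist_unhashed(&i_hash));	/* still safe to test after removal */
	return 0;
}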
fs/inode.c
@@ -69,8 +69,8 @@ static unsigned int i_hash_shift;
LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
static struct hlist_head *inode_hashtable;
static HLIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
/*
* A simple spinlock to protect the list manipulations.
@@ -172,7 +172,7 @@ void destroy_inode(struct inode *inode)
void inode_init_once(struct inode *inode)
{
memset(inode, 0, sizeof(*inode));
INIT_LIST_HEAD(&inode->i_hash);
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_data.clean_pages);
INIT_LIST_HEAD(&inode->i_data.dirty_pages);
INIT_LIST_HEAD(&inode->i_data.locked_pages);
@@ -294,7 +294,7 @@ static int invalidate_list(struct list_head *head, struct super_block * sb, stru
continue;
invalidate_inode_buffers(inode);
if (!atomic_read(&inode->i_count)) {
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
list_del(&inode->i_list);
list_add(&inode->i_list, dispose);
inode->i_state |= I_FREEING;
@@ -435,7 +435,7 @@ static void prune_icache(int nr_to_scan)
if (!can_unuse(inode))
continue;
}
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
list_move(&inode->i_list, &freeable);
inode->i_state |= I_FREEING;
nr_pruned++;
@@ -476,50 +476,42 @@ static int shrink_icache_memory(int nr, unsigned int gfp_mask)
* by hand after calling find_inode now! This simplifies iunique and won't
* add any additional branch in the common code.
*/
static struct inode * find_inode(struct super_block * sb, struct list_head *head, int (*test)(struct inode *, void *), void *data)
static struct inode * find_inode(struct super_block * sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
{
struct list_head *tmp;
struct inode * inode;
struct hlist_node *node;
struct inode * inode = NULL;
tmp = head;
for (;;) {
tmp = tmp->next;
inode = NULL;
if (tmp == head)
break;
inode = list_entry(tmp, struct inode, i_hash);
hlist_for_each (node, head) {
prefetch(node->next);
inode = hlist_entry(node, struct inode, i_hash);
if (inode->i_sb != sb)
continue;
if (!test(inode, data))
continue;
break;
}
return inode;
return node ? inode : NULL;
}
/*
* find_inode_fast is the fast path version of find_inode, see the comment at
* iget_locked for details.
*/
static struct inode * find_inode_fast(struct super_block * sb, struct list_head *head, unsigned long ino)
static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head *head, unsigned long ino)
{
struct list_head *tmp;
struct inode * inode;
struct hlist_node *node;
struct inode * inode = NULL;
tmp = head;
for (;;) {
tmp = tmp->next;
inode = NULL;
if (tmp == head)
break;
inode = list_entry(tmp, struct inode, i_hash);
hlist_for_each (node, head) {
prefetch(node->next);
inode = list_entry(node, struct inode, i_hash);
if (inode->i_ino != ino)
continue;
if (inode->i_sb != sb)
continue;
break;
}
return inode;
return node ? inode : NULL;
}
/**
@@ -569,7 +561,7 @@ EXPORT_SYMBOL(unlock_new_inode);
* We no longer cache the sb_flags in i_flags - see fs.h
* -- rmk@arm.uk.linux.org
*/
static struct inode * get_new_inode(struct super_block *sb, struct list_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
{
struct inode * inode;
@@ -586,7 +578,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct list_head *he
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_hash, head);
hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -619,7 +611,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct list_head *he
* get_new_inode_fast is the fast path version of get_new_inode, see the
* comment at iget_locked for details.
*/
static struct inode * get_new_inode_fast(struct super_block *sb, struct list_head *head, unsigned long ino)
static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
struct inode * inode;
@@ -634,7 +626,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct list_hea
inode->i_ino = ino;
inodes_stat.nr_inodes++;
list_add(&inode->i_list, &inode_in_use);
list_add(&inode->i_hash, head);
hlist_add_head(&inode->i_hash, head);
inode->i_state = I_LOCK|I_NEW;
spin_unlock(&inode_lock);
@@ -686,7 +678,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
static ino_t counter = 0;
struct inode *inode;
struct list_head * head;
struct hlist_head * head;
ino_t res;
spin_lock(&inode_lock);
retry:
@@ -740,7 +732,7 @@ struct inode *igrab(struct inode *inode)
* Note, @test is called with the inode_lock held, so can't sleep.
*/
static inline struct inode *ifind(struct super_block *sb,
struct list_head *head, int (*test)(struct inode *, void *),
struct hlist_head *head, int (*test)(struct inode *, void *),
void *data)
{
struct inode *inode;
@@ -772,7 +764,7 @@ static inline struct inode *ifind(struct super_block *sb,
* Otherwise NULL is returned.
*/
static inline struct inode *ifind_fast(struct super_block *sb,
struct list_head *head, unsigned long ino)
struct hlist_head *head, unsigned long ino)
{
struct inode *inode;
@@ -810,7 +802,7 @@ static inline struct inode *ifind_fast(struct super_block *sb,
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
struct list_head *head = inode_hashtable + hash(sb, hashval);
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
return ifind(sb, head, test, data);
}
@@ -832,7 +824,7 @@ EXPORT_SYMBOL(ilookup5);
*/
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
struct list_head *head = inode_hashtable + hash(sb, ino);
struct hlist_head *head = inode_hashtable + hash(sb, ino);
return ifind_fast(sb, head, ino);
}
@@ -864,7 +856,7 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *), void *data)
{
struct list_head *head = inode_hashtable + hash(sb, hashval);
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
struct inode *inode;
inode = ifind(sb, head, test, data);
@@ -897,7 +889,7 @@ EXPORT_SYMBOL(iget5_locked);
*/
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
struct list_head *head = inode_hashtable + hash(sb, ino);
struct hlist_head *head = inode_hashtable + hash(sb, ino);
struct inode *inode;
inode = ifind_fast(sb, head, ino);
@@ -923,11 +915,11 @@ EXPORT_SYMBOL(iget_locked);
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
struct list_head *head = &anon_hash_chain;
struct hlist_head *head = &anon_hash_chain;
if (inode->i_sb)
head = inode_hashtable + hash(inode->i_sb, hashval);
spin_lock(&inode_lock);
list_add(&inode->i_hash, head);
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode_lock);
}
@@ -941,7 +933,7 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
void remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_lock);
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
spin_unlock(&inode_lock);
}
@@ -949,7 +941,7 @@ void generic_delete_inode(struct inode *inode)
{
struct super_operations *op = inode->i_sb->s_op;
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
list_del_init(&inode->i_list);
inode->i_state|=I_FREEING;
inodes_stat.nr_inodes--;
@@ -978,7 +970,7 @@ static void generic_forget_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (!list_empty(&inode->i_hash)) {
if (!hlist_unhashed(&inode->i_hash)) {
if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
list_del(&inode->i_list);
list_add(&inode->i_list, &inode_unused);
@@ -990,7 +982,7 @@ static void generic_forget_inode(struct inode *inode)
write_inode_now(inode, 1);
spin_lock(&inode_lock);
inodes_stat.nr_unused--;
list_del_init(&inode->i_hash);
hlist_del_init(&inode->i_hash);
}
list_del_init(&inode->i_list);
inode->i_state|=I_FREEING;
@@ -1236,7 +1228,7 @@ void wake_up_inode(struct inode *inode)
*/
void __init inode_init(unsigned long mempages)
{
struct list_head *head;
struct hlist_head *head;
unsigned long order;
unsigned int nr_hash;
int i;
@@ -1253,7 +1245,7 @@ void __init inode_init(unsigned long mempages)
unsigned long tmp;
nr_hash = (1UL << order) * PAGE_SIZE /
sizeof(struct list_head);
sizeof(struct hlist_head);
i_hash_mask = (nr_hash - 1);
tmp = nr_hash;
@@ -1261,7 +1253,7 @@ void __init inode_init(unsigned long mempages)
while ((tmp >>= 1UL) != 0UL)
i_hash_shift++;
inode_hashtable = (struct list_head *)
inode_hashtable = (struct hlist_head *)
__get_free_pages(GFP_ATOMIC, order);
} while (inode_hashtable == NULL && --order >= 0);
@@ -1274,7 +1266,7 @@ void __init inode_init(unsigned long mempages)
head = inode_hashtable;
i = nr_hash;
do {
INIT_LIST_HEAD(head);
INIT_HLIST_HEAD(head);
head++;
i--;
} while (i);
......
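A recurring shape in the fs/inode.c hunks: find_inode() and find_inode_fast() walk the chain with hlist_for_each(), so falling off the end leaves the cursor NULL, and the final return node ? inode : NULL separates a hit (the loop broke early) from chain exhaustion. The old open-coded loop needed a tmp == head comparison instead, because a circular list_head chain never yields NULL. A compact userspace sketch with mock types (hypothetical names, not the kernel's structures):

#include <stdio.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

#define hlist_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos; pos = pos->next)

struct mock_inode {			/* stand-in for struct inode */
	unsigned long i_ino;
	struct hlist_node i_hash;
};

static struct mock_inode *find_ino(struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct mock_inode *inode = NULL;

	hlist_for_each(node, head) {
		inode = hlist_entry(node, struct mock_inode, i_hash);
		if (inode->i_ino == ino)
			break;		/* node stays non-NULL on a match */
	}
	return node ? inode : NULL;	/* NULL cursor means not found */
}

int main(void)
{
	struct mock_inode a = { 10, { NULL, NULL } };
	struct mock_inode b = { 20, { NULL, NULL } };
	struct hlist_head head = { &a.i_hash };

	a.i_hash.next = &b.i_hash;	/* hand-built two-entry chain */
	printf("found 20: %s\n", find_ino(&head, 20) ? "yes" : "no");
	printf("found 99: %s\n", find_ino(&head, 99) ? "yes" : "no");
	return 0;
}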
fs/super.c
@@ -63,7 +63,7 @@ static struct super_block *alloc_super(void)
INIT_LIST_HEAD(&s->s_io);
INIT_LIST_HEAD(&s->s_files);
INIT_LIST_HEAD(&s->s_instances);
INIT_LIST_HEAD(&s->s_anon);
INIT_HLIST_HEAD(&s->s_anon);
init_rwsem(&s->s_umount);
sema_init(&s->s_lock, 1);
down_write(&s->s_umount);
......
include/linux/dcache.h
@@ -76,25 +76,25 @@ struct dentry {
atomic_t d_count;
unsigned long d_vfs_flags; /* moved here to be on same cacheline */
spinlock_t d_lock; /* per dentry lock */
unsigned int d_flags;
unsigned long d_move_count; /* to indicate a moved dentry during lockless lookup */
struct inode * d_inode; /* Where the name belongs to - NULL is negative */
struct dentry * d_parent; /* parent directory */
struct list_head * d_bucket; /* lookup hash bucket */
struct list_head d_hash; /* lookup hash list */
struct list_head d_lru; /* LRU list */
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
int d_mounted;
struct qstr d_name;
struct qstr * d_qstr; /* quick str ptr used in lockless lookup and concurrent d_move */
unsigned long d_time; /* used by d_revalidate */
struct dentry_operations *d_op;
struct super_block * d_sb; /* The root of the dentry tree */
unsigned int d_flags;
int d_mounted;
void * d_fsdata; /* fs-specific data */
struct rcu_head d_rcu;
struct dcookie_struct * d_cookie; /* cookie, if any */
unsigned long d_move_count; /* to indicate a moved dentry during lockless lookup */
struct qstr * d_qstr; /* quick str ptr used in lockless lookup and concurrent d_move */
struct dentry * d_parent; /* parent directory */
struct qstr d_name;
struct hlist_node d_hash; /* lookup hash list */
struct hlist_head * d_bucket; /* lookup hash bucket */
unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
} ____cacheline_aligned;
@@ -171,7 +171,7 @@ extern rwlock_t dparent_lock;
static __inline__ void __d_drop(struct dentry * dentry)
{
dentry->d_vfs_flags |= DCACHE_UNHASHED;
list_del_rcu(&dentry->d_hash);
hlist_del_rcu(&dentry->d_hash);
}
static __inline__ void d_drop(struct dentry * dentry)
@@ -198,7 +198,7 @@ extern struct dentry * d_alloc_anon(struct inode *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_anon(struct list_head *);
extern void shrink_dcache_anon(struct hlist_head *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
......
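The struct dentry reshuffle in the include/linux/dcache.h hunk is a cacheline exercise as much as a type change: the fields the lockless lookup path reads (d_move_count, d_qstr, d_parent, d_name, d_hash, d_bucket) end up adjacent to the inline name, while colder fields such as d_fsdata, d_rcu, and d_cookie move ahead of them. One quick way to audit a layout change like this, sketched with a hypothetical mock struct and an assumed 64-byte cache line:

#include <stdio.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct mock_qstr { const unsigned char *name; unsigned int len, hash; };

struct mock_dentry {			/* a few fields only, not the real layout */
	void *d_fsdata;			/* cold: fs-private data */
	unsigned long d_move_count;	/* hot lockless-lookup fields ... */
	struct mock_qstr *d_qstr;
	struct mock_dentry *d_parent;
	struct mock_qstr d_name;
	struct hlist_node d_hash;
	void *d_bucket;
	unsigned char d_iname[16];	/* ... land on adjacent cache lines */
};

#define SHOW(f) printf("%-14s offset %3lu line %lu\n", #f,		\
		(unsigned long)offsetof(struct mock_dentry, f),		\
		(unsigned long)offsetof(struct mock_dentry, f) / 64)

int main(void)
{
	SHOW(d_fsdata); SHOW(d_move_count); SHOW(d_qstr); SHOW(d_parent);
	SHOW(d_name); SHOW(d_hash); SHOW(d_bucket); SHOW(d_iname);
	return 0;
}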
include/linux/fs.h
@@ -354,7 +354,7 @@ struct block_device {
};
struct inode {
struct list_head i_hash;
struct hlist_node i_hash;
struct list_head i_list;
struct list_head i_dentry;
unsigned long i_ino;
@@ -602,7 +602,7 @@ struct super_block {
struct list_head s_dirty; /* dirty inodes */
struct list_head s_io; /* parked for writeback */
struct list_head s_anon; /* anonymous dentries for (nfs) exporting */
struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_files;
struct block_device *s_bdev;
......
include/linux/list.h
@@ -319,6 +319,98 @@ static inline void list_splice_init(struct list_head *list,
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, ({ read_barrier_depends(); 0;}), n = pos->next)
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
static __inline__ int hlist_unhashed(struct hlist_node *h)
{
return !h->pprev;
}
static __inline__ int hlist_empty(struct hlist_head *h)
{
return !h->first;
}
static __inline__ void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static __inline__ void hlist_del(struct hlist_node *n)
{
if (n->pprev)
__hlist_del(n);
}
#define hlist_del_rcu hlist_del /* list_del_rcu is identical too? */
static __inline__ void hlist_del_init(struct hlist_node *n)
{
if (n->pprev) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static __inline__ void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}
static __inline__ void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
smp_wmb();
if (first)
first->pprev = &n->next;
h->first = n;
}
/* next must be != NULL */
static __inline__ void hlist_add_before(struct hlist_node *n, struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
/* Cannot easily do prefetch unfortunately */
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos; \
pos = pos->next)
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
......
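Of the new primitives, hlist_add_head_rcu() carries the subtle part of this commit: a single smp_wmb() orders the node's initialization before publication, pairing with the smp_read_barrier_depends() that d_lookup() issues while walking a bucket under rcu_read_lock(). The same function as in the hunk above, restated with editorial comments; smp_wmb() is stubbed as a plain compiler barrier purely so the sketch compiles in userspace (the real kernel macro may also emit a hardware write barrier):

/* Illustrative stand-in for the kernel's smp_wmb() */
#define smp_wmb() __asm__ __volatile__("" ::: "memory")

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static inline void hlist_add_head_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;	/* 1: link the new node toward the old chain */
	n->pprev = &h->first;	/* 2: back-link for a later hlist_del_rcu() */
	smp_wmb();		/* 3: make steps 1-2 visible before publication */
	if (first)
		first->pprev = &n->next;	/* readers never follow pprev */
	h->first = n;		/* 4: publish; a lockless walker that sees n
				 *    also sees its initialized next field */
}

The reader side is visible in the d_lookup() hunk above: hlist_for_each() plus smp_read_barrier_depends() before any key comparison, with the d_bucket check catching a concurrent d_move() to another chain.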