Commit 4d4ef9ab authored by Eric Sesterhenn, committed by Adrian Bunk

BUG_ON() Conversion in fs/hfs/

This changes if () BUG(); constructs to BUG_ON(), which is
cleaner, contains unlikely() and can be better optimized away.
Signed-off-by: Eric Sesterhenn <snakebyte@gmx.de>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
parent 28133c7b
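For reference, the generic BUG_ON() macro in include/asm-generic/bug.h is defined roughly as below. This is a simplified sketch (architectures and CONFIG_BUG=n builds provide their own variants), but it shows where the unlikely() branch hint mentioned in the commit message comes from:

/*
 * Simplified sketch of the generic BUG_ON() definition from
 * include/asm-generic/bug.h. Architectures and CONFIG_BUG=n builds
 * provide their own variants; treat this as illustrative only.
 */
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)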
@@ -306,8 +306,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
 	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
 	     *p && *p != node; p = &(*p)->next_hash)
 		;
-	if (!*p)
-		BUG();
+	BUG_ON(!*p);
 	*p = node->next_hash;
 	node->tree->node_hash_cnt--;
 }
@@ -415,8 +414,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 	spin_lock(&tree->hash_lock);
 	node = hfs_bnode_findhash(tree, num);
 	spin_unlock(&tree->hash_lock);
-	if (node)
-		BUG();
+	BUG_ON(node);
 	node = __hfs_bnode_create(tree, num);
 	if (!node)
 		return ERR_PTR(-ENOMEM);
@@ -459,8 +457,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
 			node->tree->cnid, node->this, atomic_read(&node->refcnt));
-		if (!atomic_read(&node->refcnt))
-			BUG();
+		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
 		for (i = 0; i < tree->pages_per_bnode; i++) {
@@ -36,8 +36,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	tree->inode = iget_locked(sb, id);
 	if (!tree->inode)
 		goto free_tree;
-	if (!(tree->inode->i_state & I_NEW))
-		BUG();
+	BUG_ON(!(tree->inode->i_state & I_NEW));
 	{
 		struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
 		HFS_I(tree->inode)->flags = 0;