Commit 5331be09 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6:
  JFS: Remove incorrect kgdb define
  JFS: call io_schedule() instead of schedule() to avoid deadlock
  JFS: Add lockdep annotations
  JFS: Avoid BUG() on a damaged file system
parents d3f8fd76 7220c017
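
Background note: the lockdep annotations in this merge rely on lock subclasses. When two locks of the same lock class must legitimately be held at once (a parent and a child inode, or a regular inode and a map inode), each acquisition is tagged with a distinct subclass so the validator keeps checking ordering instead of reporting a false self-deadlock. A minimal sketch of the pattern, using hypothetical names (demo_inode, demo_lock_pair are not from the patch); the real subclass enums appear in the jfs_incore.h hunk below:

	#include <linux/mutex.h>

	enum demo_mutex_class {
		DEMO_MUTEX_PARENT,
		DEMO_MUTEX_CHILD,
	};

	struct demo_inode {
		struct mutex lock;	/* same lock class in every demo_inode */
	};

	static void demo_lock_pair(struct demo_inode *parent,
				   struct demo_inode *child)
	{
		/*
		 * A plain mutex_lock() on the second lock would look
		 * recursive to lockdep; the subclass argument declares
		 * the nesting as intentional.
		 */
		mutex_lock_nested(&parent->lock, DEMO_MUTEX_PARENT);
		mutex_lock_nested(&child->lock, DEMO_MUTEX_CHILD);

		/* ... operate on both inodes ... */

		mutex_unlock(&child->lock);
		mutex_unlock(&parent->lock);
	}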
fs/jfs/inode.c
@@ -182,9 +182,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
 	 * Take appropriate lock on inode
 	 */
 	if (create)
-		IWRITE_LOCK(ip);
+		IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
 	else
-		IREAD_LOCK(ip);
+		IREAD_LOCK(ip, RDWRLOCK_NORMAL);
 
 	if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
 	    (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
@@ -359,7 +359,7 @@ void jfs_truncate(struct inode *ip)
 	nobh_truncate_page(ip->i_mapping, ip->i_size);
 
-	IWRITE_LOCK(ip);
+	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
 	jfs_truncate_nolock(ip, ip->i_size);
 	IWRITE_UNLOCK(ip);
 }
fs/jfs/jfs_debug.h
@@ -39,10 +39,6 @@ extern void jfs_proc_clean(void);
 /*
  * assert with traditional printf/panic
  */
-#ifdef CONFIG_KERNEL_ASSERTS
-/* kgdb stuff */
-#define assert(p) KERNEL_ASSERT(#p, p)
-#else
 #define assert(p) do {	\
 	if (!(p)) {	\
 		printk(KERN_CRIT "BUG at %s:%d assert(%s)\n",	\
@@ -50,7 +46,6 @@ extern void jfs_proc_clean(void);
 		BUG();	\
 	}	\
 } while (0)
-#endif
 
 /*
  * debug ON
fs/jfs/jfs_dmap.c
@@ -337,7 +337,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
 	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
 
-	IREAD_LOCK(ipbmap);
+	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 	/* block to be freed better be within the mapsize. */
 	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
@@ -733,7 +733,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
 	 * allocation group size, try to allocate anywhere.
 	 */
 	if (l2nb > bmp->db_agl2size) {
-		IWRITE_LOCK(ipbmap);
+		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 		rc = dbAllocAny(bmp, nblocks, l2nb, results);
@@ -774,7 +774,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
 	 * the hint using a tiered strategy.
 	 */
 	if (nblocks <= BPERDMAP) {
-		IREAD_LOCK(ipbmap);
+		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 		/* get the buffer for the dmap containing the hint.
 		 */
@@ -844,7 +844,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
 	/* try to satisfy the allocation request with blocks within
 	 * the same allocation group as the hint.
 	 */
-	IWRITE_LOCK(ipbmap);
+	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
 	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
 		goto write_unlock;
@@ -856,7 +856,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
 	 * Let dbNextAG recommend a preferred allocation group
 	 */
 	agno = dbNextAG(ipbmap);
-	IWRITE_LOCK(ipbmap);
+	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 	/* Try to allocate within this allocation group.  if that fails, try to
 	 * allocate anywhere in the map.
@@ -900,7 +900,7 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
 	s64 lblkno;
 	struct metapage *mp;
 
-	IREAD_LOCK(ipbmap);
+	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 	/*
 	 * validate extent request:
@@ -1050,7 +1050,7 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
 	 */
 	extblkno = lastblkno + 1;
 
-	IREAD_LOCK(ipbmap);
+	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 	/* better be within the file system */
 	bmp = sbi->bmap;
@@ -3116,7 +3116,7 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
 	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
 	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
 
-	IREAD_LOCK(ipbmap);
+	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
 	/* block to be allocated better be within the mapsize. */
 	ASSERT(nblocks <= bmp->db_mapsize - blkno);
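
Taken together, the dbAlloc() hunks show the locking pattern behind the tiered strategy: the bmap inode's rdwrlock is taken read-side while the search is confined to the single dmap near the hint, and write-side once the search may roam an allocation group or the whole map, always with the RDWRLOCK_DMAP subclass since the caller may already hold a normal inode's rdwrlock. A condensed paraphrase of that control flow (not the verbatim kernel code; declarations and error handling elided):

	/* Larger than an allocation group: search the entire map. */
	if (l2nb > bmp->db_agl2size) {
		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
		rc = dbAllocAny(bmp, nblocks, l2nb, results);
		goto write_unlock;
	}

	/* Small request: a read lock suffices for the dmap near the hint. */
	if (nblocks <= BPERDMAP) {
		IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
		/* ... try the hint's dmap, unlock and return on success ... */
		IREAD_UNLOCK(ipbmap);
	}

	/* Fall back: same allocation group, then let dbNextAG() pick one. */
	IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
		goto write_unlock;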
fs/jfs/jfs_imap.c
@@ -331,7 +331,7 @@ int diRead(struct inode *ip)
 	/* read the iag */
 	imap = JFS_IP(ipimap)->i_imap;
 
-	IREAD_LOCK(ipimap);
+	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
 	rc = diIAGRead(imap, iagno, &mp);
 	IREAD_UNLOCK(ipimap);
 	if (rc) {
@@ -920,7 +920,7 @@ int diFree(struct inode *ip)
 	/* Obtain read lock in imap inode.  Don't release it until we have
 	 * read all of the IAG's that we are going to.
 	 */
-	IREAD_LOCK(ipimap);
+	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
 
 	/* read the iag.
 	 */
@@ -1415,7 +1415,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
 	AG_LOCK(imap, agno);
 
 	/* Get read lock on imap inode */
-	IREAD_LOCK(ipimap);
+	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
 
 	/* get the iag number and read the iag */
 	iagno = INOTOIAG(inum);
@@ -1808,7 +1808,7 @@ static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
 		return -ENOSPC;
 
 	/* obtain read lock on imap inode */
-	IREAD_LOCK(imap->im_ipimap);
+	IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
 
 	/* read the iag at the head of the list.
 	 */
@@ -1946,7 +1946,7 @@ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
 	} else {
 		/* read the iag.
 		 */
-		IREAD_LOCK(imap->im_ipimap);
+		IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
 		if ((rc = diIAGRead(imap, iagno, &mp))) {
 			IREAD_UNLOCK(imap->im_ipimap);
 			jfs_error(ip->i_sb, "diAllocExt: error reading iag");
@@ -2509,7 +2509,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
 	 */
 
 	/* acquire inode map lock */
-	IWRITE_LOCK(ipimap);
+	IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
 
 	if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
 		IWRITE_UNLOCK(ipimap);
@@ -2648,7 +2648,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
 	}
 
 	/* obtain read lock on map */
-	IREAD_LOCK(ipimap);
+	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
 
 	/* read the iag */
 	if ((rc = diIAGRead(imap, iagno, &mp))) {
@@ -2779,7 +2779,7 @@ diUpdatePMap(struct inode *ipimap,
 		return -EIO;
 	}
 	/* read the iag */
-	IREAD_LOCK(ipimap);
+	IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
 	rc = diIAGRead(imap, iagno, &mp);
 	IREAD_UNLOCK(ipimap);
 	if (rc)
fs/jfs/jfs_incore.h
@@ -109,9 +109,11 @@ struct jfs_inode_info {
 #define JFS_ACL_NOT_CACHED ((void *)-1)
 
-#define IREAD_LOCK(ip)		down_read(&JFS_IP(ip)->rdwrlock)
+#define IREAD_LOCK(ip, subclass) \
+	down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
 #define IREAD_UNLOCK(ip)	up_read(&JFS_IP(ip)->rdwrlock)
-#define IWRITE_LOCK(ip)		down_write(&JFS_IP(ip)->rdwrlock)
+#define IWRITE_LOCK(ip, subclass) \
+	down_write_nested(&JFS_IP(ip)->rdwrlock, subclass)
 #define IWRITE_UNLOCK(ip)	up_write(&JFS_IP(ip)->rdwrlock)
 
 /*
@@ -127,6 +129,29 @@ enum cflags {
 	COMMIT_Synclist,	/* metadata pages on group commit synclist */
 };
 
+/*
+ * commit_mutex nesting subclasses:
+ */
+enum commit_mutex_class
+{
+	COMMIT_MUTEX_PARENT,
+	COMMIT_MUTEX_CHILD,
+	COMMIT_MUTEX_SECOND_PARENT,	/* Renaming */
+	COMMIT_MUTEX_VICTIM		/* Inode being unlinked due to rename */
+};
+
+/*
+ * rdwrlock subclasses:
+ * The dmap inode may be locked while a normal inode or the imap inode are
+ * locked.
+ */
+enum rdwrlock_class
+{
+	RDWRLOCK_NORMAL,
+	RDWRLOCK_IMAP,
+	RDWRLOCK_DMAP
+};
+
 #define set_cflag(flag, ip)	set_bit(flag, &(JFS_IP(ip)->cflag))
 #define clear_cflag(flag, ip)	clear_bit(flag, &(JFS_IP(ip)->cflag))
 #define test_cflag(flag, ip)	test_bit(flag, &(JFS_IP(ip)->cflag))
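
The rdwrlock_class comment above is the key ordering rule: an operation on a regular file may need to allocate disk blocks or inodes, so the dmap or imap inode's rdwrlock nests inside the rdwrlock already held. A sketch of a nesting that lockdep now accepts (illustrative only; ip stands for an ordinary inode and ipbmap for the block map inode, as in the jfs_dmap.c hunks above):

	IREAD_LOCK(ip, RDWRLOCK_NORMAL);	/* regular file inode first */
	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);	/* block map inode nested inside */

	/* ... extent lookup / block allocation ... */

	IREAD_UNLOCK(ipbmap);
	IREAD_UNLOCK(ip);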
fs/jfs/jfs_lock.h
@@ -42,7 +42,7 @@ do {							\
 		if (cond)				\
 			break;				\
 		unlock_cmd;				\
-		schedule();				\
+		io_schedule();				\
 		lock_cmd;				\
 	}						\
 	current->state = TASK_RUNNING;			\
fs/jfs/jfs_metapage.c
@@ -56,7 +56,7 @@ static inline void __lock_metapage(struct metapage *mp)
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (metapage_locked(mp)) {
 			unlock_page(mp->page);
-			schedule();
+			io_schedule();
 			lock_page(mp->page);
 		}
 	} while (trylock_metapage(mp));
fs/jfs/jfs_txnmgr.c
@@ -135,7 +135,7 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
 	add_wait_queue(event, &wait);
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	TXN_UNLOCK();
-	schedule();
+	io_schedule();
 	current->state = TASK_RUNNING;
 	remove_wait_queue(event, &wait);
 }
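
All three wait loops above sleep while waiting for metadata I/O to complete, and per the commit summary plain schedule() could deadlock there. Conceptually, io_schedule() is schedule() wrapped in I/O-wait bookkeeping; a paraphrase of the scheduler code of this era, not a verbatim copy:

	atomic_inc(&rq->nr_iowait);	/* this runqueue has an I/O waiter */
	schedule();			/* ordinary context switch */
	atomic_dec(&rq->nr_iowait);

That accounting makes the sleep visible to the parts of the kernel that distinguish "blocked on disk" from "nothing to do", which is what makes it the correct primitive for sleeps tied to outstanding I/O.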
fs/jfs/jfs_xtree.c
@@ -757,6 +757,11 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
 		nsplit = 0;
 
 		/* push (bn, index) of the parent page/entry */
+		if (BT_STACK_FULL(btstack)) {
+			jfs_error(ip->i_sb, "stack overrun in xtSearch!");
+			XT_PUTPAGE(mp);
+			return -EIO;
+		}
 		BT_PUSH(btstack, bn, index);
 
 		/* get the child page block number */
@@ -3915,6 +3920,11 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
 	 */
       getChild:
 	/* save current parent entry for the child page */
+	if (BT_STACK_FULL(&btstack)) {
+		jfs_error(ip->i_sb, "stack overrun in xtTruncate!");
+		XT_PUTPAGE(mp);
+		return -EIO;
+	}
 	BT_PUSH(&btstack, bn, index);
 
 	/* get child page */
@@ -4112,6 +4122,11 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
 	 */
       getChild:
 	/* save current parent entry for the child page */
+	if (BT_STACK_FULL(&btstack)) {
+		jfs_error(ip->i_sb, "stack overrun in xtTruncate_pmap!");
+		XT_PUTPAGE(mp);
+		return -EIO;
+	}
 	BT_PUSH(&btstack, bn, index);
 
 	/* get child page */
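
The three identical guards above protect the same structure: the btstack is a fixed-size array of parent-page frames, sized for the maximum height of a healthy B+-tree, and a damaged volume can present a deeper or even cyclic chain of index pages. Checking before each push turns that corruption into a logged jfs_error() and -EIO instead of a stack overrun or a BUG(). Roughly (field list abridged; the real definitions live in the JFS btree header):

	struct btframe {			/* one traversal frame */
		s64 bn;				/* parent page block number */
		/* ... index, metapage pointer ... */
	};

	struct btstack {
		struct btframe *top;
		struct btframe stack[MAXTREEHEIGHT];	/* bounded by design */
	};

	/* BT_STACK_FULL() is a bounds check on top; BT_PUSH() must never
	 * be reached while it is true. */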
fs/jfs/namei.c
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
 	tid = txBegin(dip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dip)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	rc = jfs_init_acl(tid, ip, dip);
 	if (rc)
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
 	tid = txBegin(dip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dip)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	rc = jfs_init_acl(tid, ip, dip);
 	if (rc)
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
 	tid = txBegin(dip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dip)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	iplist[0] = dip;
 	iplist[1] = ip;
@@ -483,12 +483,12 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
 
-	IWRITE_LOCK(ip);
+	IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
 
 	tid = txBegin(dip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dip)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	iplist[0] = dip;
 	iplist[1] = ip;
@@ -802,8 +802,8 @@ static int jfs_link(struct dentry *old_dentry,
 	tid = txBegin(ip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dir)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	/*
 	 * scan parent directory for entry/freespace
@@ -913,8 +913,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
 	tid = txBegin(dip->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dip)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	rc = jfs_init_security(tid, ip, dip);
 	if (rc)
@@ -1127,7 +1127,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			goto out3;
 		}
 	} else if (new_ip) {
-		IWRITE_LOCK(new_ip);
+		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
 		DQUOT_INIT(new_ip);
 	}
@@ -1137,13 +1137,21 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 */
 	tid = txBegin(new_dir->i_sb, 0);
 
-	mutex_lock(&JFS_IP(new_dir)->commit_mutex);
-	mutex_lock(&JFS_IP(old_ip)->commit_mutex);
+	/*
+	 * How do we know the locking is safe from deadlocks?
+	 * The vfs does the hard part for us.  Any time we are taking nested
+	 * commit_mutexes, the vfs already has i_mutex held on the parent.
+	 * Here, the vfs has already taken i_mutex on both old_dir and new_dir.
+	 */
+	mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 	if (old_dir != new_dir)
-		mutex_lock(&JFS_IP(old_dir)->commit_mutex);
+		mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex,
+				  COMMIT_MUTEX_SECOND_PARENT);
 
 	if (new_ip) {
-		mutex_lock(&JFS_IP(new_ip)->commit_mutex);
+		mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex,
+				  COMMIT_MUTEX_VICTIM);
 		/*
 		 * Change existing directory entry to new inode number
 		 */
@@ -1357,8 +1365,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
 	tid = txBegin(dir->i_sb, 0);
 
-	mutex_lock(&JFS_IP(dir)->commit_mutex);
-	mutex_lock(&JFS_IP(ip)->commit_mutex);
+	mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+	mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
 
 	rc = jfs_init_acl(tid, ip, dir);
 	if (rc)
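
For reference, jfs_rename() is the one place all four commit_mutex subclasses meet; the in-line comment above explains why the fixed order is deadlock-free (the VFS already holds i_mutex on both directories before ->rename is called). The acquisition order after this change:

	1. new_dir  ->  COMMIT_MUTEX_PARENT
	2. old_ip   ->  COMMIT_MUTEX_CHILD
	3. old_dir  ->  COMMIT_MUTEX_SECOND_PARENT   (only when old_dir != new_dir)
	4. new_ip   ->  COMMIT_MUTEX_VICTIM          (only when the rename target exists)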