Commit c03aa9f6 authored by Jan Kara

udf: Avoid infinite loop when processing indirect ICBs

We did not implement any bound on the number of indirect ICBs we follow
when loading an inode. Thus a corrupted medium could cause the kernel to
go into an infinite loop, possibly causing a stack overflow.

Fix the possible stack overflow by removing the recursion from
__udf_read_inode() and limiting the number of indirect ICBs we follow to
avoid infinite loops.
Signed-off-by: Jan Kara <jack@suse.cz>
parent bb7720a0
@@ -1270,6 +1270,13 @@ int udf_setsize(struct inode *inode, loff_t newsize)
 	return 0;
 }
 
+/*
+ * Maximum length of linked list formed by ICB hierarchy. The chosen number is
+ * arbitrary - just that we hopefully don't limit any real use of rewritten
+ * inode on write-once media but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_ICB_NESTING 1024
+
 static void __udf_read_inode(struct inode *inode)
 {
 	struct buffer_head *bh = NULL;
@@ -1279,7 +1286,9 @@ static void __udf_read_inode(struct inode *inode)
 	struct udf_inode_info *iinfo = UDF_I(inode);
 	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
 	unsigned int link_count;
+	unsigned int indirections = 0;
 
+reread:
 	/*
 	 * Set defaults, but the inode is still incomplete!
 	 * Note: get_new_inode() sets the following on a new inode:
@@ -1317,28 +1326,26 @@ static void __udf_read_inode(struct inode *inode)
 	ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
 					&ident);
 	if (ident == TAG_IDENT_IE && ibh) {
-		struct buffer_head *nbh = NULL;
 		struct kernel_lb_addr loc;
 		struct indirectEntry *ie;
 
 		ie = (struct indirectEntry *)ibh->b_data;
 		loc = lelb_to_cpu(ie->indirectICB.extLocation);
 
-		if (ie->indirectICB.extLength &&
-			(nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
-						&ident))) {
-			if (ident == TAG_IDENT_FE ||
-				ident == TAG_IDENT_EFE) {
-				memcpy(&iinfo->i_location,
-					&loc,
-					sizeof(struct kernel_lb_addr));
-				brelse(bh);
-				brelse(ibh);
-				brelse(nbh);
-				__udf_read_inode(inode);
+		if (ie->indirectICB.extLength) {
+			brelse(bh);
+			brelse(ibh);
+			memcpy(&iinfo->i_location, &loc,
+			       sizeof(struct kernel_lb_addr));
+			if (++indirections > UDF_MAX_ICB_NESTING) {
+				udf_err(inode->i_sb,
+					"too many ICBs in ICB hierarchy"
+					" (max %d supported)\n",
+					UDF_MAX_ICB_NESTING);
+				make_bad_inode(inode);
 				return;
 			}
-			brelse(nbh);
+			goto reread;
 		}
 	}
 	brelse(ibh);
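
Editor's note: the pattern adopted above — following a chain of indirect
references iteratively with a hard cap rather than recursing — can be shown
in a minimal standalone sketch. Everything below (struct block_ref,
resolve_chain(), MAX_NESTING) is an illustrative stand-in, not the kernel's
UDF code or API.

/* Illustrative only: bounded iteration over an indirection chain. */
#include <stdio.h>

#define MAX_NESTING 1024	/* arbitrary cap, like UDF_MAX_ICB_NESTING */

struct block_ref {
	int has_next;			/* non-zero if this block points at another */
	struct block_ref *next;
};

/* Follow the chain iteratively; bail out instead of looping forever
 * (or blowing the stack) on corrupted/cyclic data.
 */
static struct block_ref *resolve_chain(struct block_ref *start)
{
	unsigned int depth = 0;
	struct block_ref *cur = start;

	while (cur && cur->has_next) {
		if (++depth > MAX_NESTING) {
			fprintf(stderr, "too many indirections (max %d)\n",
				MAX_NESTING);
			return NULL;	/* analogous to make_bad_inode() */
		}
		cur = cur->next;	/* analogous to 'goto reread' with the new location */
	}
	return cur;
}

int main(void)
{
	struct block_ref tail = { 0, NULL };
	struct block_ref head = { 1, &tail };

	return resolve_chain(&head) == &tail ? 0 : 1;
}

The depth counter is the whole fix: a well-formed chain never comes close to
the cap, while a corrupted (possibly cyclic) one is cut off after a bounded
number of steps.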