Commit f3f106da authored by Linus Torvalds

Merge tag 'for_v4.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull misc fs fixes from Jan Kara:

 - make UDF properly mount media created by Win7

 - make isofs properly refuse devices with a large physical block size

 - fix a Spectre gadget in quotactl(2)

 - fix a warning in fsnotify code hit by syzkaller

* tag 'for_v4.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  udf: Fix mounting of Win7 created UDF filesystems
  udf: Remove dead code from udf_find_fileset()
  fs/quota: Fix spectre gadget in do_quotactl
  fs/quota: Replace XQM_MAXQUOTAS usage with MAXQUOTAS
  isofs: reject hardware sector size > 2048 bytes
  fsnotify: fix false positive warning on inode delete
parents ff81a521 ee4af50c
fs/isofs/inode.c

@@ -24,6 +24,7 @@
 #include <linux/mpage.h>
 #include <linux/user_namespace.h>
 #include <linux/seq_file.h>
+#include <linux/blkdev.h>
 #include "isofs.h"
 #include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
 	/*
 	 * What if bugger tells us to go beyond page size?
 	 */
+	if (bdev_logical_block_size(s->s_bdev) > 2048) {
+		printk(KERN_WARNING
+		       "ISOFS: unsupported/invalid hardware sector size %d\n",
+		       bdev_logical_block_size(s->s_bdev));
+		goto out_freesbi;
+	}
 	opt.blocksize = sb_min_blocksize(s, opt.blocksize);
 	sbi->s_high_sierra = 0; /* default is iso9660 */
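ISO 9660 images are laid out in 2048-byte logical blocks, and sb_min_blocksize() can never pick a filesystem block size smaller than the device's logical block size, so media reporting sectors larger than 2048 bytes cannot be mounted and is now rejected up front instead of failing later. A standalone userspace sketch of that constraint follows; the helper name is hypothetical and this is not the kernel code:

#include <stdio.h>

/*
 * ISO 9660 uses 2048-byte logical blocks, and a filesystem block size can
 * never be smaller than the device's logical block size, so a device with
 * larger sectors cannot hold a mountable isofs image. Hypothetical helper
 * mirroring the idea of the check above.
 */
#define ISOFS_MAX_BLOCKSIZE 2048

static int isofs_sector_size_ok(unsigned int logical_block_size)
{
	return logical_block_size <= ISOFS_MAX_BLOCKSIZE;
}

int main(void)
{
	unsigned int sizes[] = { 512, 2048, 4096 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("sector size %u: %s\n", sizes[i],
		       isofs_sector_size_ok(sizes[i]) ? "ok" : "rejected");
	return 0;
}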
fs/notify/mark.c

@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
 	struct fsnotify_mark *mark;
 
 	assert_spin_locked(&conn->lock);
+	/* We can get detached connector here when inode is getting unlinked. */
+	if (!fsnotify_valid_obj_type(conn->type))
+		return;
 	hlist_for_each_entry(mark, &conn->list, obj_list) {
 		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
			new_mask |= mark->mask;
 	}
-	if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
-		return;
 	*fsnotify_conn_mask_p(conn) = new_mask;
 }
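The warning removed above fired when __fsnotify_recalc_mask() ran against a connector that had already been detached from its inode, which is a legitimate situation during unlink rather than a bug; the fix checks the connector type before walking the mark list and returns quietly. A minimal userspace sketch of the reordered pattern, using hypothetical stand-in types rather than the fsnotify API:

#include <stdio.h>

enum { OBJ_DETACHED = 0, OBJ_INODE = 1 };

struct mark { unsigned int mask; int attached; };

struct connector {
	int type;
	struct mark marks[4];
	unsigned int nr_marks;
	unsigned int mask;
};

/* Detached connectors are expected here; bail out quietly, do not warn. */
static void recalc_mask(struct connector *conn)
{
	unsigned int new_mask = 0;

	if (conn->type == OBJ_DETACHED)
		return;

	for (unsigned int i = 0; i < conn->nr_marks; i++)
		if (conn->marks[i].attached)
			new_mask |= conn->marks[i].mask;
	conn->mask = new_mask;
}

int main(void)
{
	struct connector c = {
		.type = OBJ_INODE,
		.marks = { { 0x1, 1 }, { 0x4, 0 }, { 0x8, 1 } },
		.nr_marks = 3,
	};

	recalc_mask(&c);
	printf("mask=%#x\n", c.mask);	/* 0x9: only attached marks counted */

	c.type = OBJ_DETACHED;		/* e.g. the inode is being unlinked */
	recalc_mask(&c);		/* no warning, mask left untouched */
	return 0;
}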
fs/quota/quota.c

@@ -18,6 +18,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
 	struct if_dqinfo uinfo;
 	int ret;
 
-	/* This checks whether qc_state has enough entries... */
-	BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
 	if (!sb->s_qcop->get_state)
 		return -ENOSYS;
 	ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
 	 * GETXSTATE quotactl has space for just one set of time limits so
 	 * report them for the first enabled quota type
 	 */
-	for (type = 0; type < XQM_MAXQUOTAS; type++)
+	for (type = 0; type < MAXQUOTAS; type++)
 		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
 			break;
-	BUG_ON(type == XQM_MAXQUOTAS);
+	BUG_ON(type == MAXQUOTAS);
 	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
 	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
 	 * GETXSTATV quotactl has space for just one set of time limits so
 	 * report them for the first enabled quota type
 	 */
-	for (type = 0; type < XQM_MAXQUOTAS; type++)
+	for (type = 0; type < MAXQUOTAS; type++)
 		if (state.s_state[type].flags & QCI_ACCT_ENABLED)
 			break;
-	BUG_ON(type == XQM_MAXQUOTAS);
+	BUG_ON(type == MAXQUOTAS);
 	fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
 	fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 {
 	int ret;
 
-	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+	if (type >= MAXQUOTAS)
 		return -EINVAL;
+	type = array_index_nospec(type, MAXQUOTAS);
 	/*
 	 * Quota not supported on this fs? Check this before s_quota_types
 	 * since they needn't be set if quota is not supported at all.
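The do_quotactl() change above is Spectre-v1 hardening: `type` comes from userspace, and even after the `type >= MAXQUOTAS` check a mispredicted branch could speculatively index per-type quota state out of bounds, so array_index_nospec() also clamps the value on the speculative path. A simplified userspace sketch of the idea; the real kernel helper is branch-free and architecture-specific, and the names below are illustrative only:

#include <stddef.h>
#include <stdio.h>

/*
 * Conceptual stand-in for array_index_nospec(): after the bounds check,
 * clamp the index so that even a mispredicted branch cannot read out of
 * bounds. This only shows where the clamp sits, not how the kernel
 * implements it.
 */
static size_t clamp_index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size); /* all-ones iff in bounds */

	return index & mask;
}

#define MAXQUOTAS 3

int main(void)
{
	const char *types[MAXQUOTAS] = { "user", "group", "project" };
	size_t type = 2;				/* pretend this came from userspace */

	if (type >= MAXQUOTAS)
		return 1;
	type = clamp_index_nospec(type, MAXQUOTAS);	/* same shape as the hunk above */
	printf("quota type: %s\n", types[type]);
	return 0;
}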
fs/udf/super.c

@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
 			    struct kernel_lb_addr *root)
 {
 	struct buffer_head *bh = NULL;
-	long lastblock;
 	uint16_t ident;
-	struct udf_sb_info *sbi;
 
 	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
 	    fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
 			return 1;
 		}
 	}
-
-	sbi = UDF_SB(sb);
-	if (!bh) {
-		/* Search backwards through the partitions */
-		struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
-		return 1;
-
-		for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
-		     (newfileset.partitionReferenceNum != 0xFFFF &&
-		      fileset->logicalBlockNum == 0xFFFFFFFF &&
-		      fileset->partitionReferenceNum == 0xFFFF);
-		     newfileset.partitionReferenceNum--) {
-			lastblock = sbi->s_partmaps
-					[newfileset.partitionReferenceNum]
-						.s_partition_len;
-			newfileset.logicalBlockNum = 0;
-
-			do {
-				bh = udf_read_ptagged(sb, &newfileset, 0,
-						      &ident);
-				if (!bh) {
-					newfileset.logicalBlockNum++;
-					continue;
-				}
-
-				switch (ident) {
-				case TAG_IDENT_SBD:
-				{
-					struct spaceBitmapDesc *sp;
-					sp = (struct spaceBitmapDesc *)
-								bh->b_data;
-					newfileset.logicalBlockNum += 1 +
-						((le32_to_cpu(sp->numOfBytes) +
-						  sizeof(struct spaceBitmapDesc)
-						  - 1) >> sb->s_blocksize_bits);
-					brelse(bh);
-					break;
-				}
-				case TAG_IDENT_FSD:
-					*fileset = newfileset;
-					break;
-				default:
-					newfileset.logicalBlockNum++;
-					brelse(bh);
-					bh = NULL;
-					break;
-				}
-			} while (newfileset.logicalBlockNum < lastblock &&
-				 fileset->logicalBlockNum == 0xFFFFFFFF &&
-				 fileset->partitionReferenceNum == 0xFFFF);
-		}
-	}
-
-	if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
-	     fileset->partitionReferenceNum != 0xFFFF) && bh) {
-		udf_debug("Fileset at block=%u, partition=%u\n",
-			  fileset->logicalBlockNum,
-			  fileset->partitionReferenceNum);
-		sbi->s_partition = fileset->partitionReferenceNum;
-		udf_load_fileset(sb, bh, root);
-		brelse(bh);
-		return 0;
+	udf_debug("Fileset at block=%u, partition=%u\n",
+		  fileset->logicalBlockNum,
+		  fileset->partitionReferenceNum);
+	UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
+	udf_load_fileset(sb, bh, root);
+	brelse(bh);
+	return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
  */
 #define PART_DESC_ALLOC_STEP 32
 
+struct part_desc_seq_scan_data {
+	struct udf_vds_record rec;
+	u32 partnum;
+};
+
 struct desc_seq_scan_data {
 	struct udf_vds_record vds[VDS_POS_LENGTH];
 	unsigned int size_part_descs;
-	struct udf_vds_record *part_descs_loc;
+	unsigned int num_part_descs;
+	struct part_desc_seq_scan_data *part_descs_loc;
 };
 
 static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
 {
 	struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
 	int partnum;
+	int i;
 
 	partnum = le16_to_cpu(desc->partitionNumber);
-	if (partnum >= data->size_part_descs) {
-		struct udf_vds_record *new_loc;
+	for (i = 0; i < data->num_part_descs; i++)
+		if (partnum == data->part_descs_loc[i].partnum)
+			return &(data->part_descs_loc[i].rec);
+
+	if (data->num_part_descs >= data->size_part_descs) {
+		struct part_desc_seq_scan_data *new_loc;
 		unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
 
 		new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
 		data->part_descs_loc = new_loc;
 		data->size_part_descs = new_size;
 	}
-	return &(data->part_descs_loc[partnum]);
+	return &(data->part_descs_loc[data->num_part_descs++].rec);
 }
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
 	memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 	data.size_part_descs = PART_DESC_ALLOC_STEP;
+	data.num_part_descs = 0;
 	data.part_descs_loc = kcalloc(data.size_part_descs,
 				      sizeof(*data.part_descs_loc),
 				      GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
 	 * are in it.
 	 */
 	for (; (!done && block <= lastblock); block++) {
-
 		bh = udf_read_tagged(sb, block, block, &ident);
 		if (!bh)
 			break;
@@ -1730,14 +1680,11 @@ static noinline int udf_process_sequence(
 	}
 
 	/* Now handle prevailing Partition Descriptors */
-	for (i = 0; i < data.size_part_descs; i++) {
-		if (data.part_descs_loc[i].block) {
-			ret = udf_load_partdesc(sb,
-						data.part_descs_loc[i].block);
-			if (ret < 0)
-				return ret;
-		}
+	for (i = 0; i < data.num_part_descs; i++) {
+		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+		if (ret < 0)
+			return ret;
 	}
 	return 0;
 }
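The handle_partition_descriptor() rework above stops indexing the descriptor array directly by partition number, which breaks down when a volume (such as the Win7-created filesystems this series fixes) uses large or sparse partition numbers. Instead it keeps a compact list of (partnum, record) entries, returns the existing slot when a later, prevailing descriptor repeats a partition number, and grows the array in fixed steps. A userspace sketch of that bookkeeping, with hypothetical names standing in for the UDF structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALLOC_STEP 32

struct part_rec {
	unsigned int partnum;
	unsigned int block;	/* stand-in for the recorded descriptor location */
};

struct scan_data {
	struct part_rec *descs;
	unsigned int num;
	unsigned int size;
};

/* Return the entry for partnum, reusing an existing slot or appending one. */
static struct part_rec *get_part_rec(struct scan_data *d, unsigned int partnum)
{
	for (unsigned int i = 0; i < d->num; i++)
		if (d->descs[i].partnum == partnum)
			return &d->descs[i];	/* later (prevailing) descriptor wins */

	if (d->num >= d->size) {
		unsigned int new_size = d->size + ALLOC_STEP;
		struct part_rec *p = realloc(d->descs, new_size * sizeof(*p));

		if (!p)
			return NULL;
		memset(p + d->size, 0, ALLOC_STEP * sizeof(*p));
		d->descs = p;
		d->size = new_size;
	}
	d->descs[d->num].partnum = partnum;
	return &d->descs[d->num++];
}

int main(void)
{
	struct scan_data d = { 0 };
	struct part_rec *r;

	r = get_part_rec(&d, 8192);	/* arbitrarily large partition number */
	if (r)
		r->block = 261;
	r = get_part_rec(&d, 8192);	/* same number: the later descriptor prevails */
	if (r)
		r->block = 290;
	if (d.num)
		printf("entries=%u block=%u\n", d.num, d.descs[0].block);
	free(d.descs);
	return 0;
}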
include/linux/quota.h

@@ -408,13 +408,7 @@ struct qc_type_state {
 
 struct qc_state {
 	unsigned int s_incoredqs;	/* Number of dquots in core */
-	/*
-	 * Per quota type information. The array should really have
-	 * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
-	 * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
-	 * supports project quotas, this can be changed to MAXQUOTAS
-	 */
-	struct qc_type_state s_state[XQM_MAXQUOTAS];
+	struct qc_type_state s_state[MAXQUOTAS];	/* Per quota type information */
 };
 
 /* Structure for communicating via ->set_info */
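With every quotactl path above now bounded by MAXQUOTAS, the per-type array in qc_state can simply hold MAXQUOTAS entries, and the BUILD_BUG_ON cross-check dropped from quota_getinfo() is no longer needed. The compile-time-assertion technique itself remains handy for keeping a table in sync with a type count; a small userspace sketch with hypothetical names:

#include <stdio.h>

#define MAXQUOTAS 3	/* user, group, project */

static const char *const quota_type_names[] = { "user", "group", "project" };

/* Userspace analogue of BUILD_BUG_ON: fail the build if the table drifts. */
_Static_assert(sizeof(quota_type_names) / sizeof(quota_type_names[0]) ==
	       MAXQUOTAS, "quota_type_names must cover every quota type");

int main(void)
{
	for (int type = 0; type < MAXQUOTAS; type++)
		printf("%d: %s quota\n", type, quota_type_names[type]);
	return 0;
}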