/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/cleancache.h>
#include <linux/user_namespace.h>
#include <asm/uaccess.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static int ext4_mballoc_ready;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);

static bool userns_mounts = false;
module_param(userns_mounts, bool, 0644);
MODULE_PARM_DESC(userns_mounts, "Allow mounts from unprivileged user namespaces");

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_USERNS_MOUNT,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_USERNS_MOUNT,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

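/*
 * The superblock checksum covers the superblock up to, but not
 * including, the s_checksum field itself.
 */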
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

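/*
 * Allocation helpers: try kmalloc()/kzalloc() first with the failure
 * warning suppressed, and fall back to vmalloc() if the request is too
 * large for the slab allocator.
 */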
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}

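/*
 * Block group descriptor accessors.  On filesystems with 64-bit group
 * descriptors (descriptor size >= EXT4_MIN_DESC_SIZE_64BIT) each field
 * is split into _lo/_hi halves, which these helpers combine and split.
 */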
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

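/*
 * Record an error in the on-disk superblock: keep both the first and
 * the most recent error, bump the error count, and arm the daily error
 * reporting timer when the first error is seen.
 */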
static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

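/*
 * Called by jbd2 when a transaction finishes: run the ext4 callbacks
 * queued on the transaction's t_private_list, dropping s_md_lock
 * around each callback invocation.
 */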
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);
	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

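/* Translate an errno from the journaling layer into a human-readable
 * string for error reporting. */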
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL &&
	    (sb->s_flags & MS_RDONLY))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	va_list args;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
	       function, line);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if ((sb->s_flags & MS_RDONLY) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= MS_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

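/*
 * Like __ext4_error(), but called with the group's bitlock held; the
 * lock is dropped around ext4_handle_error() and re-acquired before
 * returning to the caller.
 */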
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

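/*
 * Tear down the in-core superblock state: stop the lazy-init request,
 * flush and destroy the journal, release the allocators and per-fs
 * counters, and detach any external journal device.
 */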
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	flush_workqueue(sbi->rsv_conversion_wq);
	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);
	ext4_xattr_put_super(sb);

	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!(sb->s_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_mb_cache) {
		ext4_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_ioend_count, 0);
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	ei->i_crypt_info = NULL;
#endif
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
					     sizeof(struct ext4_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					     init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (EXT4_I(inode)->i_crypt_info)
		ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info);
#endif
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space = ext4_get_reserved_space,
	.write_dquot	= ext4_write_dquot,
	.acquire_dquot	= ext4_acquire_dquot,
	.release_dquot	= ext4_release_dquot,
	.mark_dirty	= ext4_mark_dquot_dirty,
	.write_info	= ext4_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum,
};

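/*
 * Map each mount option string (with an optional %u/%s argument) to
 * one of the Opt_* tokens above; entries marked Opt_removed are
 * accepted but ignored for backwards compatibility.
 */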
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

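/* Pull an "sb=<blocknr>" override out of the option string, if any. */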
static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

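/*
 * Remember (set_qf_name) or forget (clear_qf_name) the journaled quota
 * file name for a quota type; changes are refused once quota is
 * already turned on.
 */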
#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

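/*
 * Per-option behaviour flags for the ext4_mount_opts table below:
 * MOPT_SET/MOPT_CLEAR set or clear the named bit in s_mount_opt,
 * MOPT_GTE0 requires a non-negative integer argument, MOPT_Q and
 * MOPT_QFMT gate quota options on CONFIG_QUOTA, and MOPT_NO_EXT2/
 * MOPT_NO_EXT3 reject options under the ext2/ext3 personalities.
 */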
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400

static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2 | MOPT_SET},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2 | MOPT_CLEAR},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_err, 0, 0}
};

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= MS_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= MS_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~MS_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (token == Opt_err_panic && !capable(CAP_SYS_ADMIN)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" not allowed for unprivileged mounts",
			 opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(sb->s_user_ns, arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(sb->s_user_ns, arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		/*
		 * Refuse access for unprivileged mounts if the user does
		 * not have rw access to the journal device via the supplied
		 * path.
		 */
		if (!capable(CAP_SYS_ADMIN) &&
		    inode_permission(d_inode(path.dentry), MAY_READ|MAY_WRITE)) {
			ext4_msg(sb, KERN_ERR,
				 "error: Insufficient access to journal path %s",
				 journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}
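
/*
 * Illustrative example (option string assumed, not from this file): given
 * "data=ordered,commit=30,resuid=1000", parse_options() below splits on
 * commas and calls handle_mount_opt() once per token, which here would
 * select ordered data mode, set s_commit_interval to 30 * HZ, and set
 * s_resuid via make_kuid(sb->s_user_ns, 1000).
 */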

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) &&
	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
			 "mount options ignored.");
		clear_opt(sb, USRQUOTA);
		clear_opt(sb, GRPQUOTA);
	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_CACHE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
			 "in data=ordered mode");
		return 0;
	}
	return 1;
}

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
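/*
 * For example (values assumed): a filesystem mounted with
 * "errors=remount-ro,commit=15" whose superblock default is
 * errors=continue would have ",errors=remount-ro,commit=15" emitted by
 * the helpers below, while options matching their defaults are
 * suppressed.
 */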
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(sb->s_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(sb->s_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(sb->s_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(sb->s_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & MS_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return res;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = ext4_kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;
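		/*
		 * The checksum field itself is folded in as a zeroed
		 * placeholder (dummy_csum) and then skipped, so the stored
		 * bg_checksum never covers its own bytes; the 32-bit crc32c
		 * result is truncated to 16 bits below to fit bg_checksum.
		 */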

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!(sb->s_flags & MS_RDONLY)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
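/*
 * Background on the on-disk chain (illustrative summary): es->s_last_orphan
 * holds the first orphan's inode number, and each orphan stores the next
 * orphan's inode number in its otherwise-unused i_dtime field; a value of
 * zero terminates the list.
 */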
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			mutex_lock(&inode->i_mutex);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ext4_truncate(inode);
			mutex_unlock(&inode->i_mutex);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}
/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However there is other limiting factor. We do store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering maximum file size must fit into on-disk format containers as
 * well. Given that length is always by 1 unit bigger than max unit (because
 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
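/*
 * Worked example (illustrative): with 4 KiB blocks (blkbits == 12),
 * res below becomes (2^32 - 1) << 12, i.e. just under 16 TiB -- the
 * familiar upper bound for extent-mapped files on a 4 KiB-block
 * filesystem.
 */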
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * CONFIG_LBDAF not being enabled implies that the inode
		 * i_block field represents total blocks in 512-byte units;
		 * 32 == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
2421
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

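/*
 * Illustrative example (sizes assumed): with 4 KiB blocks and 32-byte
 * descriptors, s_desc_per_block is 128, so a descriptor block nr beyond
 * s_first_meta_bg lives in block group 128 * nr, at that group's first
 * block plus one when the group carries a superblock backup.
 */
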
/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

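/*
 * Illustrative example (values assumed): with s_raid_stride == 16,
 * s_raid_stripe_width == 64 and 32768 blocks per group, the helper above
 * returns 64; a stripe=128 mount option would take precedence over both
 * superblock hints.
 */
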
/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= MS_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(unsigned long arg)
{
	struct super_block *sb = (struct super_block *) arg;
	struct ext4_sb_info *sbi;
	struct ext4_super_block *es;

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk("\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk("\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	sb_start_write(sb);
	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	sb_end_write(sb);

	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where ext4lazyinit thread lives. It walks
 * through the request list searching for next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute next schedule time of
 * the request. When walking through the list is complete, compute
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}

		list_for_each_safe(pos, n, &eli->li_request_list) {
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_after_eq(jiffies, elr->lr_next_sched)) {
				if (ext4_run_li_request(elr) != 0) {
					/* error, remove the lazy_init job */
					ext4_remove_li_request(elr);
					continue;
				}
			}

			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
 * a new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return
 * corresponding group number, else the loop goes through all
 * groups and return total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups ||
	    (sb->s_flags & MS_RDONLY) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}
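
/*
 * Summary of the selection above: filesystems with metadata_csum ask the
 * journal for v3 checksums (JBD2_FEATURE_INCOMPAT_CSUM_V3); all others
 * fall back to the v1 compat checksum (JBD2_FEATURE_COMPAT_CHECKSUM),
 * and journal_async_commit additionally requests
 * JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT.
 */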

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block groups can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
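/*
 * In the non-bigalloc case below, the per-group overhead is simply any
 * superblock backup plus its group descriptor blocks, the block and
 * inode bitmaps (the "+ 2"), and the inode table blocks.
 */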
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}
	/* Add the internal journal blocks as well */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);

	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
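	/*
	 * Worked example (illustrative): a filesystem with 2^28 4 KiB
	 * clusters (1 TiB) yields about 5.3 million clusters from the
	 * division by 50 above, so the min_t() caps the reservation at
	 * 4096 clusters (16 MiB).
	 */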

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if (!userns_mounts && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}


	/*
3236
	 * The ext4 superblock will not be buffer aligned for other than 1kB
3237 3238
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
3239
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
3240 3241
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
3242
	} else {
3243
		logical_sb_block = sb_block;
3244 3245
	}
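	/*
	 * Example: with the default sb_block of 1 and a 4 KiB block size,
	 * sb_block * EXT4_MIN_BLOCK_SIZE = 1024; do_div() leaves the
	 * quotient (0) in logical_sb_block and returns the remainder
	 * (1024), so the superblock sits 1024 bytes into device block 0.
	 */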

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macros depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	if (ext4_has_feature_metadata_csum(sb)) {
		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
		if (IS_ERR(sbi->s_chksum_driver)) {
			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
			ret = PTR_ERR(sbi->s_chksum_driver);
			sbi->s_chksum_driver = NULL;
			goto failed_mount;
		}
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
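	/*
	 * Without an explicit seed feature, the seed is derived as
	 * crc32c(~0, fs UUID), so each filesystem checksums its metadata
	 * with a distinct seed.
	 */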
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) {
		if (!capable(CAP_SYS_ADMIN))
			goto failed_mount;
		set_opt(sb, ERRORS_PANIC);
	} else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) {
		set_opt(sb, ERRORS_CONT);
	} else {
		set_opt(sb, ERRORS_RO);
	}
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(sb->s_user_ns, le16_to_cpu(es->s_def_resuid));
	if (!uid_valid(sbi->s_resuid))
		sbi->s_resuid = make_kuid(sb->s_user_ns, EXT4_DEF_RESUID);
	sbi->s_resgid = make_kgid(sb->s_user_ns, le16_to_cpu(es->s_def_resgid));
	if (!gid_valid(sbi->s_resgid))
		sbi->s_resgid = make_kgid(sb->s_user_ns, EXT4_DEF_RESGID);
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

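	/*
	 * The blocksize / 4 bound reflects that reserved GDT blocks are
	 * tracked as 32-bit block numbers in a single indirect block of
	 * the resize inode.
	 */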
	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (blocksize != PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR,
					"error: unsupported blocksize for dax");
			goto failed_mount;
		}
		if (!sb->s_bdev->bd_disk->fops->direct_access) {
			ext4_msg(sb, KERN_ERR,
					"error: device does not support dax");
			goto failed_mount;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
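	/*
	 * Directory hashing historically varied with the signedness of
	 * the platform's "char"; the EXT2_FLAGS_{SIGNED,UNSIGNED}_HASH
	 * flags below pin one variant so the filesystem hashes the same
	 * way on every architecture.
	 */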
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!(sb->s_flags & MS_RDONLY))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (le32_to_cpu(es->s_log_cluster_size) >
		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid log cluster size: %u",
				 le32_to_cpu(es->s_log_cluster_size));
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_warning(sb, "fragment/cluster size (%d) != "
				     "block size (%d)", clustersize,
				     blocksize);
			clustersize = blocksize;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
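	/*
	 * Example: a bigalloc filesystem with 4 KiB blocks and 64 KiB
	 * clusters ends up with s_cluster_bits = 4 and
	 * s_cluster_ratio = 16: one allocation cluster covers 16 blocks.
	 */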

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
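	/*
	 * Ceiling division.  Example: an 8 GiB filesystem with 4 KiB
	 * blocks has 2097152 blocks; at 32768 blocks per group (and
	 * first_data_block 0) this yields 64 block groups.
	 */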
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = ext4_kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	setup_timer(&sbi->s_err_report, print_daily_error_info,
		(unsigned long) sb);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
			set_opt(sb, ORDERED_DATA);
		else
			set_opt(sb, JOURNAL_DATA);
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}
	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (ext4_mballoc_ready) {
		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
		if (!sbi->s_mb_cache) {
			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
			goto failed_mount_wq;
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_CACHE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	/* sb->s_user_ns will be put when sb is destroyed */
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	/* First, test for the existence of a valid inode on disk.  Bad
	 * things happen if we iget() an unused inode, as the subsequent
	 * iput() will try to delete it. */

	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
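	/*
	 * As with the primary superblock, the journal device's ext4
	 * superblock lives at byte offset 1024; compute which device
	 * block holds it and the offset within that block.
	 */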
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb->s_flags & MS_RDONLY) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
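	/*
	 * Journal replay may copy an older superblock over the current
	 * one, so the block below saves and restores the recorded error
	 * information (the EXT4_S_ERR region) around jbd2_journal_load().
	 */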
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
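	/*
	 * part_stat_read() counts 512-byte sectors; the >> 1 below
	 * converts the sectors written since mount into KiB before
	 * adding them to the lifetime s_kbytes_written total.
	 */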
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	mark_buffer_dirty(sbh);
	if (sync) {
		error = __sync_dirty_buffer(sbh,
			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
		if (error)
			return error;

		error = buffer_write_io_error(sbh);
		if (error) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) &&
	    sb->s_flags & MS_RDONLY) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb, 1);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem to a clean
 * state; it relies on the upper layer to stop all data and metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb->s_flags & MS_RDONLY)
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & MS_LAZYTIME)
		sb->s_flags |= MS_LAZYTIME;

4779
	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
4780
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
4781 4782 4783 4784 4785
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & MS_RDONLY) {
4786 4787 4788
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
4789 4790
			err = dquot_suspend(sb, -1);
			if (err < 0)
4791 4792
				goto restore_opts;

4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803
			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= MS_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
4804 4805
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
4806 4807
				es->s_state = cpu_to_le16(sbi->s_mount_state);

4808
			if (sbi->s_journal)
4809
				ext4_mark_recovery_complete(sb, es);
4810
		} else {
4811
			/* Make sure we can mount this feature set readwrite */
4812
			if (ext4_has_feature_readonly(sb) ||
4813
			    !ext4_feature_set_ok(sb, 0)) {
4814 4815 4816
				err = -EROFS;
				goto restore_opts;
			}
4817 4818
			/*
			 * Make sure the group descriptor checksums
4819
			 * are sane.  If they aren't, refuse to remount r/w.
4820 4821 4822 4823 4824
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

4825
				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
4826 4827
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
4828
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
4829
					       le16_to_cpu(gdp->bg_checksum));
4830
					err = -EFSBADCRC;
4831 4832 4833 4834
					goto restore_opts;
				}
			}

4835 4836 4837 4838 4839 4840
			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
4841
				ext4_msg(sb, KERN_WARNING, "Couldn't "
4842 4843
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
4844
				       "umount/remount instead");
4845 4846 4847 4848
				err = -EINVAL;
				goto restore_opts;
			}

4849 4850 4851 4852 4853 4854
			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

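	/*
	 * Rebuild the system-zone map used by block validity checking,
	 * and write the superblock out directly when there is no journal
	 * to carry the change.
	 */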
	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
		ext4_commit_super(sb, 1);

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

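/*
 * Report filesystem statistics for statfs(2).  Unless the minixdf mount
 * option is used, the precomputed metadata overhead is subtracted from
 * the total block count, and both the root-reserved blocks and the
 * reserved cluster pool are hidden from f_bavail.
 */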
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case too little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

	return 0;
}

/* Helper function for writing quotas on sync - we need to start transaction
 * before quota file is locked for write. Otherwise there are possible deadlocks:
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()                  write_dquot()
 *   dquot_initialize()                         down(dqio_mutex)
 *     down(dqio_mutex)                    jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

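/*
 * Write a dquot to the quota file.  The write runs inside a journal
 * transaction so that the quota update is committed together with the
 * metadata changes that caused it.
 */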
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

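/*
 * Read a dquot in from the quota file, allocating space there if
 * necessary; wrapped in a journal transaction like ext4_write_dquot().
 */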
static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

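/*
 * Drop a dquot's on-disk structure inside a journal transaction.
 */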
static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

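/*
 * With journaled quotas (the quota feature or user-specified journaled
 * quota files) a dirty dquot is written out immediately, so the update
 * becomes part of the running transaction; otherwise it is only marked
 * dirty and written back later by quota sync.
 */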
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

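/* Write quota file information (e.g. grace times) under a transaction */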
static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}
	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	return err;
}

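/*
 * Enable quotas stored in the hidden system inodes that the superblock
 * advertises (s_usr_quota_inum / s_grp_quota_inum); this is the path
 * used when the "quota" feature flag is set.
 */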
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	/* Reset the lockdep class before iput() can free the inode */
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
						DQUOT_USAGE_ENABLED);
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode)
		goto out;

	/* Update modification times of quota files when userspace can
	 * start looking at them */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

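	/*
	 * Writes may require allocating a block in the quota file; an
	 * ENOSPC here can be transient (blocks freed by the committing
	 * transaction are not yet reusable), so retry the allocation as
	 * long as ext4_should_retry_alloc() says it is worthwhile.
	 */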
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

#endif

static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
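/*
 * With CONFIG_EXT4_USE_FOR_EXT2, ext4 also registers itself as "ext2"
 * so existing fstab entries keep working, but only mounts filesystems
 * whose feature set the real ext2 driver could have handled (checked
 * by ext2_feature_set_ok() below).
 */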
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb->s_flags & MS_RDONLY)
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_USERNS_MOUNT,
};
MODULE_ALIAS_FS("ext4");

/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];

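/*
 * Module initialization: bring up each subsystem in order.  On failure
 * the out* labels unwind everything initialized so far, in reverse
 * order, before returning the error.
 */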
static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
		mutex_init(&ext4__aio_mutex[i]);
		init_waitqueue_head(&ext4__ioend_wq[i]);
	}

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	else
		ext4_mballoc_ready = 1;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_mballoc_ready = 0;
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_exit_crypto();
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)