// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>

#include <linux/kthread.h>
#include <linux/freezer.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static struct mutex ext4_li_mtx;
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static int ext4_commit_super(struct super_block *sb, int sync);
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static int ext4_feature_set_ok(struct super_block *sb, int readonly);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);

/*
 * Lock ordering
 *
 * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
 * i_mmap_rwsem (inode->i_mmap_rwsem)!
 *
 * page fault path:
 * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
 *   page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> i_mmap_rwsem (w) -> page lock
 * sb_start_write -> i_mutex -> i_mmap_sem (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_sem
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

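/*
 * The superblock checksum is a crc32c over everything up to, but not
 * including, the s_checksum field itself; ext4_chksum() uses the
 * crc32c driver attached to the sb_info at mount time.
 */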
static __le32 ext4_superblock_csum(struct super_block *sb,
				   struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

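/*
 * kmalloc() with a vmalloc() fallback for allocations that may be too
 * large for the slab allocator.  __GFP_NOWARN keeps the first attempt
 * quiet, since failure there is recoverable.
 */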
void *ext4_kvmalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL);
	return ret;
}

void *ext4_kvzalloc(size_t size, gfp_t flags)
{
	void *ret;

	ret = kzalloc(size, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ret;
}

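/*
 * Block group descriptor accessors.  On filesystems with 64-bit-capable
 * descriptors (EXT4_DESC_SIZE >= EXT4_MIN_DESC_SIZE_64BIT) each on-disk
 * field is split into _lo and _hi halves; these helpers recombine and
 * split them so callers never deal with the layout directly.
 */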
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

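/*
 * Note the error in the on-disk superblock: remember where and when the
 * first and most recent errors occurred, bump s_error_count, and arm the
 * daily error-report timer on the first error.
 */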
static void __save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (bdev_read_only(sb->s_bdev))
		return;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	le32_add_cpu(&es->s_error_count, 1);
}

static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

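/*
 * Run the callbacks queued on a committing transaction (for example the
 * deferred freeing of data blocks) once jbd2 has finished the commit.
 * Called from the jbd2 commit thread.
 */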
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block		*sb = journal->j_private;
	struct ext4_sb_info		*sbi = EXT4_SB(sb);
	int				error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry	*jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

static void ext4_handle_error(struct super_block *sb)
{
	if (sb_rdonly(sb))
		return;

	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
	}
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	save_error_info(inode->i_sb, function, line);
	ext4_handle_error(inode->i_sb);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.  */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}

/*
 * ext4_abort is a much stronger failure handler than ext4_error.  The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */

void __ext4_abort(struct super_block *sb, const char *function,
		unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);

	if (sb_rdonly(sb) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		/*
		 * Make sure updated value of ->s_mount_flags will be visible
		 * before ->s_flags update
		 */
		smp_wmb();
		sb->s_flags |= SB_RDONLY;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC)) {
		if (EXT4_SB(sb)->s_journal &&
		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
			return;
		panic("EXT4-fs panic from previous error\n");
	}
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}

#define ext4_warning_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
			     "EXT4-fs warning")

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
		return;

	trace_ext4_error(sb, function, line);
	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_commit_super(sb, 1);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

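/*
 * Mark a group's block and/or inode bitmap as corrupt, and pull the
 * group's free counts out of the global percpu counters so that the
 * now-untrustworthy space is no longer advertised as allocatable.
 */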
void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				     ext4_group_t group,
				     unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	if ((flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) &&
	    !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
		percpu_counter_sub(&sbi->s_freeclusters_counter,
					grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
			&grp->bb_state);
	}

	if ((flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) &&
	    !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
		if (gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
			&grp->bb_state);
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

/*
 * Open the external journal device
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}

/*
 * Release the journal device
 */
static void ext4_blkdev_put(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
	struct block_device *bdev;
	bdev = sbi->journal_bdev;
	if (bdev) {
		ext4_blkdev_put(bdev);
		sbi->journal_bdev = NULL;
	}
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quota_off_umount(struct super_block *sb)
{
	int type;

	/* Use our quota_off function to clear inode flags etc. */
	for (type = 0; type < EXT4_MAXQUOTAS; type++)
		ext4_quota_off(sb, type);
}
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int i, err;

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	destroy_workqueue(sbi->rsv_conversion_wq);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	ext4_unregister_sysfs(sb);
	ext4_es_unregister_shrinker(sbi);
	del_timer_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb, 1);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
	kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	init_rwsem(&ei->i_mmap_sem);
	inode_init_once(&ei->vfs_inode);
}

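/*
 * The inode cache is created with a usercopy whitelist covering only the
 * i_data array, the one part of ext4_inode_info that is copied directly
 * to/from user space (e.g. for fast symlinks).
 */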
static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext4_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget_normal(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd2 layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page,
						wait & ~__GFP_DIRECT_RECLAIM);
	return try_to_free_buffers(page);
}

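/*
 * fscrypt hooks: the per-inode encryption context is stored as an xattr
 * in the EXT4_XATTR_INDEX_ENCRYPTION namespace, so getting and setting
 * it reduces to xattr operations.
 */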
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode);
		}
		return res;
	}

	res = dquot_initialize(inode);
	if (res)
		return res;
retry:
	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
				     &credits);
	if (res)
		return res;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				    ctx, len, 0);
	if (!res) {
		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
		/*
		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
		 * S_DAX may be disabled
		 */
		ext4_set_inode_flags(inode);
		res = ext4_mark_inode_dirty(handle, inode);
		if (res)
			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
	}
	res2 = ext4_journal_stop(handle);

	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	if (!res)
		res = res2;
	return res;
}

static bool ext4_dummy_context(struct inode *inode)
{
	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}

static const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.dummy_context		= ext4_dummy_context,
	.empty_dir		= ext4_empty_dir,
	.max_namelen		= EXT4_NAME_LEN,
};
#endif

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= ext4_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_removed, "oldalloc"},
	{Opt_removed, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "norecovery"},
	{Opt_noload, "noload"},
	{Opt_removed, "nobh"},
	{Opt_removed, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_path, "journal_path=%s"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_nojournal_checksum, "nojournal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_dax, "dax"},
	{Opt_stripe, "stripe=%u"},
	{Opt_delalloc, "delalloc"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_removed, "mblk_io_submit"},
	{Opt_removed, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_nombcache, "nombcache"},
	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
	{Opt_err, NULL},
};

static ext4_fsblk_t get_sb_block(void **data)
{
	ext4_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */

	options += 3;
	/* TODO: use simple_strtoll with >32bit ext4 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;

	return sb_block;
}

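/*
 * Example (hypothetical mount string): with *data pointing at
 * "sb=8193,errors=continue", get_sb_block() returns 8193 and leaves
 * *data pointing at "errors=continue" for the regular option parser.
 */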
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
static const char deprecated_msg[] =
	"Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";

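/*
 * Journaled quota file names can only be changed while no quota is
 * loaded; when the QUOTA feature flag manages quota inodes internally,
 * the journaled-quota mount options are accepted but ignored.
 */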
#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *qname;
	int ret = -1;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -1;
	}
	if (ext4_has_feature_quota(sb)) {
		ext4_msg(sb, KERN_INFO, "Journaled quota options "
			 "ignored when QUOTA feature is enabled");
		return 1;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext4_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -1;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 1;
		else
			ext4_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		ext4_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sb, QUOTA);
	return 1;
errout:
	kfree(qname);
	return ret;
}

static int clear_qf_name(struct super_block *sb, int qtype)
{

	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -1;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 1;
}
#endif

#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#define MOPT_CLEAR_ERR	0x0010
#define MOPT_GTE0	0x0020
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0040
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_DATAJ	0x0080
#define MOPT_NO_EXT2	0x0100
#define MOPT_NO_EXT3	0x0200
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_STRING	0x0400

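/*
 * Table-driven mount option handling: each entry maps a token from
 * tokens[] to the EXT4_MOUNT_* bit it sets or clears, with MOPT_* flags
 * describing the argument type and which ext2/ext3/ext4 personalities
 * accept the option.
 */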
static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
	 MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_GTE0},
	{Opt_max_batch_time, 0, MOPT_GTE0},
	{Opt_min_batch_time, 0, MOPT_GTE0},
	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
	{Opt_init_itable, 0, MOPT_GTE0},
	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
	{Opt_stripe, 0, MOPT_GTE0},
	{Opt_resuid, 0, MOPT_GTE0},
	{Opt_resgid, 0, MOPT_GTE0},
	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
	 MOPT_NO_EXT2 | MOPT_DATAJ},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
	{Opt_noacl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
							MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
							MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_offusrjquota, 0, MOPT_Q},
	{Opt_offgrpjquota, 0, MOPT_Q},
	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_err, 0, 0}
};
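
/*
 * Example of how the MOPT_* bits combine: "grpid" hits
 * {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET} and sets the bit in
 * sbi->s_mount_opt, while "nogrpid" uses MOPT_CLEAR to clear it.
 * MOPT_EXT4_ONLY entries such as dioread_nolock are rejected when the
 * ext2/ext3 personality is mounted, and MOPT_EXPLICIT entries
 * (delalloc, journal_checksum) additionally record via set_opt2()
 * that the user named the option.  Without CONFIG_QUOTA, MOPT_Q and
 * MOPT_QFMT collapse to MOPT_NOSUPPORT, so every quota token is
 * rejected uniformly by handle_mount_opt() below.
 */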

static int handle_mount_opt(struct super_block *sb, char *opt, int token,
			    substring_t *args, unsigned long *journal_devnum,
			    unsigned int *journal_ioprio, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	const struct mount_opts *m;
	kuid_t uid;
	kgid_t gid;
	int arg = 0;

#ifdef CONFIG_QUOTA
	if (token == Opt_usrjquota)
		return set_qf_name(sb, USRQUOTA, &args[0]);
	else if (token == Opt_grpjquota)
		return set_qf_name(sb, GRPQUOTA, &args[0]);
	else if (token == Opt_offusrjquota)
		return clear_qf_name(sb, USRQUOTA);
	else if (token == Opt_offgrpjquota)
		return clear_qf_name(sb, GRPQUOTA);
#endif
	switch (token) {
	case Opt_noacl:
	case Opt_nouser_xattr:
		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
		break;
	case Opt_sb:
		return 1;	/* handled by get_sb_block() */
	case Opt_removed:
		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
		return 1;
	case Opt_abort:
		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
		return 1;
	case Opt_i_version:
		sb->s_flags |= SB_I_VERSION;
		return 1;
	case Opt_lazytime:
		sb->s_flags |= SB_LAZYTIME;
		return 1;
	case Opt_nolazytime:
		sb->s_flags &= ~SB_LAZYTIME;
		return 1;
	}

	for (m = ext4_mount_opts; m->token != Opt_err; m++)
		if (token == m->token)
			break;

	if (m->token == Opt_err) {
		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
			 "or missing value", opt);
		return -1;
	}

	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext2", opt);
		return -1;
	}
	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Mount option \"%s\" incompatible with ext3", opt);
		return -1;
	}

	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
		return -1;
	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
		return -1;
	if (m->flags & MOPT_EXPLICIT) {
		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
			set_opt2(sb, EXPLICIT_DELALLOC);
		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
		} else
			return -1;
	}
	if (m->flags & MOPT_CLEAR_ERR)
		clear_opt(sb, ERRORS_MASK);
	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
		ext4_msg(sb, KERN_ERR, "Cannot change quota "
			 "options when quota turned on");
		return -1;
	}

	if (m->flags & MOPT_NOSUPPORT) {
		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
	} else if (token == Opt_commit) {
		if (arg == 0)
			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
		sbi->s_commit_interval = HZ * arg;
	} else if (token == Opt_debug_want_extra_isize) {
		sbi->s_want_extra_isize = arg;
	} else if (token == Opt_max_batch_time) {
		sbi->s_max_batch_time = arg;
	} else if (token == Opt_min_batch_time) {
		sbi->s_min_batch_time = arg;
	} else if (token == Opt_inode_readahead_blks) {
		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
			ext4_msg(sb, KERN_ERR,
				 "EXT4-fs: inode_readahead_blks must be "
				 "0 or a power of 2 smaller than 2^31");
			return -1;
		}
		sbi->s_inode_readahead_blks = arg;
	} else if (token == Opt_init_itable) {
		set_opt(sb, INIT_INODE_TABLE);
		if (!args->from)
			arg = EXT4_DEF_LI_WAIT_MULT;
		sbi->s_li_wait_mult = arg;
	} else if (token == Opt_max_dir_size_kb) {
		sbi->s_max_dir_size_kb = arg;
	} else if (token == Opt_stripe) {
		sbi->s_stripe = arg;
	} else if (token == Opt_resuid) {
		uid = make_kuid(current_user_ns(), arg);
		if (!uid_valid(uid)) {
			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
			return -1;
		}
		sbi->s_resuid = uid;
	} else if (token == Opt_resgid) {
		gid = make_kgid(current_user_ns(), arg);
		if (!gid_valid(gid)) {
			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
			return -1;
		}
		sbi->s_resgid = gid;
	} else if (token == Opt_journal_dev) {
		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		*journal_devnum = arg;
	} else if (token == Opt_journal_path) {
		char *journal_path;
		struct inode *journal_inode;
		struct path path;
		int error;

		if (is_remount) {
			ext4_msg(sb, KERN_ERR,
				 "Cannot specify journal on remount");
			return -1;
		}
		journal_path = match_strdup(&args[0]);
		if (!journal_path) {
			ext4_msg(sb, KERN_ERR, "error: could not dup "
				"journal device string");
			return -1;
		}

		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
		if (error) {
			ext4_msg(sb, KERN_ERR, "error: could not find "
				"journal device path: error %d", error);
			kfree(journal_path);
			return -1;
		}

		journal_inode = d_inode(path.dentry);
		if (!S_ISBLK(journal_inode->i_mode)) {
			ext4_msg(sb, KERN_ERR, "error: journal path %s "
				"is not a block device", journal_path);
			path_put(&path);
			kfree(journal_path);
			return -1;
		}

		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
		path_put(&path);
		kfree(journal_path);
	} else if (token == Opt_journal_ioprio) {
		if (arg > 7) {
			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
				 " (must be 0-7)");
			return -1;
		}
		*journal_ioprio =
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
	} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mode enabled");
#else
		ext4_msg(sb, KERN_WARNING,
			 "Test dummy encryption mount option ignored");
#endif
	} else if (m->flags & MOPT_DATAJ) {
		if (is_remount) {
			if (!sbi->s_journal)
				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
				ext4_msg(sb, KERN_ERR,
					 "Cannot change data mode on remount");
				return -1;
			}
		} else {
			clear_opt(sb, DATA_FLAGS);
			sbi->s_mount_opt |= m->mount_opt;
		}
#ifdef CONFIG_QUOTA
	} else if (m->flags & MOPT_QFMT) {
		if (sb_any_quota_loaded(sb) &&
		    sbi->s_jquota_fmt != m->mount_opt) {
			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
				 "quota options when quota turned on");
			return -1;
		}
		if (ext4_has_feature_quota(sb)) {
			ext4_msg(sb, KERN_INFO,
				 "Quota format mount options ignored "
				 "when QUOTA feature is enabled");
			return 1;
		}
		sbi->s_jquota_fmt = m->mount_opt;
#endif
	} else if (token == Opt_dax) {
#ifdef CONFIG_FS_DAX
		ext4_msg(sb, KERN_WARNING,
			 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		sbi->s_mount_opt |= m->mount_opt;
#else
		ext4_msg(sb, KERN_INFO, "dax option not supported");
		return -1;
#endif
	} else if (token == Opt_data_err_abort) {
		sbi->s_mount_opt |= m->mount_opt;
	} else if (token == Opt_data_err_ignore) {
		sbi->s_mount_opt &= ~m->mount_opt;
	} else {
		if (!args->from)
			arg = 1;
		if (m->flags & MOPT_CLEAR)
			arg = !arg;
		else if (unlikely(!(m->flags & MOPT_SET))) {
			ext4_msg(sb, KERN_WARNING,
				 "buggy handling of option %s", opt);
			WARN_ON(1);
			return -1;
		}
		if (arg != 0)
			sbi->s_mount_opt |= m->mount_opt;
		else
			sbi->s_mount_opt &= ~m->mount_opt;
	}
	return 1;
}
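
/*
 * Worked example of the return convention above: for "nogrpid",
 * match_token() yields Opt_nogrpid, the table entry
 * {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR} matches, and the generic
 * branch at the bottom inverts arg and clears EXT4_MOUNT_GRPID from
 * sbi->s_mount_opt.  A return of 1 means "handled" (including options
 * that only produce a warning); -1 is a hard parse error and makes
 * parse_options() fail the whole mount.
 */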

static int parse_options(char *options, struct super_block *sb,
			 unsigned long *journal_devnum,
			 unsigned int *journal_ioprio,
			 int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int token;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		if (handle_mount_opt(sb, p, token, args, journal_devnum,
				     journal_ioprio, is_remount) < 0)
			return 0;
	}
#ifdef CONFIG_QUOTA
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return 0;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sb, USRQUOTA);

		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sb, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext4_msg(sb, KERN_ERR, "old and new quota "
					"format mixing");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext4_msg(sb, KERN_ERR, "journaled quota format "
					"not specified");
			return 0;
		}
	}
#endif
	if (test_opt(sb, DIOREAD_NOLOCK)) {
		int blocksize =
			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

		if (blocksize < PAGE_SIZE) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "dioread_nolock if block size != PAGE_SIZE");
			return 0;
		}
	}
	return 1;
}
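
/*
 * Example: options == "data=ordered,commit=30,nogrpid" is split on
 * ',' by strsep(), producing three handle_mount_opt() calls; the
 * quota and dioread_nolock consistency checks then run once over the
 * combined result.  Empty elements (e.g. a trailing comma) are
 * skipped by the !*p test.
 */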

static inline void ext4_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
#endif
}

static const char *token2str(int token)
{
	const struct match_token *t;

	for (t = tokens; t->token != Opt_err; t++)
		if (t->token == token && !strchr(t->pattern, '='))
			break;
	return t->pattern;
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
			      int nodefs)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
	const struct mount_opts *m;
	char sep = nodefs ? '\n' : ',';

#define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
#define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)

	if (sbi->s_sb_block != 1)
		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);

	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
		int want_set = m->flags & MOPT_SET;
		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
		    (m->flags & MOPT_CLEAR_ERR))
			continue;
		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
			continue; /* skip if same as the default */
		if ((want_set &&
		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
			continue; /* select Opt_noFoo vs Opt_Foo */
		SEQ_OPTS_PRINT("%s", token2str(m->token));
	}

	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
		SEQ_OPTS_PRINT("resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
		SEQ_OPTS_PRINT("resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
		SEQ_OPTS_PUTS("errors=remount-ro");
	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
		SEQ_OPTS_PUTS("errors=continue");
	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
		SEQ_OPTS_PUTS("errors=panic");
	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
	if (sb->s_flags & SB_I_VERSION)
		SEQ_OPTS_PUTS("i_version");
	if (nodefs || sbi->s_stripe)
		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
			(sbi->s_mount_opt ^ def_mount_opt)) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			SEQ_OPTS_PUTS("data=journal");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			SEQ_OPTS_PUTS("data=ordered");
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
			SEQ_OPTS_PUTS("data=writeback");
	}
	if (nodefs ||
	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
			       sbi->s_inode_readahead_blks);

	if (test_opt(sb, INIT_INODE_TABLE) && (nodefs ||
		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
	if (nodefs || sbi->s_max_dir_size_kb)
		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
	if (test_opt(sb, DATA_ERR_ABORT))
		SEQ_OPTS_PUTS("data_err=abort");

	ext4_show_quota_options(seq, sb);
	return 0;
}

static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}
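
/*
 * The two entry points above share _ext4_show_options(): with
 * nodefs == 0 (ext4_show_options, the ->show_options hook used for
 * /proc/mounts) only non-default options are emitted, comma-separated;
 * with nodefs == 1 (ext4_seq_options_show) every tracked option is
 * dumped, one per line, defaults included.
 */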

static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	if (sbi->s_journal)
		ext4_set_feature_journal_needs_recovery(sb);

	err = ext4_commit_super(sb, 1);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);

	cleancache_init_fs(sb);
	return err;
}

int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups *new_groups;
	int size;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
	new_groups = kvzalloc(size, GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
			 size / (int) sizeof(struct flex_groups));
		return -ENOMEM;
	}

	if (sbi->s_flex_groups) {
		memcpy(new_groups, sbi->s_flex_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups)));
		kvfree(sbi->s_flex_groups);
	}
	sbi->s_flex_groups = new_groups;
	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
	return 0;
}

static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		atomic_add(ext4_free_inodes_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp),
			   &sbi->s_flex_groups[flex_group].used_dirs);
	}

	return 1;
failed:
	return 0;
}

static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);

out:
	return cpu_to_le16(crc);
}

int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}
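
/*
 * Checksum scheme, as implemented above: with metadata_csum the
 * descriptor checksum is the low 16 bits of a crc32c over the seed,
 * the group number, and the descriptor contents (with bg_checksum
 * itself treated as zero); with only the older gdt_csum feature it is
 * a crc16 seeded from the filesystem UUID.  When neither feature is
 * set, ext4_has_group_desc_csum() is false and the checksum is
 * neither computed nor verified.
 */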

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.  The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext4_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int quota_update = 0;
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext4_msg(sb, KERN_ERR, "write access "
			"unavailable, skipping orphan cleanup");
		return;
	}

	/* Check if feature set would not allow a r/w mount */
	if (!ext4_feature_set_ok(sb, 0)) {
		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & SB_RDONLY) {
		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~SB_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
		int ret = ext4_enable_quotas(sb);

		if (!ret)
			quota_update = 1;
		else
			ext4_msg(sb, KERN_ERR,
				"Cannot turn on quotas: error %d", ret);
	}

	/* Turn on journaled quotas used for old style */
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		if (EXT4_SB(sb)->s_qf_names[i]) {
			int ret = ext4_quota_on_mount(sb, i);

			if (!ret)
				quota_update = 1;
			else
				ext4_msg(sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: type %d: error %d", i, ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		/*
		 * We may have encountered an error during cleanup; if
		 * so, skip the rest.
		 */
		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
			es->s_last_orphan = 0;
			break;
		}

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: truncating inode %lu to %lld bytes",
					__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
				  inode->i_ino, inode->i_size);
			inode_lock(inode);
			truncate_inode_pages(inode->i_mapping, inode->i_size);
			ret = ext4_truncate(inode);
			if (ret)
				ext4_std_error(inode->i_sb, ret);
			inode_unlock(inode);
			nr_truncates++;
		} else {
			if (test_opt(sb, DEBUG))
				ext4_msg(sb, KERN_DEBUG,
					"%s: deleting unreferenced inode %lu",
					__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn off quotas if they were enabled for orphan cleanup */
	if (quota_update) {
		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
			if (sb_dqopt(sb)->files[i])
				dquot_quota_off(sb, i);
		}
	}
#endif
	sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor. We do store extents in the
 * form of starting block and length, hence the resulting length of the
 * extent covering maximum file size must fit into on-disk format
 * containers as well. Given that the length is always one unit bigger
 * than the maximum unit (because we count 0 as well), we have to lower
 * s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	/* small i_blocks in vfs inode? */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies
		 * that the inode i_blocks field represents total blocks
		 * in 512-byte units; i_blocks is 32 bits wide here
		 * (32 == size of vfs inode i_blocks * 8)
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
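
/*
 * Worked example: with 4KiB blocks (blkbits == 12), huge_file and a
 * 64-bit blkcnt_t, upper_limit stays at MAX_LFS_FILESIZE and the
 * 32-bit extent-start limit dominates: (2^32 - 1) << 12, i.e. 4KiB
 * short of 16TiB.  Without huge files, the 2^32 512-byte sector cap
 * in i_blocks lowers this to just under 2TiB.
 */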

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;
	/* This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */

	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
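
/*
 * Worked example: for 4KiB blocks (bits == 12) the block-mapped tree
 * gives res = 12 + 2^10 + 2^20 + 2^30 blocks, so roughly 4TiB once
 * shifted into bytes.  Without huge_file, the (2^32 - 1) 512-byte
 * sector cap in i_blocks shrinks the limit to about 2TiB.
 */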

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}
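
/*
 * Example: without the meta_bg feature, group descriptors simply
 * follow the superblock, so descriptor block nr is at
 * logical_sb_block + nr + 1 (block 1 onwards on a 4KiB-block
 * filesystem using the primary superblock).  With meta_bg, descriptor
 * block nr lives inside block group sbi->s_desc_per_block * nr and is
 * located relative to that group's first block instead.
 */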

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}
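
/*
 * Precedence, as encoded in the if/else chain above: the stripe=
 * mount option wins over s_raid_stripe_width, which wins over
 * s_raid_stride; any candidate larger than the block group, or a
 * result of 1 or less, disables striping entirely.
 */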

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
				~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
	if (ext4_has_feature_project(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with project quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When the walk through the list is complete, compute
 * the next wake-up time and put the thread to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				elr->lr_next_sched = jiffies +
					(prandom_u32()
					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

3228 3229 3230 3231 3232 3233
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

3234
	if (ext4_has_metadata_csum(sb)) {
3235
		/* journal checksum v3 */
3236
		compat = 0;
3237
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
3238 3239 3240 3241 3242 3243
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

3244 3245 3246 3247
	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock values for
 * older file systems --- and if we come across a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info	*sbi = EXT4_SB(sb);
	struct ext4_group_desc	*gdp;
	ext4_fsblk_t		first_block, last_block, b;
	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
	int			s, j, count = 0;

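	/*
	 * Without bigalloc the per-group overhead is simple arithmetic:
	 * a possible superblock backup, the group descriptor blocks,
	 * the inode table, plus 2 for the block and inode bitmaps.
	 */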
	if (!ext4_has_feature_bigalloc(sb))
		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
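	/*
	 * Illustrative note (not from the original source): the barrier
	 * below publishes the freshly computed value for lockless
	 * readers such as ext4_statfs(), which reads s_overhead without
	 * taking any lock.
	 */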
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
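	/*
	 * Worked example (illustrative): a 256 GiB filesystem with 4KiB
	 * blocks has 2^26 blocks, so 2% is about 1.3 million clusters
	 * and the reservation is capped at the 4096-cluster ceiling
	 * (16 MiB with 4KiB clusters).
	 */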

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	const char *descr;
	int ret = -ENOMEM;
	int blocksize, clustersize;
	unsigned int db_count;
	unsigned int i;
	int needs_recovery, has_huge_files, has_bigalloc;
	__u64 blocks_count;
	int err = 0;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;

	if ((data && !orig_data) || !sbi)
		goto out_free_base;

	sbi->s_daxdev = dax_dev;
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_free_base;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
	sbi->s_sb_block = sb_block;
	if (sb->s_bdev->bd_part)
		sbi->s_sectors_written_start =
			part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	/* -EINVAL is default */
	ret = -EINVAL;
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
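		/*
		 * Example (illustrative): with a 4096-byte blocksize and
		 * the default sb_block of 1, logical_sb_block becomes 0
		 * and offset 1024 -- the superblock still lives at byte
		 * 1024 of the device, inside block 0.
		 */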
	} else {
		logical_sb_block = sb_block;
	}

	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC)
		goto cantfind_ext4;
	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);

	/* Warn if metadata_csum and gdt_csum are both set. */
	if (ext4_has_feature_metadata_csum(sb) &&
	    ext4_has_feature_gdt_csum(sb))
		ext4_warning(sb, "metadata_csum and uninit_bg are "
			     "redundant flags; please run fsck.");

	/* Check for a known checksum algorithm */
	if (!ext4_verify_csum_type(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "unknown checksum algorithm.");
		silent = 1;
		goto cantfind_ext4;
	}

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
		ret = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto failed_mount;
	}

	/* Check superblock checksum */
	if (!ext4_superblock_csum_verify(sb, es)) {
		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
			 "invalid superblock checksum.  Run e2fsck?");
		silent = 1;
		ret = -EFSBADCRC;
		goto cantfind_ext4;
	}

	/* Precompute checksum seed for all metadata */
	if (ext4_has_feature_csum_seed(sb))
		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
	else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
					       sizeof(es->s_uuid));

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	/*
	 * set default s_li_wait_mult for lazyinit, for the case there is
	 * no mount option specified.
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;

	if (sbi->s_es->s_mount_opts[0]) {
		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
					      sizeof(sbi->s_es->s_mount_opts),
					      GFP_KERNEL);
		if (!s_mount_opts)
			goto failed_mount;
		if (!parse_options(s_mount_opts, sb, &journal_devnum,
				   &journal_ioprio, 0)) {
			ext4_msg(sb, KERN_WARNING,
				 "failed to parse options in superblock: %s",
				 s_mount_opts);
		}
		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
			   &journal_ioprio, 0))
		goto failed_mount;

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
			    "with data=journal disables delayed "
			    "allocation and O_DIRECT support!\n");
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			goto failed_mount;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			goto failed_mount;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			goto failed_mount;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
	    (ext4_has_compat_features(sb) ||
	     ext4_has_ro_compat_features(sb) ||
	     ext4_has_incompat_features(sb)))
		ext4_msg(sb, KERN_WARNING,
		       "feature flags set on rev 0 fs, "
		       "running e2fsck is recommended");

	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
		set_opt2(sb, HURD_COMPAT);
		if (ext4_has_feature_64bit(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "The Hurd can't support 64-bit file systems");
			goto failed_mount;
		}

		/*
		 * ea_inode feature uses l_i_version field which is not
		 * available in HURD_COMPAT mode.
		 */
		if (ext4_has_feature_ea_inode(sb)) {
			ext4_msg(sb, KERN_ERR,
				 "ea_inode feature is not supported for Hurd");
			goto failed_mount;
		}
	}

	if (IS_EXT2_SB(sb)) {
		if (ext2_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext[34] filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	if (IS_EXT3_SB(sb)) {
		if (ext3_feature_set_ok(sb))
			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
				 "using the ext4 subsystem");
		else {
			/*
			 * If we're probing be silent, if this looks like
			 * it's actually an ext4 filesystem.
			 */
			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
				goto failed_mount;
			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
				 "to feature incompatibilities");
			goto failed_mount;
		}
	}

	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
		goto failed_mount;

	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
	    blocksize > EXT4_MAX_BLOCK_SIZE) {
		ext4_msg(sb, KERN_ERR,
		       "Unsupported filesystem blocksize %d (%d log_block_size)",
			 blocksize, le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto failed_mount;
	}

	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
		ext4_msg(sb, KERN_ERR,
			 "Number of reserved GDT blocks insanely large: %d",
			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
		goto failed_mount;
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
		if (ext4_has_feature_inline_data(sb)) {
			ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
					" that may contain inline data");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
		if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
			ext4_msg(sb, KERN_ERR,
				"DAX unsupported by block device. Turning off DAX.");
			sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
		}
	}

	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
			 es->s_encryption_level);
		goto failed_mount;
	}

	if (sb->s_blocksize != blocksize) {
		/* Validate the filesystem blocksize */
		if (!sb_set_blocksize(sb, blocksize)) {
			ext4_msg(sb, KERN_ERR, "bad block size %d",
					blocksize);
			goto failed_mount;
		}

		brelse(bh);
		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
		bh = sb_bread_unmovable(sb, logical_sb_block);
		if (!bh) {
			ext4_msg(sb, KERN_ERR,
			       "Can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext4_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			ext4_msg(sb, KERN_ERR,
			       "Magic mismatch, very weird!");
			goto failed_mount;
		}
	}

	has_huge_files = ext4_has_feature_huge_file(sb);
	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
						      has_huge_files);
	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported inode size: %d",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
	}

	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
	if (ext4_has_feature_64bit(sb)) {
		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
		    !is_power_of_2(sbi->s_desc_size)) {
			ext4_msg(sb, KERN_ERR,
			       "unsupported descriptor size %lu",
			       sbi->s_desc_size);
			goto failed_mount;
		}
	} else
		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
	    sbi->s_inodes_per_group > blocksize * 8) {
		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
			 sbi->s_inodes_per_group);
		goto failed_mount;
	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	has_bigalloc = ext4_has_feature_bigalloc(sb);
	if (has_bigalloc) {
		if (clustersize < blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%d)", clustersize, blocksize);
			goto failed_mount;
		}
		if (le32_to_cpu(es->s_log_cluster_size) >
		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
			ext4_msg(sb, KERN_ERR,
				 "Invalid log cluster size: %u",
				 le32_to_cpu(es->s_log_cluster_size));
			goto failed_mount;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
				 "clusters per group (%lu) inconsistent",
				 sbi->s_blocks_per_group,
				 sbi->s_clusters_per_group);
			goto failed_mount;
		}
	} else {
		if (clustersize != blocksize) {
			ext4_warning(sb, "fragment/cluster size (%d) != "
				     "block size (%d)", clustersize,
				     blocksize);
			clustersize = blocksize;
		}
		if (sbi->s_blocks_per_group > blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#blocks per group too big: %lu",
				 sbi->s_blocks_per_group);
			goto failed_mount;
		}
		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
		sbi->s_cluster_bits = 0;
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
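	/*
	 * Example (illustrative): a bigalloc filesystem with 4KiB blocks
	 * and 64KiB clusters ends up with s_cluster_bits == 4 and
	 * s_cluster_ratio == 16; without bigalloc both collapse to the
	 * 1:1 case above.
	 */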

	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);

	/*
	 * Test whether we have more sectors than will fit in sector_t,
	 * and whether the max offset is addressable by the page cache.
	 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
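	/*
	 * Example (illustrative): with 4KiB blocks there are 32768
	 * blocks per group by default, so an 8 TiB filesystem
	 * (2^31 blocks) works out to 65536 block groups here.
	 */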
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* Pre-read the descriptors into the buffer cache */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sb_breadahead(sb, block);
	}

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;

	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	sb->s_cop = &ext4_cryptops;
#endif
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		err = ext4_load_journal(sb, es, journal_devnum);
		if (err)
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA
		 */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			set_opt(sb, ORDERED_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
		} else {
			set_opt(sb, JOURNAL_DATA);
			sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
		}
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ext4_msg(sb, KERN_ERR, "can't mount with "
			"journal_async_commit in data=ordered mode");
		goto failed_mount_wq;
	}

	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (!test_opt(sb, NO_MBCACHE)) {
		sbi->s_ea_block_cache = ext4_xattr_create_cache();
		if (!sbi->s_ea_block_cache) {
			ext4_msg(sb, KERN_ERR,
				 "Failed to create ea_block_cache");
			goto failed_mount_wq;
		}

		if (ext4_has_feature_ea_inode(sb)) {
			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
			if (!sbi->s_ea_inode_cache) {
				ext4_msg(sb, KERN_ERR,
					 "Failed to create ea_inode_cache");
				goto failed_mount_wq;
			}
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
	 */
	EXT4_SB(sb)->rsv_conversion_wq =
		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!EXT4_SB(sb)->rsv_conversion_wq) {
		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	/*
	 * The jbd2_journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext4_iget(sb, EXT4_ROOT_INO);
	if (IS_ERR(root)) {
		ext4_msg(sb, KERN_ERR, "get root inode failed");
		ret = PTR_ERR(root);
		root = NULL;
		goto failed_mount4;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
		iput(root);
		goto failed_mount4;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext4_msg(sb, KERN_ERR, "get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount4;
	}

	ret = ext4_setup_super(sb, es, sb_rdonly(sb));
	if (ret == -EROFS) {
		sb->s_flags |= SB_RDONLY;
		ret = 0;
	} else if (ret)
		goto failed_mount4a;

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
	    sbi->s_want_extra_isize == 0) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						     EXT4_GOOD_OLD_INODE_SIZE;
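		/*
		 * Illustrative note (assumption, not from the original
		 * source): with the current 160-byte struct ext4_inode
		 * this default comes to 32 bytes of extra in-inode space
		 * beyond the original 128-byte inode.
		 */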
		if (ext4_has_feature_extra_isize(sb)) {
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_want_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_want_extra_isize);
			if (sbi->s_want_extra_isize <
			    le16_to_cpu(es->s_min_extra_isize))
				sbi->s_want_extra_isize =
					le16_to_cpu(es->s_min_extra_isize);
		}
	}
	/* Check if enough inode space is available */
	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
							sbi->s_inode_size) {
		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
						       EXT4_GOOD_OLD_INODE_SIZE;
		ext4_msg(sb, KERN_INFO, "required extra inode space not "
			 "available");
	}

	ext4_set_resv_clusters(sb);

	err = ext4_setup_system_zone(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
		goto failed_mount4a;
	}

	ext4_ext_init(sb);
	err = ext4_mb_init(sb);
	if (err) {
		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
			 err);
		goto failed_mount5;
	}

	block = ext4_count_free_clusters(sb);
	ext4_free_blocks_count_set(sbi->s_es,
				   EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);

	if (err) {
		ext4_msg(sb, KERN_ERR, "insufficient memory");
		goto failed_mount6;
	}

	if (ext4_has_feature_flex_bg(sb))
		if (!ext4_fill_flex_info(sb)) {
			ext4_msg(sb, KERN_ERR,
			       "unable to initialize "
			       "flex_bg meta info!");
			goto failed_mount6;
		}

	err = ext4_register_li_request(sb, first_not_zeroed);
	if (err)
		goto failed_mount6;

	err = ext4_register_sysfs(sb);
	if (err)
		goto failed_mount7;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount. */
	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
		err = ext4_enable_quotas(sb);
		if (err)
			goto failed_mount8;
	}
#endif  /* CONFIG_QUOTA */

	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
	ext4_orphan_cleanup(sb, es);
	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery) {
		ext4_msg(sb, KERN_INFO, "recovery complete");
		ext4_mark_recovery_complete(sb, es);
	}
	if (EXT4_SB(sb)->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (test_opt(sb, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			ext4_msg(sb, KERN_WARNING,
				 "mounting with \"discard\" option, but "
				 "the device does not support discard");
	}

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
			 "Opts: %.*s%s%s", descr,
			 (int) sizeof(sbi->s_es->s_mount_opts),
			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);

	if (es->s_error_count)
		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */

	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);

	kfree(orig_data);
	return 0;

cantfind_ext4:
	if (!silent)
		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
	goto failed_mount;

#ifdef CONFIG_QUOTA
failed_mount8:
	ext4_unregister_sysfs(sb);
#endif
failed_mount7:
	ext4_unregister_li_request(sb);
failed_mount6:
	ext4_mb_release(sb);
	if (sbi->s_flex_groups)
		kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
failed_mount5:
	ext4_ext_release(sb);
	ext4_release_system_zone(sb);
failed_mount4a:
	dput(sb->s_root);
	sb->s_root = NULL;
failed_mount4:
	ext4_msg(sb, KERN_ERR, "mount failed");
	if (EXT4_SB(sb)->rsv_conversion_wq)
		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
	if (sbi->s_ea_inode_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
		sbi->s_ea_inode_cache = NULL;
	}
	if (sbi->s_ea_block_cache) {
		ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
		sbi->s_ea_block_cache = NULL;
	}
	if (sbi->s_journal) {
		jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
	}
failed_mount3a:
	ext4_es_unregister_shrinker(sbi);
failed_mount3:
	del_timer_sync(&sbi->s_err_report);
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kvfree(sbi->s_group_desc);
failed_mount:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext4_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
out_free_base:
	kfree(sbi);
	kfree(orig_data);
	fs_put_dax(dax_dev);
	return err ? err : ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;

	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

static struct inode *ext4_get_journal_inode(struct super_block *sb,
					     unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530
	return journal_inode;
}

static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	BUG_ON(!ext4_has_feature_journal(sb));

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	BUG_ON(!ext4_has_feature_journal(sb));

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
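	/*
	 * Example (illustrative): the external journal's superblock
	 * lives at byte offset 1024, so with a 4096-byte blocksize this
	 * reads block 0 at offset 1024, and with a 1024-byte blocksize
	 * it reads block 1 at offset 0.
	 */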
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
		       "external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
	wait_on_buffer(journal->j_sb_buffer);
	if (!buffer_uptodate(journal->j_sb_buffer)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;

	BUG_ON(!ext4_has_feature_journal(sb));

	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	really_read_only = bdev_read_only(sb->s_bdev);

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				return -EROFS;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
		       "and inode journals!");
		return -EINVAL;
	}

	if (journal_inum) {
		if (!(journal = ext4_get_journal(sb, journal_inum)))
			return -EINVAL;
	} else {
		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
			return -EINVAL;
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
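	/*
	 * Journal recovery below may rewrite the superblock from an
	 * older journalled copy; save the error information recorded in
	 * it across jbd2_journal_load() so it is not lost (restored
	 * right after the load).
	 */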
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		jbd2_journal_destroy(journal);
		return err;
	}

	EXT4_SB(sb)->s_journal = journal;
	ext4_clear_journal_err(sb, es);

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb, 1);
	}

	return 0;
}

static int ext4_commit_super(struct super_block *sb, int sync)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh || block_device_ejected(sb))
		return error;
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	if (sb->s_bdev->bd_part)
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
	else
		es->s_kbytes_written =
			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeclusters_counter)));
	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&EXT4_SB(sb)->s_freeinodes_counter));
	BUFFER_TRACE(sbh, "marking dirty");
	ext4_superblock_csum_set(sb);
	if (sync)
		lock_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	mark_buffer_dirty(sbh);
	if (sync) {
		unlock_buffer(sbh);
		error = __sync_dirty_buffer(sbh,
			REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
		if (buffer_write_io_error(sbh)) {
			ext4_msg(sb, KERN_ERR, "I/O error while writing "
			       "superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		BUG_ON(journal != NULL);
		return;
	}
	jbd2_journal_lock_updates(journal);
	if (jbd2_journal_flush(journal) < 0)
		goto out;

	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_commit_super(sb, 1);
	}

out:
	jbd2_journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

4839
	BUG_ON(!ext4_has_feature_journal(sb));
4840

4841
	journal = EXT4_SB(sb)->s_journal;
4842 4843 4844

	/*
	 * Now check for any error status which may have been recorded in the
4845
	 * journal by a prior ext4_error() or ext4_abort()
4846 4847
	 */

4848
	j_errno = jbd2_journal_errno(journal);
4849 4850 4851
	if (j_errno) {
		char nbuf[16];

4852
		errstr = ext4_decode_error(sb, j_errno, nbuf);
4853
		ext4_warning(sb, "Filesystem error recorded "
4854
			     "from previous mount: %s", errstr);
4855
		ext4_warning(sb, "Marking fs in need of filesystem check.");
4856

4857 4858
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
4859
		ext4_commit_super(sb, 1);
4860

4861
		jbd2_journal_clear_err(journal);
4862
		jbd2_journal_update_sb_errno(journal);
4863 4864 4865 4866 4867 4868 4869
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

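/*
 * Write out everything the filesystem has dirty (the ->sync_fs hook,
 * reached e.g. from sync(2)/syncfs(2)).  Roughly: flush pending
 * unwritten-extent conversions, write back non-journalled quota, ask
 * jbd2 to commit the latest transaction, and issue a cache flush to the
 * device unless the journal commit will send one for us.
 */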
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Write back quota in the non-journalled quota case - journalled
	 * quota has no dirty dquots.
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so a barrier
	 * must be sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that this function alone cannot bring the filesystem into a clean
 * state; it relies on the upper layer to stop all data & metadata
 * modifications.
 */
static int ext4_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;

	if (journal) {
		/* Now we set up the journal barrier. */
		jbd2_journal_lock_updates(journal);

		/*
		 * Don't clear the needs_recovery flag if we failed to
		 * flush the journal.
		 */
		error = jbd2_journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag. */
		ext4_clear_feature_journal_needs_recovery(sb);
	}

	error = ext4_commit_super(sb, 1);
out:
	if (journal)
		/* we rely on upper layer to stop further updates */
		jbd2_journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext4_unfreeze(struct super_block *sb)
{
	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
		return 0;

	if (EXT4_SB(sb)->s_journal) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		ext4_set_feature_journal_needs_recovery(sb);
	}

	ext4_commit_super(sb, 1);
	return 0;
}

/*
 * Structure to save mount options for ext4_remount's benefit
 */
struct ext4_mount_options {
	unsigned long s_mount_opt;
	unsigned long s_mount_opt2;
	kuid_t s_resuid;
	kgid_t s_resgid;
	unsigned long s_commit_interval;
	u32 s_min_batch_time, s_max_batch_time;
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[EXT4_MAXQUOTAS];
#endif
};

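/*
 * The ->remount_fs hook, reached e.g. via "mount -o remount,ro /dev/sdXN".
 * Sketch of the flow: snapshot the current options so they can be
 * restored on failure, parse the new option string, reject option
 * changes that cannot take effect on a live filesystem, and perform the
 * ro<->rw transition if SB_RDONLY changed.
 */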
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	int enable_quota = 0;
	ext4_group_t g;
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	int err = 0;
#ifdef CONFIG_QUOTA
	int i, j;
#endif
	char *orig_data = kstrdup(data, GFP_KERNEL);

	/* Store the original options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
	old_opts.s_min_batch_time = sbi->s_min_batch_time;
	old_opts.s_max_batch_time = sbi->s_max_batch_time;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				kfree(orig_data);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif
	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;

	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
	    test_opt(sb, JOURNAL_CHECKSUM)) {
		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
			 "during remount not supported; ignoring");
		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
	}

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DIOREAD_NOLOCK)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dioread_nolock");
			err = -EINVAL;
			goto restore_opts;
		}
		if (test_opt(sb, DAX)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			err = -EINVAL;
			goto restore_opts;
		}
	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				"journal_async_commit in data=ordered mode");
			err = -EINVAL;
			goto restore_opts;
		}
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
		err = -EINVAL;
		goto restore_opts;
	}

	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
			"dax flag with busy inodes while remounting");
		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
	}

	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
		ext4_abort(sb, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);

	es = sbi->s_es;

	if (sbi->s_journal) {
		ext4_init_journal_params(sb, sbi->s_journal);
		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
	}

	if (*flags & SB_LAZYTIME)
		sb->s_flags |= SB_LAZYTIME;

	if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
			err = -EROFS;
			goto restore_opts;
		}

		if (*flags & SB_RDONLY) {
			err = sync_filesystem(sb);
			if (err < 0)
				goto restore_opts;
			err = dquot_suspend(sb, -1);
			if (err < 0)
				goto restore_opts;

			/*
			 * First of all, the unconditional stuff we have to do
			 * to disable replay of the journal when we next remount
			 */
			sb->s_flags |= SB_RDONLY;

			/*
			 * OK, test if we are remounting a valid rw partition
			 * readonly, and if so set the rdonly flag and then
			 * mark the partition as valid again.
			 */
			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
			    (sbi->s_mount_state & EXT4_VALID_FS))
				es->s_state = cpu_to_le16(sbi->s_mount_state);

			if (sbi->s_journal)
				ext4_mark_recovery_complete(sb, es);
		} else {
			/* Make sure we can mount this feature set readwrite */
			if (ext4_has_feature_readonly(sb) ||
			    !ext4_feature_set_ok(sb, 0)) {
				err = -EROFS;
				goto restore_opts;
			}
			/*
			 * Make sure the group descriptor checksums
			 * are sane.  If they aren't, refuse to remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
					ext4_msg(sb, KERN_ERR,
	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EFSBADCRC;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
			 * require a full umount/remount for now.
			 */
			if (es->s_last_orphan) {
				ext4_msg(sb, KERN_WARNING, "Couldn't "
				       "remount RDWR because of unprocessed "
				       "orphan inode list.  Please "
				       "umount/remount instead");
				err = -EINVAL;
				goto restore_opts;
			}

			/*
			 * Mounting a RDONLY partition read-write, so reread
			 * and store the current valid flag.  (It may have
			 * been changed by e2fsck since we originally mounted
			 * the partition.)
			 */
			if (sbi->s_journal)
				ext4_clear_journal_err(sb, es);
			sbi->s_mount_state = le16_to_cpu(es->s_state);

			err = ext4_setup_super(sb, es, 0);
			if (err)
				goto restore_opts;

			sb->s_flags &= ~SB_RDONLY;
			if (ext4_has_feature_mmp(sb))
				if (ext4_multi_mount_protect(sb,
						le64_to_cpu(es->s_mmp_block))) {
					err = -EROFS;
					goto restore_opts;
				}
			enable_quota = 1;
		}
	}

	/*
	 * Reinitialize lazy itable initialization thread based on
	 * current settings
	 */
	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
		ext4_unregister_li_request(sb);
	else {
		ext4_group_t first_not_zeroed;
		first_not_zeroed = ext4_has_uninit_itable(sb);
		ext4_register_li_request(sb, first_not_zeroed);
	}

	ext4_setup_system_zone(sb);
	if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
		err = ext4_commit_super(sb, 1);
		if (err)
			goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(old_opts.s_qf_names[i]);
	if (enable_quota) {
		if (sb_any_quota_suspended(sb))
			dquot_resume(sb, -1);
		else if (ext4_has_feature_quota(sb)) {
			err = ext4_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif

	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
	kfree(orig_data);
	return 0;

restore_opts:
	sb->s_flags = old_sb_flags;
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sbi->s_commit_interval = old_opts.s_commit_interval;
	sbi->s_min_batch_time = old_opts.s_min_batch_time;
	sbi->s_max_batch_time = old_opts.s_max_batch_time;
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
	}
#endif
	kfree(orig_data);
	return err;
}

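/*
 * Clamp the statfs(2) numbers to the project quota limits, so that a
 * directory tree marked EXT4_INODE_PROJINHERIT appears to be a
 * filesystem of roughly the quota's size.
 */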
#ifdef CONFIG_QUOTA
static int ext4_statfs_project(struct super_block *sb,
			       kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

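/*
 * The ->statfs hook: fill in struct kstatfs from the superblock and the
 * percpu counters.  Free-space numbers are kept in clusters and
 * converted to blocks with EXT4_C2B(); f_fsid folds the 128-bit fs UUID
 * down to 64 bits.
 */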
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t overhead = 0, resv_blocks;
	u64 fsid;
	s64 bfree;
	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));

	if (!test_opt(sb, MINIX_DF))
		overhead = sbi->s_overhead;

	buf->f_type = EXT4_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
	/* prevent underflow in case little free space is available */
	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
	buf->f_bavail = buf->f_bfree -
			(ext4_r_blocks_count(es) + resv_blocks);
	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
	buf->f_namelen = EXT4_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;

#ifdef CONFIG_QUOTA
	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
#endif
	return 0;
}


#ifdef CONFIG_QUOTA

/*
 * Helper functions so that the transaction is started before we acquire
 * dqio_sem, to keep the correct lock ordering of transaction > dqio_sem.
 */
static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

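/*
 * Each dquot callback below follows the same pattern: start a journal
 * handle sized by the matching EXT4_QUOTA_*_BLOCKS() credit estimate,
 * call the generic dquot operation, stop the handle, and return the
 * first error encountered.
 */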
static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (err) {
		lockdep_set_quota_inode(path->dentry->d_inode,
					     I_DATA_SEM_NORMAL);
	} else {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
	}
	return err;
}

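/*
 * Enable one quota type backed by the hidden quota inode recorded in the
 * superblock (s_usr/s_grp/s_prj_quota_inum).  This is the path used when
 * the "quota" feature keeps quota files as internal inodes rather than
 * as user-visible files.
 */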
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	qf_inode = ext4_iget(sb, qf_inums[type]);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_enable(qf_inode, type, format_id, flags);
	iput(qf_inode);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);

	return err;
}

/* Enable usage tracking for all quota types. */
static int ext4_enable_quotas(struct super_block *sb)
{
	int type, err = 0;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};
	bool quota_mopt[EXT4_MAXQUOTAS] = {
		test_opt(sb, USRQUOTA),
		test_opt(sb, GRPQUOTA),
		test_opt(sb, PRJQUOTA),
	};

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
		if (qf_inums[type]) {
			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);

				ext4_warning(sb,
					"Failed to enable quota tracking "
					"(type=%d, err=%d). Please run "
					"e2fsck to fix.", type, err);
				return err;
			}
		}
	}
	return 0;
}

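/*
 * The ->quota_off hook: flush delalloc so the accounting is final,
 * disable the quota type, and for visible quota files drop the
 * NOATIME/IMMUTABLE flags set in ext4_quota_on() and refresh the
 * timestamps.
 */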
static int ext4_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	handle_t *handle;
	int err;

	/* Force all delayed allocation blocks to be allocated.
	 * Caller already holds s_umount sem */
	if (test_opt(sb, DELALLOC))
		sync_filesystem(sb);

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err || ext4_has_feature_quota(sb))
		goto out_put;

	inode_lock(inode);
	/*
	 * Update modification times of quota files when userspace can
	 * start looking at them. If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle))
		goto out_unlock;
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole? */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	if (EXT4_SB(sb)->s_journal && !handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, bh);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		ext4_mark_inode_dirty(handle, inode);
	}
	return len;
}

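/* The ->get_next_id hook: pass Q_GETNEXTQUOTA on to the loaded quota format. */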
static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
{
	const struct quota_format_ops	*ops;

	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;
	ops = sb_dqopt(sb)->ops[qid->type];
	if (!ops || !ops->get_next_id)
		return -ENOSYS;
	return dquot_get_next_id(sb, qid);
}
#endif

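/*
 * The ->mount entry point wired up in ext4_fs_type below: mount_bdev()
 * opens the block device and calls ext4_fill_super() on it.
 */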
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
}

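/*
 * When the separate ext2/ext3 drivers are not built, ext4 also registers
 * itself under those names so that "mount -t ext2 ..." keeps working;
 * the *_feature_set_ok() helpers first check that the on-disk feature
 * set really fits what the older driver would accept.
 */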
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}

static struct file_system_type ext4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext4",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext4");

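/*
 * Module init: set up shared state and caches, then register the
 * filesystem(s).  The out* labels unwind in reverse order of
 * initialization on failure.
 */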
/* Shared across all ext4 file systems */
wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];

static int __init ext4_init_fs(void)
{
	int i, err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;
	mutex_init(&ext4_li_mtx);

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
		init_waitqueue_head(&ext4__ioend_wq[i]);

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;
	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_es();

	return err;
}

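/* Module exit: tear everything down in the reverse order of init. */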
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_es();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)